/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "dmaengine.h"
#include "ste_dma40_ll.h"
29
30#define D40_NAME "dma40"
31
32#define D40_PHY_CHAN -1
33
/* For masking out/in 2 bit channel positions */
35#define D40_CHAN_POS(chan) (2 * (chan / 2))
36#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
37
/* Maximum iterations taken before giving up suspending a channel */
39#define D40_SUSPEND_MAX_IT 500
40
/* Runtime PM autosuspend delay, in milliseconds */
42#define DMA40_AUTOSUSPEND_DELAY 100
43
/* Hardware requirement on LCLA alignment */
45#define LCLA_ALIGNMENT 0x40000
46
/* Max number of links per event group */
48#define D40_LCLA_LINK_PER_EVENT_GRP 128
49#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
50
/* Max number of logical channels per physical channel */
52#define D40_MAX_LOG_CHAN_PER_PHY 32
53
/* Max number of attempts to get aligned pages for the LCLA area */
55#define MAX_LCLA_ALLOC_ATTEMPTS 256
56
/* Bit markings for allocation map */
58#define D40_ALLOC_FREE BIT(31)
59#define D40_ALLOC_PHY BIT(30)
60#define D40_ALLOC_LOG_FREE 0
61
62#define D40_MEMCPY_MAX_CHANS 8
63
/* Reserved event lines for memcpy only. */
65#define DB8500_DMA_MEMCPY_EV_0 51
66#define DB8500_DMA_MEMCPY_EV_1 56
67#define DB8500_DMA_MEMCPY_EV_2 57
68#define DB8500_DMA_MEMCPY_EV_3 58
69#define DB8500_DMA_MEMCPY_EV_4 59
70#define DB8500_DMA_MEMCPY_EV_5 60
71
72static int dma40_memcpy_channels[] = {
73 DB8500_DMA_MEMCPY_EV_0,
74 DB8500_DMA_MEMCPY_EV_1,
75 DB8500_DMA_MEMCPY_EV_2,
76 DB8500_DMA_MEMCPY_EV_3,
77 DB8500_DMA_MEMCPY_EV_4,
78 DB8500_DMA_MEMCPY_EV_5,
79};
80
/* Default configuration for physical memcpy */
82static struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
83 .mode = STEDMA40_MODE_PHYSICAL,
84 .dir = DMA_MEM_TO_MEM,
85
86 .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
87 .src_info.psize = STEDMA40_PSIZE_PHY_1,
88 .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
89
90 .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
91 .dst_info.psize = STEDMA40_PSIZE_PHY_1,
92 .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
93};
94
/* Default configuration for logical memcpy */
96static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
97 .mode = STEDMA40_MODE_LOGICAL,
98 .dir = DMA_MEM_TO_MEM,
99
100 .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
101 .src_info.psize = STEDMA40_PSIZE_LOG_1,
102 .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
103
104 .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
105 .dst_info.psize = STEDMA40_PSIZE_LOG_1,
106 .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
107};
108
/*
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: DMA channel command RUN or status RUNNING.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
117enum d40_command {
118 D40_DMA_STOP = 0,
119 D40_DMA_RUN = 1,
120 D40_DMA_SUSPEND_REQ = 2,
121 D40_DMA_SUSPENDED = 3
122};
123
/*
 * enum d40_events - The different Event Enables for the event lines.
 *
 * @D40_DEACTIVATE_EVENTLINE: De-activate the event line, stopping the logical channel.
 * @D40_ACTIVATE_EVENTLINE: Activate the event line, starting the logical channel.
 * @D40_SUSPEND_REQ_EVENTLINE: Request that the event line be suspended.
 * @D40_ROUND_EVENTLINE: Status check value for an event line.
 */
133enum d40_events {
134 D40_DEACTIVATE_EVENTLINE = 0,
135 D40_ACTIVATE_EVENTLINE = 1,
136 D40_SUSPEND_REQ_EVENTLINE = 2,
137 D40_ROUND_EVENTLINE = 3
138};
139
/*
 * Registers that have to be saved before the DMA hardware is powered off
 * and restored when power comes back on.
 */
145static u32 d40_backup_regs[] = {
146 D40_DREG_LCPA,
147 D40_DREG_LCLA,
148 D40_DREG_PRMSE,
149 D40_DREG_PRMSO,
150 D40_DREG_PRMOE,
151 D40_DREG_PRMOO,
152};
153
154#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
155
/*
 * Backup registers specific to the v4a register layout. DB9540 and DB8540
 * report the same hardware revision, so the v4a tables are used for DB9540
 * and earlier and the v4b tables for DB8540 and later.
 */
168static u32 d40_backup_regs_v4a[] = {
169 D40_DREG_PSEG1,
170 D40_DREG_PSEG2,
171 D40_DREG_PSEG3,
172 D40_DREG_PSEG4,
173 D40_DREG_PCEG1,
174 D40_DREG_PCEG2,
175 D40_DREG_PCEG3,
176 D40_DREG_PCEG4,
177 D40_DREG_RSEG1,
178 D40_DREG_RSEG2,
179 D40_DREG_RSEG3,
180 D40_DREG_RSEG4,
181 D40_DREG_RCEG1,
182 D40_DREG_RCEG2,
183 D40_DREG_RCEG3,
184 D40_DREG_RCEG4,
185};
186
187#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)
188
189static u32 d40_backup_regs_v4b[] = {
190 D40_DREG_CPSEG1,
191 D40_DREG_CPSEG2,
192 D40_DREG_CPSEG3,
193 D40_DREG_CPSEG4,
194 D40_DREG_CPSEG5,
195 D40_DREG_CPCEG1,
196 D40_DREG_CPCEG2,
197 D40_DREG_CPCEG3,
198 D40_DREG_CPCEG4,
199 D40_DREG_CPCEG5,
200 D40_DREG_CRSEG1,
201 D40_DREG_CRSEG2,
202 D40_DREG_CRSEG3,
203 D40_DREG_CRSEG4,
204 D40_DREG_CRSEG5,
205 D40_DREG_CRCEG1,
206 D40_DREG_CRCEG2,
207 D40_DREG_CRCEG3,
208 D40_DREG_CRCEG4,
209 D40_DREG_CRCEG5,
210};
211
212#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
213
214static u32 d40_backup_regs_chan[] = {
215 D40_CHAN_REG_SSCFG,
216 D40_CHAN_REG_SSELT,
217 D40_CHAN_REG_SSPTR,
218 D40_CHAN_REG_SSLNK,
219 D40_CHAN_REG_SDCFG,
220 D40_CHAN_REG_SDELT,
221 D40_CHAN_REG_SDPTR,
222 D40_CHAN_REG_SDLNK,
223};
224
225#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
226 BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)
227
/**
 * struct d40_interrupt_lookup - lookup table for the interrupt handler
 *
 * @src: Interrupt status register.
 * @clr: Interrupt clear register.
 * @is_error: true if this entry describes an error interrupt.
 * @offset: start delta into lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, lookup_phy_chans shall be used instead.
 */
237struct d40_interrupt_lookup {
238 u32 src;
239 u32 clr;
240 bool is_error;
241 int offset;
242};
243
244
245static struct d40_interrupt_lookup il_v4a[] = {
246 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
247 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
248 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
249 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
250 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
251 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
252 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
253 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
254 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
255 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
256};
257
258static struct d40_interrupt_lookup il_v4b[] = {
259 {D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false, 0},
260 {D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
261 {D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
262 {D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
263 {D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
264 {D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true, 0},
265 {D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true, 32},
266 {D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true, 64},
267 {D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true, 96},
268 {D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true, 128},
269 {D40_DREG_CPCTIS, D40_DREG_CPCICR, false, D40_PHY_CHAN},
270 {D40_DREG_CPCEIS, D40_DREG_CPCICR, true, D40_PHY_CHAN},
271};
272
/**
 * struct d40_reg_val - simple register/value pair
 *
 * @reg: The register.
 * @val: The value written to the register in reg.
 */
279struct d40_reg_val {
280 unsigned int reg;
281 unsigned int val;
282};
283
284static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
 /* Clock every part of the DMA block from start */
286 { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
287
 /* Interrupts on all logical channels */
289 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
290 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
291 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
292 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
293 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
294 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
295 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
296 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
297 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
298 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
299 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
300 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
301};
302static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
 /* Clock every part of the DMA block from start */
304 { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
305
 /* Interrupts on all logical channels */
307 { .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
308 { .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
309 { .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
310 { .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
311 { .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
312 { .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
313 { .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
314 { .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
315 { .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
316 { .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
317 { .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
318 { .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
319 { .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
320 { .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
321 { .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
322};
323
/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when lli_len > 1. If lli_len is equal to one
 * pre_alloc_lli is used instead of allocating memory dynamically.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @dma_addr: DMA address of base, if mapped.
 * @pre_alloc_lli: Pre-allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
335struct d40_lli_pool {
336 void *base;
337 int size;
338 dma_addr_t dma_addr;
339
340 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
341};
342
/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channels. Both src and dst point into
 * the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if lli_len
 * equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool the LLIs are allocated from.
 * @lli_len: Number of LLIs in the current descriptor.
 * @lli_current: Number of LLIs transferred so far.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine descriptor, used among other things for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job.
 *
 * This descriptor is used for both logical and physical transfers.
 */
362struct d40_desc {
363
364 struct d40_phy_lli_bidir lli_phy;
365
366 struct d40_log_lli_bidir lli_log;
367
368 struct d40_lli_pool lli_pool;
369 int lli_len;
370 int lli_current;
371 int lcla_alloc;
372
373 struct dma_async_tx_descriptor txd;
374 struct list_head node;
375
376 bool is_in_client_list;
377 bool cyclic;
378};
379
/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address of base, if mapped.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * Only kept around for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Map over which LCLA entry is owned by which job.
 */
391struct d40_lcla_pool {
392 void *base;
393 dma_addr_t dma_addr;
394 void *base_unaligned;
395 int pages;
396 spinlock_t lock;
397 struct d40_desc **alloc_map;
398};
399
/**
 * struct d40_phy_res - struct for handling event lines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @reserved: True if used by secure world or otherwise reserved.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit map showing which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but for dst.
 * allocated_src and allocated_dst use the D40_ALLOC* defines as well as
 * the event line number.
 * @use_soft_lli: Marks whether the linked lists of this channel are managed by SW.
 */
414struct d40_phy_res {
415 spinlock_t lock;
416 bool reserved;
417 int num;
418 u32 allocated_src;
419 u32 allocated_dst;
420 bool use_soft_lli;
421};
422
423struct d40_base;
424
/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number of this channel, if any.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when a transfer is ongoing on this channel.
 * @phy_chan: Pointer to the physical channel this instance runs on. If this
 * pointer is NULL, the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call the client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending().
 * @active: Active descriptors.
 * @done: Completed jobs.
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this DMA channel.
 * @configured: whether the dma_cfg configuration is valid.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
456struct d40_chan {
457 spinlock_t lock;
458 int log_num;
459 int pending_tx;
460 bool busy;
461 struct d40_phy_res *phy_chan;
462 struct dma_chan chan;
463 struct tasklet_struct tasklet;
464 struct list_head client;
465 struct list_head pending_queue;
466 struct list_head active;
467 struct list_head done;
468 struct list_head queue;
469 struct list_head prepare_queue;
470 struct stedma40_chan_cfg dma_cfg;
471 bool configured;
472 struct d40_base *base;
473
474 u32 src_def_cfg;
475 u32 dst_def_cfg;
476 struct d40_def_lcsp log_def;
477 struct d40_log_lli_full *lcpa;
478
479 dma_addr_t runtime_addr;
480 enum dma_transfer_direction runtime_direction;
481};
482
/**
 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
 * controller
 *
 * @backup: the pointer to the registers address array for backup
 * @backup_size: the size of the registers address array for backup
 * @realtime_en: the realtime enable register
 * @realtime_clear: the realtime clear register
 * @high_prio_en: the high priority enable register
 * @high_prio_clear: the high priority clear register
 * @interrupt_en: the interrupt enable register
 * @interrupt_clear: the interrupt clear register
 * @il: the pointer to the struct d40_interrupt_lookup array
 * @il_size: the size of the d40_interrupt_lookup array
 * @init_reg: the pointer to the struct d40_reg_val array
 * @init_reg_size: the size of the d40_reg_val array
 */
500struct d40_gen_dmac {
501 u32 *backup;
502 u32 backup_size;
503 u32 realtime_en;
504 u32 realtime_clear;
505 u32 high_prio_en;
506 u32 high_prio_clear;
507 u32 interrupt_en;
508 u32 interrupt_clear;
509 struct d40_interrupt_lookup *il;
510 u32 il_size;
511 struct d40_reg_val *init_reg;
512 u32 init_reg_size;
513};
514
/**
 * struct d40_base - The big global struct, one for each probed instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's registers.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_memcpy_chans: The number of channels used for memcpy.
 * @num_phy_chans: The number of physical channels. Read from HW. This is
 * the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_parms: DMA parameters.
 * @dma_both: dma_device with channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device with channels that can only do slave transfers.
 * @dma_memcpy: dma_device with channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in the system.
 * @log_chans: Room for all possible logical channels in the system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to the provided platform_data which is the driver
 * configuration.
 * @lcpa_regulator: Pointer to the regulator for the esram bank used for LCLA.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: LCLA pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 * @reg_val_backup: Values of some hardware registers, stored here before the
 * DMA is powered off and restored when the power comes back on.
 * @reg_val_backup_v4: Backup of registers that only exist on newer (v4a/v4b)
 * controller variants.
 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 * @gcc_pwr_off_mask: Mask of the channels that can be turned off.
 * @gen_dmac: generic register values representing the u8500/u8540 DMA
 * controller variant.
 */
562struct d40_base {
563 spinlock_t interrupt_lock;
564 spinlock_t execmd_lock;
565 struct device *dev;
566 void __iomem *virtbase;
567 u8 rev:4;
568 struct clk *clk;
569 phys_addr_t phy_start;
570 resource_size_t phy_size;
571 int irq;
572 int num_memcpy_chans;
573 int num_phy_chans;
574 int num_log_chans;
575 struct device_dma_parameters dma_parms;
576 struct dma_device dma_both;
577 struct dma_device dma_slave;
578 struct dma_device dma_memcpy;
579 struct d40_chan *phy_chans;
580 struct d40_chan *log_chans;
581 struct d40_chan **lookup_log_chans;
582 struct d40_chan **lookup_phy_chans;
583 struct stedma40_platform_data *plat_data;
584 struct regulator *lcpa_regulator;
585
586 struct d40_phy_res *phy_res;
587 struct d40_lcla_pool lcla_pool;
588 void *lcpa_base;
589 dma_addr_t phy_lcpa;
590 resource_size_t lcpa_size;
591 struct kmem_cache *desc_slab;
592 u32 reg_val_backup[BACKUP_REGS_SZ];
593 u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
594 u32 *reg_val_backup_chan;
595 u16 gcc_pwr_off_mask;
596 struct d40_gen_dmac gen_dmac;
597};
598
599static struct device *chan2dev(struct d40_chan *d40c)
600{
601 return &d40c->chan.dev->device;
602}
603
604static bool chan_is_physical(struct d40_chan *chan)
605{
606 return chan->log_num == D40_PHY_CHAN;
607}
608
609static bool chan_is_logical(struct d40_chan *chan)
610{
611 return !chan_is_physical(chan);
612}
613
614static void __iomem *chan_base(struct d40_chan *chan)
615{
616 return chan->base->virtbase + D40_DREG_PCBASE +
617 chan->phy_chan->num * D40_DREG_PCDELTA;
618}
619
620#define d40_err(dev, format, arg...) \
621 dev_err(dev, "[%s] " format, __func__, ## arg)
622
623#define chan_err(d40c, format, arg...) \
624 d40_err(chan2dev(d40c), format, ## arg)
625
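/*
 * Allocate LLI storage for a descriptor: a single-link job uses the
 * descriptor's pre-allocated area, longer jobs get a kmalloc'd buffer.
 * For physical channels the buffer is also DMA-mapped so the hardware
 * can fetch the links directly.
 */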
626static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
627 int lli_len)
628{
629 bool is_log = chan_is_logical(d40c);
630 u32 align;
631 void *base;
632
633 if (is_log)
634 align = sizeof(struct d40_log_lli);
635 else
636 align = sizeof(struct d40_phy_lli);
637
638 if (lli_len == 1) {
639 base = d40d->lli_pool.pre_alloc_lli;
640 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
641 d40d->lli_pool.base = NULL;
642 } else {
643 d40d->lli_pool.size = lli_len * 2 * align;
644
645 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
646 d40d->lli_pool.base = base;
647
648 if (d40d->lli_pool.base == NULL)
649 return -ENOMEM;
650 }
651
652 if (is_log) {
653 d40d->lli_log.src = PTR_ALIGN(base, align);
654 d40d->lli_log.dst = d40d->lli_log.src + lli_len;
655
656 d40d->lli_pool.dma_addr = 0;
657 } else {
658 d40d->lli_phy.src = PTR_ALIGN(base, align);
659 d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
660
661 d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
662 d40d->lli_phy.src,
663 d40d->lli_pool.size,
664 DMA_TO_DEVICE);
665
666 if (dma_mapping_error(d40c->base->dev,
667 d40d->lli_pool.dma_addr)) {
668 kfree(d40d->lli_pool.base);
669 d40d->lli_pool.base = NULL;
670 d40d->lli_pool.dma_addr = 0;
671 return -ENOMEM;
672 }
673 }
674
675 return 0;
676}
677
678static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
679{
680 if (d40d->lli_pool.dma_addr)
681 dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
682 d40d->lli_pool.size, DMA_TO_DEVICE);
683
684 kfree(d40d->lli_pool.base);
685 d40d->lli_pool.base = NULL;
686 d40d->lli_pool.size = 0;
687 d40d->lli_log.src = NULL;
688 d40d->lli_log.dst = NULL;
689 d40d->lli_phy.src = NULL;
690 d40d->lli_phy.dst = NULL;
691}
692
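/*
 * Reserve one LCLA link entry for this descriptor. Src and dst links are
 * allocated together, so only half of the D40_LCLA_LINK_PER_EVENT_GRP
 * entries are handed out here, and index 0 is skipped since it is reserved
 * as an end-of-chain marker.
 */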
693static int d40_lcla_alloc_one(struct d40_chan *d40c,
694 struct d40_desc *d40d)
695{
696 unsigned long flags;
697 int i;
698 int ret = -EINVAL;
699
700 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
701
702
703
704
705
706 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
707 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
708
709 if (!d40c->base->lcla_pool.alloc_map[idx]) {
710 d40c->base->lcla_pool.alloc_map[idx] = d40d;
711 d40d->lcla_alloc++;
712 ret = i;
713 break;
714 }
715 }
716
717 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
718
719 return ret;
720}
721
722static int d40_lcla_free_all(struct d40_chan *d40c,
723 struct d40_desc *d40d)
724{
725 unsigned long flags;
726 int i;
727 int ret = -EINVAL;
728
729 if (chan_is_physical(d40c))
730 return 0;
731
732 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
733
734 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
735 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
736
737 if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
738 d40c->base->lcla_pool.alloc_map[idx] = NULL;
739 d40d->lcla_alloc--;
740 if (d40d->lcla_alloc == 0) {
741 ret = 0;
742 break;
743 }
744 }
745 }
746
747 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
748
749 return ret;
750
751}
752
753static void d40_desc_remove(struct d40_desc *d40d)
754{
755 list_del(&d40d->node);
756}
757
758static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
759{
760 struct d40_desc *desc = NULL;
761
762 if (!list_empty(&d40c->client)) {
763 struct d40_desc *d;
764 struct d40_desc *_d;
765
766 list_for_each_entry_safe(d, _d, &d40c->client, node) {
767 if (async_tx_test_ack(&d->txd)) {
768 d40_desc_remove(d);
769 desc = d;
770 memset(desc, 0, sizeof(*desc));
771 break;
772 }
773 }
774 }
775
776 if (!desc)
777 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
778
779 if (desc)
780 INIT_LIST_HEAD(&desc->node);
781
782 return desc;
783}
784
785static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
786{
787
788 d40_pool_lli_free(d40c, d40d);
789 d40_lcla_free_all(d40c, d40d);
790 kmem_cache_free(d40c->base->desc_slab, d40d);
791}
792
793static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
794{
795 list_add_tail(&desc->node, &d40c->active);
796}
797
798static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
799{
800 struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
801 struct d40_phy_lli *lli_src = desc->lli_phy.src;
802 void __iomem *base = chan_base(chan);
803
804 writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
805 writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
806 writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
807 writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
808
809 writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
810 writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
811 writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
812 writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
813}
814
815static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
816{
817 list_add_tail(&desc->node, &d40c->done);
818}
819
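/*
 * Load the logical LLIs of a descriptor into LCPA and LCLA. The first link
 * goes into the channel's LCPA slot; remaining links are copied into LCLA
 * entries and chained together. For cyclic jobs the last link is pointed
 * back at the first LCLA entry ("linkback"). Transfers that cannot get an
 * LCLA entry fall back to being reloaded link by link from the
 * terminal-count interrupt (soft LLI).
 */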
820static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
821{
822 struct d40_lcla_pool *pool = &chan->base->lcla_pool;
823 struct d40_log_lli_bidir *lli = &desc->lli_log;
824 int lli_current = desc->lli_current;
825 int lli_len = desc->lli_len;
826 bool cyclic = desc->cyclic;
827 int curr_lcla = -EINVAL;
828 int first_lcla = 0;
829 bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
830 bool linkback;
831
832
833
834
835
836 linkback = cyclic && lli_current == 0;
837
838
839
840
841
842 if (linkback || (lli_len - lli_current > 1)) {
843
844
845
846
847
848
849 if (!(chan->phy_chan->use_soft_lli &&
850 chan->dma_cfg.dir == DMA_DEV_TO_MEM))
851 curr_lcla = d40_lcla_alloc_one(chan, desc);
852
853 first_lcla = curr_lcla;
854 }
855
856
857
858
859
860
861
862 if (!linkback || curr_lcla == -EINVAL) {
863 unsigned int flags = 0;
864
865 if (curr_lcla == -EINVAL)
866 flags |= LLI_TERM_INT;
867
868 d40_log_lli_lcpa_write(chan->lcpa,
869 &lli->dst[lli_current],
870 &lli->src[lli_current],
871 curr_lcla,
872 flags);
873 lli_current++;
874 }
875
876 if (curr_lcla < 0)
877 goto set_current;
878
879 for (; lli_current < lli_len; lli_current++) {
880 unsigned int lcla_offset = chan->phy_chan->num * 1024 +
881 8 * curr_lcla * 2;
882 struct d40_log_lli *lcla = pool->base + lcla_offset;
883 unsigned int flags = 0;
884 int next_lcla;
885
886 if (lli_current + 1 < lli_len)
887 next_lcla = d40_lcla_alloc_one(chan, desc);
888 else
889 next_lcla = linkback ? first_lcla : -EINVAL;
890
891 if (cyclic || next_lcla == -EINVAL)
892 flags |= LLI_TERM_INT;
893
894 if (linkback && curr_lcla == first_lcla) {
895
896 d40_log_lli_lcpa_write(chan->lcpa,
897 &lli->dst[lli_current],
898 &lli->src[lli_current],
899 next_lcla, flags);
900 }
901
902
903
904
905
906 d40_log_lli_lcla_write(lcla,
907 &lli->dst[lli_current],
908 &lli->src[lli_current],
909 next_lcla, flags);
910
911
912
913
914
915 if (!use_esram_lcla) {
916 dma_sync_single_range_for_device(chan->base->dev,
917 pool->dma_addr, lcla_offset,
918 2 * sizeof(struct d40_log_lli),
919 DMA_TO_DEVICE);
920 }
921 curr_lcla = next_lcla;
922
923 if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
924 lli_current++;
925 break;
926 }
927 }
928 set_current:
929 desc->lli_current = lli_current;
930}
931
932static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
933{
934 if (chan_is_physical(d40c)) {
935 d40_phy_lli_load(d40c, d40d);
936 d40d->lli_current = d40d->lli_len;
937 } else
938 d40_log_lli_to_lcxa(d40c, d40d);
939}
940
941static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
942{
943 return list_first_entry_or_null(&d40c->active, struct d40_desc, node);
944}
945
946
947static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
948{
949 d40_desc_remove(desc);
950 desc->is_in_client_list = false;
951 list_add_tail(&desc->node, &d40c->pending_queue);
952}
953
954static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
955{
956 return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc,
957 node);
958}
959
960static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
961{
962 return list_first_entry_or_null(&d40c->queue, struct d40_desc, node);
963}
964
965static struct d40_desc *d40_first_done(struct d40_chan *d40c)
966{
967 return list_first_entry_or_null(&d40c->done, struct d40_desc, node);
968}
969
970static int d40_psize_2_burst_size(bool is_log, int psize)
971{
972 if (is_log) {
973 if (psize == STEDMA40_PSIZE_LOG_1)
974 return 1;
975 } else {
976 if (psize == STEDMA40_PSIZE_PHY_1)
977 return 1;
978 }
979
980 return 2 << psize;
981}
982
/*
 * The DMA hardware only supports transferring packets of up to
 * STEDMA40_MAX_SEG_SIZE * data_width bytes per element, where data_width is
 * given in bytes.
 *
 * Calculate the total number of DMA elements required to send the given size.
 */
989static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
990{
991 int dmalen;
992 u32 max_w = max(data_width1, data_width2);
993 u32 min_w = min(data_width1, data_width2);
994 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
995
996 if (seg_max > STEDMA40_MAX_SEG_SIZE)
997 seg_max -= max_w;
998
999 if (!IS_ALIGNED(size, max_w))
1000 return -EINVAL;
1001
1002 if (size <= seg_max)
1003 dmalen = 1;
1004 else {
1005 dmalen = size / seg_max;
1006 if (dmalen * seg_max < size)
1007 dmalen++;
1008 }
1009 return dmalen;
1010}
1011
1012static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
1013 u32 data_width1, u32 data_width2)
1014{
1015 struct scatterlist *sg;
1016 int i;
1017 int len = 0;
1018 int ret;
1019
1020 for_each_sg(sgl, sg, sg_len, i) {
1021 ret = d40_size_2_dmalen(sg_dma_len(sg),
1022 data_width1, data_width2);
1023 if (ret < 0)
1024 return ret;
1025 len += ret;
1026 }
1027 return len;
1028}
1029
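/*
 * Issue a command (STOP/RUN/SUSPEND_REQ) to a physical channel by writing
 * the ACTIVE/ACTIVO register. A STOP is preceded by a suspend request, and
 * suspend requests are polled for up to D40_SUSPEND_MAX_IT iterations until
 * the channel reports STOPPED or SUSPENDED.
 */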
1030static int __d40_execute_command_phy(struct d40_chan *d40c,
1031 enum d40_command command)
1032{
1033 u32 status;
1034 int i;
1035 void __iomem *active_reg;
1036 int ret = 0;
1037 unsigned long flags;
1038 u32 wmask;
1039
1040 if (command == D40_DMA_STOP) {
1041 ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
1042 if (ret)
1043 return ret;
1044 }
1045
1046 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
1047
1048 if (d40c->phy_chan->num % 2 == 0)
1049 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1050 else
1051 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1052
1053 if (command == D40_DMA_SUSPEND_REQ) {
1054 status = (readl(active_reg) &
1055 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1056 D40_CHAN_POS(d40c->phy_chan->num);
1057
1058 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1059 goto unlock;
1060 }
1061
1062 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
1063 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
1064 active_reg);
1065
1066 if (command == D40_DMA_SUSPEND_REQ) {
1067
1068 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
1069 status = (readl(active_reg) &
1070 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1071 D40_CHAN_POS(d40c->phy_chan->num);
1072
1073 cpu_relax();
1074
1075
1076
1077
1078 udelay(3);
1079
1080 if (status == D40_DMA_STOP ||
1081 status == D40_DMA_SUSPENDED)
1082 break;
1083 }
1084
1085 if (i == D40_SUSPEND_MAX_IT) {
1086 chan_err(d40c,
1087 "unable to suspend the chl %d (log: %d) status %x\n",
1088 d40c->phy_chan->num, d40c->log_num,
1089 status);
1090 dump_stack();
1091 ret = -EBUSY;
1092 }
1093
1094 }
1095 unlock:
1096 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
1097 return ret;
1098}
1099
1100static void d40_term_all(struct d40_chan *d40c)
1101{
1102 struct d40_desc *d40d;
1103 struct d40_desc *_d;
1104
1105
1106 while ((d40d = d40_first_done(d40c))) {
1107 d40_desc_remove(d40d);
1108 d40_desc_free(d40c, d40d);
1109 }
1110
1111
1112 while ((d40d = d40_first_active_get(d40c))) {
1113 d40_desc_remove(d40d);
1114 d40_desc_free(d40c, d40d);
1115 }
1116
1117
1118 while ((d40d = d40_first_queued(d40c))) {
1119 d40_desc_remove(d40d);
1120 d40_desc_free(d40c, d40d);
1121 }
1122
1123
1124 while ((d40d = d40_first_pending(d40c))) {
1125 d40_desc_remove(d40d);
1126 d40_desc_free(d40c, d40d);
1127 }
1128
1129
1130 if (!list_empty(&d40c->client))
1131 list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
1132 d40_desc_remove(d40d);
1133 d40_desc_free(d40c, d40d);
1134 }
1135
1136
1137 if (!list_empty(&d40c->prepare_queue))
1138 list_for_each_entry_safe(d40d, _d,
1139 &d40c->prepare_queue, node) {
1140 d40_desc_remove(d40d);
1141 d40_desc_free(d40c, d40d);
1142 }
1143
1144 d40c->pending_tx = 0;
1145}
1146
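/*
 * Activate, deactivate or suspend a single event line of a logical channel
 * by writing the SSLNK/SDLNK register. Suspend requests are polled until
 * the line is deactivated; activation is retried as a workaround, since the
 * hardware can miss the enable when src and dst event lines are used on the
 * same logical channel.
 */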
1147static void __d40_config_set_event(struct d40_chan *d40c,
1148 enum d40_events event_type, u32 event,
1149 int reg)
1150{
1151 void __iomem *addr = chan_base(d40c) + reg;
1152 int tries;
1153 u32 status;
1154
1155 switch (event_type) {
1156
1157 case D40_DEACTIVATE_EVENTLINE:
1158
1159 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
1160 | ~D40_EVENTLINE_MASK(event), addr);
1161 break;
1162
1163 case D40_SUSPEND_REQ_EVENTLINE:
1164 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1165 D40_EVENTLINE_POS(event);
1166
1167 if (status == D40_DEACTIVATE_EVENTLINE ||
1168 status == D40_SUSPEND_REQ_EVENTLINE)
1169 break;
1170
1171 writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
1172 | ~D40_EVENTLINE_MASK(event), addr);
1173
1174 for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
1175
1176 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1177 D40_EVENTLINE_POS(event);
1178
1179 cpu_relax();
1180
1181
1182
1183
1184 udelay(3);
1185
1186 if (status == D40_DEACTIVATE_EVENTLINE)
1187 break;
1188 }
1189
1190 if (tries == D40_SUSPEND_MAX_IT) {
 chan_err(d40c,
 "unable to stop the event_line chl %d (log: %d) status %x\n",
 d40c->phy_chan->num, d40c->log_num, status);
1195 }
1196 break;
1197
1198 case D40_ACTIVATE_EVENTLINE:
1199
1200
1201
1202
1203
1204 tries = 100;
1205 while (--tries) {
1206 writel((D40_ACTIVATE_EVENTLINE <<
1207 D40_EVENTLINE_POS(event)) |
1208 ~D40_EVENTLINE_MASK(event), addr);
1209
1210 if (readl(addr) & D40_EVENTLINE_MASK(event))
1211 break;
1212 }
1213
1214 if (tries != 99)
1215 dev_dbg(chan2dev(d40c),
1216 "[%s] workaround enable S%cLNK (%d tries)\n",
1217 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
1218 100 - tries);
1219
1220 WARN_ON(!tries);
1221 break;
1222
1223 case D40_ROUND_EVENTLINE:
1224 BUG();
1225 break;
1226
1227 }
1228}
1229
1230static void d40_config_set_event(struct d40_chan *d40c,
1231 enum d40_events event_type)
1232{
1233 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1234
1235
1236 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
1237 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
1238 __d40_config_set_event(d40c, event_type, event,
1239 D40_CHAN_REG_SSLNK);
1240
1241 if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
1242 __d40_config_set_event(d40c, event_type, event,
1243 D40_CHAN_REG_SDLNK);
1244}
1245
1246static u32 d40_chan_has_events(struct d40_chan *d40c)
1247{
1248 void __iomem *chanbase = chan_base(d40c);
1249 u32 val;
1250
1251 val = readl(chanbase + D40_CHAN_REG_SSLNK);
1252 val |= readl(chanbase + D40_CHAN_REG_SDLNK);
1253
1254 return val;
1255}
1256
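/*
 * Execute a command on a logical channel. The event line is activated,
 * deactivated or suspended as needed; the underlying physical channel is
 * stopped only when no event lines remain active on it, and is started
 * again on D40_DMA_RUN.
 */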
1257static int
1258__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
1259{
1260 unsigned long flags;
1261 int ret = 0;
1262 u32 active_status;
1263 void __iomem *active_reg;
1264
1265 if (d40c->phy_chan->num % 2 == 0)
1266 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1267 else
1268 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1269
1270
1271 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
1272
1273 switch (command) {
1274 case D40_DMA_STOP:
1275 case D40_DMA_SUSPEND_REQ:
1276
1277 active_status = (readl(active_reg) &
1278 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1279 D40_CHAN_POS(d40c->phy_chan->num);
1280
1281 if (active_status == D40_DMA_RUN)
1282 d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
1283 else
1284 d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
1285
1286 if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
1287 ret = __d40_execute_command_phy(d40c, command);
1288
1289 break;
1290
1291 case D40_DMA_RUN:
1292
1293 d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
1294 ret = __d40_execute_command_phy(d40c, command);
1295 break;
1296
1297 case D40_DMA_SUSPENDED:
1298 BUG();
1299 break;
1300 }
1301
1302 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
1303 return ret;
1304}
1305
1306static int d40_channel_execute_command(struct d40_chan *d40c,
1307 enum d40_command command)
1308{
1309 if (chan_is_logical(d40c))
1310 return __d40_execute_command_log(d40c, command);
1311 else
1312 return __d40_execute_command_phy(d40c, command);
1313}
1314
1315static u32 d40_get_prmo(struct d40_chan *d40c)
1316{
1317 static const unsigned int phy_map[] = {
1318 [STEDMA40_PCHAN_BASIC_MODE]
1319 = D40_DREG_PRMO_PCHAN_BASIC,
1320 [STEDMA40_PCHAN_MODULO_MODE]
1321 = D40_DREG_PRMO_PCHAN_MODULO,
1322 [STEDMA40_PCHAN_DOUBLE_DST_MODE]
1323 = D40_DREG_PRMO_PCHAN_DOUBLE_DST,
1324 };
1325 static const unsigned int log_map[] = {
1326 [STEDMA40_LCHAN_SRC_PHY_DST_LOG]
1327 = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
1328 [STEDMA40_LCHAN_SRC_LOG_DST_PHY]
1329 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
1330 [STEDMA40_LCHAN_SRC_LOG_DST_LOG]
1331 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
1332 };
1333
1334 if (chan_is_physical(d40c))
1335 return phy_map[d40c->dma_cfg.mode_opt];
1336 else
1337 return log_map[d40c->dma_cfg.mode_opt];
1338}
1339
1340static void d40_config_write(struct d40_chan *d40c)
1341{
1342 u32 addr_base;
1343 u32 var;
1344
1345
1346 addr_base = (d40c->phy_chan->num % 2) * 4;
1347
1348 var = ((u32)(chan_is_logical(d40c)) + 1) <<
1349 D40_CHAN_POS(d40c->phy_chan->num);
1350 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
1351
1352
1353 var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
1354
1355 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
1356
1357 if (chan_is_logical(d40c)) {
1358 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
1359 & D40_SREG_ELEM_LOG_LIDX_MASK;
1360 void __iomem *chanbase = chan_base(d40c);
1361
1362
1363 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
1364 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
1365
1366
1367 writel(lidx, chanbase + D40_CHAN_REG_SSELT);
1368 writel(lidx, chanbase + D40_CHAN_REG_SDELT);
1369
1370
1371 writel(0, chanbase + D40_CHAN_REG_SSLNK);
1372 writel(0, chanbase + D40_CHAN_REG_SDLNK);
1373 }
1374}
1375
1376static u32 d40_residue(struct d40_chan *d40c)
1377{
1378 u32 num_elt;
1379
1380 if (chan_is_logical(d40c))
1381 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1382 >> D40_MEM_LCSP2_ECNT_POS;
1383 else {
1384 u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
1385 num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
1386 >> D40_SREG_ELEM_PHY_ECNT_POS;
1387 }
1388
1389 return num_elt * d40c->dma_cfg.dst_info.data_width;
1390}
1391
1392static bool d40_tx_is_linked(struct d40_chan *d40c)
1393{
1394 bool is_link;
1395
1396 if (chan_is_logical(d40c))
1397 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1398 else
1399 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
1400 & D40_SREG_LNK_PHYS_LNK_MASK;
1401
1402 return is_link;
1403}
1404
1405static int d40_pause(struct dma_chan *chan)
1406{
1407 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1408 int res = 0;
1409 unsigned long flags;
1410
1411 if (d40c->phy_chan == NULL) {
1412 chan_err(d40c, "Channel is not allocated!\n");
1413 return -EINVAL;
1414 }
1415
1416 if (!d40c->busy)
1417 return 0;
1418
1419 spin_lock_irqsave(&d40c->lock, flags);
1420 pm_runtime_get_sync(d40c->base->dev);
1421
1422 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1423
1424 pm_runtime_mark_last_busy(d40c->base->dev);
1425 pm_runtime_put_autosuspend(d40c->base->dev);
1426 spin_unlock_irqrestore(&d40c->lock, flags);
1427 return res;
1428}
1429
1430static int d40_resume(struct dma_chan *chan)
1431{
1432 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1433 int res = 0;
1434 unsigned long flags;
1435
1436 if (d40c->phy_chan == NULL) {
1437 chan_err(d40c, "Channel is not allocated!\n");
1438 return -EINVAL;
1439 }
1440
1441 if (!d40c->busy)
1442 return 0;
1443
1444 spin_lock_irqsave(&d40c->lock, flags);
1445 pm_runtime_get_sync(d40c->base->dev);
1446
1447
1448 if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1449 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1450
1451 pm_runtime_mark_last_busy(d40c->base->dev);
1452 pm_runtime_put_autosuspend(d40c->base->dev);
1453 spin_unlock_irqrestore(&d40c->lock, flags);
1454 return res;
1455}
1456
1457static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1458{
1459 struct d40_chan *d40c = container_of(tx->chan,
1460 struct d40_chan,
1461 chan);
1462 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
1463 unsigned long flags;
1464 dma_cookie_t cookie;
1465
1466 spin_lock_irqsave(&d40c->lock, flags);
1467 cookie = dma_cookie_assign(tx);
1468 d40_desc_queue(d40c, d40d);
1469 spin_unlock_irqrestore(&d40c->lock, flags);
1470
1471 return cookie;
1472}
1473
1474static int d40_start(struct d40_chan *d40c)
1475{
1476 return d40_channel_execute_command(d40c, D40_DMA_RUN);
1477}
1478
1479static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1480{
1481 struct d40_desc *d40d;
1482 int err;
1483
1484
1485 d40d = d40_first_queued(d40c);
1486
1487 if (d40d != NULL) {
1488 if (!d40c->busy) {
1489 d40c->busy = true;
1490 pm_runtime_get_sync(d40c->base->dev);
1491 }
1492
1493
1494 d40_desc_remove(d40d);
1495
1496
1497 d40_desc_submit(d40c, d40d);
1498
1499
1500 d40_desc_load(d40c, d40d);
1501
1502
1503 err = d40_start(d40c);
1504
1505 if (err)
1506 return NULL;
1507 }
1508
1509 return d40d;
1510}
1511
1512
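/*
 * Handle a terminal-count event for one channel: reload partially loaded
 * (soft LLI) or cyclic jobs, or move a finished descriptor to the done list
 * and start the next queued one, then schedule the tasklet for client
 * completion handling.
 */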
1513static void dma_tc_handle(struct d40_chan *d40c)
1514{
1515 struct d40_desc *d40d;
1516
1517
1518 d40d = d40_first_active_get(d40c);
1519
1520 if (d40d == NULL)
1521 return;
1522
1523 if (d40d->cyclic) {
1524
1525
1526
1527
1528
1529
1530 if (d40d->lli_current < d40d->lli_len
1531 && !d40_tx_is_linked(d40c)
1532 && !d40_residue(d40c)) {
1533 d40_lcla_free_all(d40c, d40d);
1534 d40_desc_load(d40c, d40d);
1535 (void) d40_start(d40c);
1536
1537 if (d40d->lli_current == d40d->lli_len)
1538 d40d->lli_current = 0;
1539 }
1540 } else {
1541 d40_lcla_free_all(d40c, d40d);
1542
1543 if (d40d->lli_current < d40d->lli_len) {
1544 d40_desc_load(d40c, d40d);
1545
1546 (void) d40_start(d40c);
1547 return;
1548 }
1549
1550 if (d40_queue_start(d40c) == NULL) {
1551 d40c->busy = false;
1552
1553 pm_runtime_mark_last_busy(d40c->base->dev);
1554 pm_runtime_put_autosuspend(d40c->base->dev);
1555 }
1556
1557 d40_desc_remove(d40d);
1558 d40_desc_done(d40c, d40d);
1559 }
1560
1561 d40c->pending_tx++;
1562 tasklet_schedule(&d40c->tasklet);
1563
1564}
1565
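/*
 * Tasklet that runs after a terminal-count interrupt: completes the cookie
 * of finished non-cyclic descriptors, frees acked descriptors or parks
 * unacked ones on the client list, and invokes the client callback when
 * DMA_PREP_INTERRUPT was requested.
 */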
1566static void dma_tasklet(unsigned long data)
1567{
1568 struct d40_chan *d40c = (struct d40_chan *) data;
1569 struct d40_desc *d40d;
1570 unsigned long flags;
1571 bool callback_active;
1572 struct dmaengine_desc_callback cb;
1573
1574 spin_lock_irqsave(&d40c->lock, flags);
1575
1576
1577 d40d = d40_first_done(d40c);
1578 if (d40d == NULL) {
1579
1580 d40d = d40_first_active_get(d40c);
1581 if (d40d == NULL || !d40d->cyclic)
1582 goto check_pending_tx;
1583 }
1584
1585 if (!d40d->cyclic)
1586 dma_cookie_complete(&d40d->txd);
1587
 /*
 * If the channel is being terminated, pending_tx is set to zero.
 * This prevents any finished active jobs from being returned to the client.
 */
1592 if (d40c->pending_tx == 0) {
1593 spin_unlock_irqrestore(&d40c->lock, flags);
1594 return;
1595 }
1596
1597
1598 callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
1599 dmaengine_desc_get_callback(&d40d->txd, &cb);
1600
1601 if (!d40d->cyclic) {
1602 if (async_tx_test_ack(&d40d->txd)) {
1603 d40_desc_remove(d40d);
1604 d40_desc_free(d40c, d40d);
1605 } else if (!d40d->is_in_client_list) {
1606 d40_desc_remove(d40d);
1607 d40_lcla_free_all(d40c, d40d);
1608 list_add_tail(&d40d->node, &d40c->client);
1609 d40d->is_in_client_list = true;
1610 }
1611 }
1612
1613 d40c->pending_tx--;
1614
1615 if (d40c->pending_tx)
1616 tasklet_schedule(&d40c->tasklet);
1617
1618 spin_unlock_irqrestore(&d40c->lock, flags);
1619
1620 if (callback_active)
1621 dmaengine_desc_callback_invoke(&cb, NULL);
1622
1623 return;
1624 check_pending_tx:
1625
1626 if (d40c->pending_tx > 0)
1627 d40c->pending_tx--;
1628 spin_unlock_irqrestore(&d40c->lock, flags);
1629}
1630
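/*
 * Interrupt handler: snapshot all interrupt status registers listed in the
 * lookup table, then for each set bit clear the interrupt, look up the
 * owning channel and either handle the terminal count or report an error.
 */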
1631static irqreturn_t d40_handle_interrupt(int irq, void *data)
1632{
1633 int i;
1634 u32 idx;
1635 u32 row;
1636 long chan = -1;
1637 struct d40_chan *d40c;
1638 unsigned long flags;
1639 struct d40_base *base = data;
1640 u32 regs[base->gen_dmac.il_size];
1641 struct d40_interrupt_lookup *il = base->gen_dmac.il;
1642 u32 il_size = base->gen_dmac.il_size;
1643
1644 spin_lock_irqsave(&base->interrupt_lock, flags);
1645
1646
1647 for (i = 0; i < il_size; i++)
1648 regs[i] = readl(base->virtbase + il[i].src);
1649
1650 for (;;) {
1651
1652 chan = find_next_bit((unsigned long *)regs,
1653 BITS_PER_LONG * il_size, chan + 1);
1654
1655
1656 if (chan == BITS_PER_LONG * il_size)
1657 break;
1658
1659 row = chan / BITS_PER_LONG;
1660 idx = chan & (BITS_PER_LONG - 1);
1661
1662 if (il[row].offset == D40_PHY_CHAN)
1663 d40c = base->lookup_phy_chans[idx];
1664 else
1665 d40c = base->lookup_log_chans[il[row].offset + idx];
1666
1667 if (!d40c) {
1668
1669
1670
1671
1672 continue;
1673 }
1674
1675
1676 writel(BIT(idx), base->virtbase + il[row].clr);
1677
1678 spin_lock(&d40c->lock);
1679
1680 if (!il[row].is_error)
1681 dma_tc_handle(d40c);
1682 else
1683 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1684 chan, il[row].offset, idx);
1685
1686 spin_unlock(&d40c->lock);
1687 }
1688
1689 spin_unlock_irqrestore(&base->interrupt_lock, flags);
1690
1691 return IRQ_HANDLED;
1692}
1693
1694static int d40_validate_conf(struct d40_chan *d40c,
1695 struct stedma40_chan_cfg *conf)
1696{
1697 int res = 0;
1698 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1699
1700 if (!conf->dir) {
1701 chan_err(d40c, "Invalid direction.\n");
1702 res = -EINVAL;
1703 }
1704
1705 if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
1706 (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
1707 (conf->dev_type < 0)) {
1708 chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
1709 res = -EINVAL;
1710 }
1711
1712 if (conf->dir == DMA_DEV_TO_DEV) {
1713
1714
1715
1716
1717 chan_err(d40c, "periph to periph not supported\n");
1718 res = -EINVAL;
1719 }
1720
1721 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1722 conf->src_info.data_width !=
1723 d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1724 conf->dst_info.data_width) {
1725
1726
1727
1728
1729
1730 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1731 res = -EINVAL;
1732 }
1733
1734 return res;
1735}
1736
1737static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1738 bool is_src, int log_event_line, bool is_log,
1739 bool *first_user)
1740{
1741 unsigned long flags;
1742 spin_lock_irqsave(&phy->lock, flags);
1743
1744 *first_user = ((phy->allocated_src | phy->allocated_dst)
1745 == D40_ALLOC_FREE);
1746
1747 if (!is_log) {
1748
1749 if (phy->allocated_src == D40_ALLOC_FREE &&
1750 phy->allocated_dst == D40_ALLOC_FREE) {
1751 phy->allocated_dst = D40_ALLOC_PHY;
1752 phy->allocated_src = D40_ALLOC_PHY;
1753 goto found_unlock;
1754 } else
1755 goto not_found_unlock;
1756 }
1757
1758
1759 if (is_src) {
1760 if (phy->allocated_src == D40_ALLOC_PHY)
1761 goto not_found_unlock;
1762
1763 if (phy->allocated_src == D40_ALLOC_FREE)
1764 phy->allocated_src = D40_ALLOC_LOG_FREE;
1765
1766 if (!(phy->allocated_src & BIT(log_event_line))) {
1767 phy->allocated_src |= BIT(log_event_line);
1768 goto found_unlock;
1769 } else
1770 goto not_found_unlock;
1771 } else {
1772 if (phy->allocated_dst == D40_ALLOC_PHY)
1773 goto not_found_unlock;
1774
1775 if (phy->allocated_dst == D40_ALLOC_FREE)
1776 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1777
1778 if (!(phy->allocated_dst & BIT(log_event_line))) {
1779 phy->allocated_dst |= BIT(log_event_line);
1780 goto found_unlock;
1781 }
1782 }
1783 not_found_unlock:
1784 spin_unlock_irqrestore(&phy->lock, flags);
1785 return false;
1786 found_unlock:
1787 spin_unlock_irqrestore(&phy->lock, flags);
1788 return true;
1789}
1790
1791static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1792 int log_event_line)
1793{
1794 unsigned long flags;
1795 bool is_free = false;
1796
1797 spin_lock_irqsave(&phy->lock, flags);
1798 if (!log_event_line) {
1799 phy->allocated_dst = D40_ALLOC_FREE;
1800 phy->allocated_src = D40_ALLOC_FREE;
1801 is_free = true;
1802 goto unlock;
1803 }
1804
1805
1806 if (is_src) {
1807 phy->allocated_src &= ~BIT(log_event_line);
1808 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1809 phy->allocated_src = D40_ALLOC_FREE;
1810 } else {
1811 phy->allocated_dst &= ~BIT(log_event_line);
1812 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1813 phy->allocated_dst = D40_ALLOC_FREE;
1814 }
1815
1816 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1817 D40_ALLOC_FREE);
1818 unlock:
1819 spin_unlock_irqrestore(&phy->lock, flags);
1820
1821 return is_free;
1822}
1823
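/*
 * Allocate a physical channel, or map a logical channel's src/dst event
 * line onto one. Unless a fixed physical channel was requested, logical
 * channels are searched within the pair of physical channels associated
 * with the device's event group in each block of eight channels.
 */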
1824static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1825{
1826 int dev_type = d40c->dma_cfg.dev_type;
1827 int event_group;
1828 int event_line;
1829 struct d40_phy_res *phys;
1830 int i;
1831 int j;
1832 int log_num;
1833 int num_phy_chans;
1834 bool is_src;
1835 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1836
1837 phys = d40c->base->phy_res;
1838 num_phy_chans = d40c->base->num_phy_chans;
1839
1840 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
1841 log_num = 2 * dev_type;
1842 is_src = true;
1843 } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
1844 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1845
1846 log_num = 2 * dev_type + 1;
1847 is_src = false;
1848 } else
1849 return -EINVAL;
1850
1851 event_group = D40_TYPE_TO_GROUP(dev_type);
1852 event_line = D40_TYPE_TO_EVENT(dev_type);
1853
1854 if (!is_log) {
1855 if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1856
1857 if (d40c->dma_cfg.use_fixed_channel) {
1858 i = d40c->dma_cfg.phy_channel;
1859 if (d40_alloc_mask_set(&phys[i], is_src,
1860 0, is_log,
1861 first_phy_user))
1862 goto found_phy;
1863 } else {
1864 for (i = 0; i < num_phy_chans; i++) {
1865 if (d40_alloc_mask_set(&phys[i], is_src,
1866 0, is_log,
1867 first_phy_user))
1868 goto found_phy;
1869 }
1870 }
1871 } else
1872 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1873 int phy_num = j + event_group * 2;
1874 for (i = phy_num; i < phy_num + 2; i++) {
1875 if (d40_alloc_mask_set(&phys[i],
1876 is_src,
1877 0,
1878 is_log,
1879 first_phy_user))
1880 goto found_phy;
1881 }
1882 }
1883 return -EINVAL;
1884found_phy:
1885 d40c->phy_chan = &phys[i];
1886 d40c->log_num = D40_PHY_CHAN;
1887 goto out;
1888 }
1889 if (dev_type == -1)
1890 return -EINVAL;
1891
1892
1893 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1894 int phy_num = j + event_group * 2;
1895
1896 if (d40c->dma_cfg.use_fixed_channel) {
1897 i = d40c->dma_cfg.phy_channel;
1898
1899 if ((i != phy_num) && (i != phy_num + 1)) {
1900 dev_err(chan2dev(d40c),
1901 "invalid fixed phy channel %d\n", i);
1902 return -EINVAL;
1903 }
1904
1905 if (d40_alloc_mask_set(&phys[i], is_src, event_line,
1906 is_log, first_phy_user))
1907 goto found_log;
1908
1909 dev_err(chan2dev(d40c),
1910 "could not allocate fixed phy channel %d\n", i);
1911 return -EINVAL;
1912 }
1913
1914
1915
1916
1917
1918
1919 if (is_src) {
1920 for (i = phy_num; i < phy_num + 2; i++) {
1921 if (d40_alloc_mask_set(&phys[i], is_src,
1922 event_line, is_log,
1923 first_phy_user))
1924 goto found_log;
1925 }
1926 } else {
1927 for (i = phy_num + 1; i >= phy_num; i--) {
1928 if (d40_alloc_mask_set(&phys[i], is_src,
1929 event_line, is_log,
1930 first_phy_user))
1931 goto found_log;
1932 }
1933 }
1934 }
1935 return -EINVAL;
1936
1937found_log:
1938 d40c->phy_chan = &phys[i];
1939 d40c->log_num = log_num;
1940out:
1941
1942 if (is_log)
1943 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1944 else
1945 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1946
1947 return 0;
1948
1949}
1950
1951static int d40_config_memcpy(struct d40_chan *d40c)
1952{
1953 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1954
1955 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1956 d40c->dma_cfg = dma40_memcpy_conf_log;
1957 d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
1958
1959 d40_log_cfg(&d40c->dma_cfg,
1960 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1961
1962 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1963 dma_has_cap(DMA_SLAVE, cap)) {
1964 d40c->dma_cfg = dma40_memcpy_conf_phy;
1965
1966
1967 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
1968
1969
1970 d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1971 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1972
1973 } else {
1974 chan_err(d40c, "No memcpy\n");
1975 return -EINVAL;
1976 }
1977
1978 return 0;
1979}
1980
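/*
 * Release a channel: terminate all descriptors, stop the channel, clear its
 * allocation in the physical resource map and drop the runtime PM reference
 * taken while it was busy.
 */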
1981static int d40_free_dma(struct d40_chan *d40c)
1982{
1983
1984 int res = 0;
1985 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1986 struct d40_phy_res *phy = d40c->phy_chan;
1987 bool is_src;
1988
1989
1990 d40_term_all(d40c);
1991
1992 if (phy == NULL) {
1993 chan_err(d40c, "phy == null\n");
1994 return -EINVAL;
1995 }
1996
1997 if (phy->allocated_src == D40_ALLOC_FREE &&
1998 phy->allocated_dst == D40_ALLOC_FREE) {
1999 chan_err(d40c, "channel already free\n");
2000 return -EINVAL;
2001 }
2002
2003 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2004 d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
2005 is_src = false;
2006 else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2007 is_src = true;
2008 else {
2009 chan_err(d40c, "Unknown direction\n");
2010 return -EINVAL;
2011 }
2012
2013 pm_runtime_get_sync(d40c->base->dev);
2014 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
2015 if (res) {
2016 chan_err(d40c, "stop failed\n");
2017 goto mark_last_busy;
2018 }
2019
2020 d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
2021
2022 if (chan_is_logical(d40c))
2023 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
2024 else
2025 d40c->base->lookup_phy_chans[phy->num] = NULL;
2026
2027 if (d40c->busy) {
2028 pm_runtime_mark_last_busy(d40c->base->dev);
2029 pm_runtime_put_autosuspend(d40c->base->dev);
2030 }
2031
2032 d40c->busy = false;
2033 d40c->phy_chan = NULL;
2034 d40c->configured = false;
2035 mark_last_busy:
2036 pm_runtime_mark_last_busy(d40c->base->dev);
2037 pm_runtime_put_autosuspend(d40c->base->dev);
2038 return res;
2039}
2040
2041static bool d40_is_paused(struct d40_chan *d40c)
2042{
2043 void __iomem *chanbase = chan_base(d40c);
2044 bool is_paused = false;
2045 unsigned long flags;
2046 void __iomem *active_reg;
2047 u32 status;
2048 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2049
2050 spin_lock_irqsave(&d40c->lock, flags);
2051
2052 if (chan_is_physical(d40c)) {
2053 if (d40c->phy_chan->num % 2 == 0)
2054 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
2055 else
2056 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
2057
2058 status = (readl(active_reg) &
2059 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
2060 D40_CHAN_POS(d40c->phy_chan->num);
2061 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
2062 is_paused = true;
2063 goto unlock;
2064 }
2065
2066 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2067 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
2068 status = readl(chanbase + D40_CHAN_REG_SDLNK);
2069 } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
2070 status = readl(chanbase + D40_CHAN_REG_SSLNK);
2071 } else {
2072 chan_err(d40c, "Unknown direction\n");
2073 goto unlock;
2074 }
2075
2076 status = (status & D40_EVENTLINE_MASK(event)) >>
2077 D40_EVENTLINE_POS(event);
2078
2079 if (status != D40_DMA_RUN)
2080 is_paused = true;
2081 unlock:
2082 spin_unlock_irqrestore(&d40c->lock, flags);
2083 return is_paused;
2084
2085}
2086
2087static u32 stedma40_residue(struct dma_chan *chan)
2088{
2089 struct d40_chan *d40c =
2090 container_of(chan, struct d40_chan, chan);
2091 u32 bytes_left;
2092 unsigned long flags;
2093
2094 spin_lock_irqsave(&d40c->lock, flags);
2095 bytes_left = d40_residue(d40c);
2096 spin_unlock_irqrestore(&d40c->lock, flags);
2097
2098 return bytes_left;
2099}
2100
2101static int
2102d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
2103 struct scatterlist *sg_src, struct scatterlist *sg_dst,
2104 unsigned int sg_len, dma_addr_t src_dev_addr,
2105 dma_addr_t dst_dev_addr)
2106{
2107 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2108 struct stedma40_half_channel_info *src_info = &cfg->src_info;
2109 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2110 int ret;
2111
2112 ret = d40_log_sg_to_lli(sg_src, sg_len,
2113 src_dev_addr,
2114 desc->lli_log.src,
2115 chan->log_def.lcsp1,
2116 src_info->data_width,
2117 dst_info->data_width);
2118
2119 ret = d40_log_sg_to_lli(sg_dst, sg_len,
2120 dst_dev_addr,
2121 desc->lli_log.dst,
2122 chan->log_def.lcsp3,
2123 dst_info->data_width,
2124 src_info->data_width);
2125
2126 return ret < 0 ? ret : 0;
2127}
2128
2129static int
2130d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
2131 struct scatterlist *sg_src, struct scatterlist *sg_dst,
2132 unsigned int sg_len, dma_addr_t src_dev_addr,
2133 dma_addr_t dst_dev_addr)
2134{
2135 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2136 struct stedma40_half_channel_info *src_info = &cfg->src_info;
2137 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2138 unsigned long flags = 0;
2139 int ret;
2140
2141 if (desc->cyclic)
2142 flags |= LLI_CYCLIC | LLI_TERM_INT;
2143
2144 ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
2145 desc->lli_phy.src,
2146 virt_to_phys(desc->lli_phy.src),
2147 chan->src_def_cfg,
2148 src_info, dst_info, flags);
2149
2150 ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
2151 desc->lli_phy.dst,
2152 virt_to_phys(desc->lli_phy.dst),
2153 chan->dst_def_cfg,
2154 dst_info, src_info, flags);
2155
2156 dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
2157 desc->lli_pool.size, DMA_TO_DEVICE);
2158
2159 return ret < 0 ? ret : 0;
2160}
2161
2162static struct d40_desc *
2163d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
2164 unsigned int sg_len, unsigned long dma_flags)
2165{
2166 struct stedma40_chan_cfg *cfg;
2167 struct d40_desc *desc;
2168 int ret;
2169
2170 desc = d40_desc_get(chan);
2171 if (!desc)
2172 return NULL;
2173
2174 cfg = &chan->dma_cfg;
2175 desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
2176 cfg->dst_info.data_width);
2177 if (desc->lli_len < 0) {
2178 chan_err(chan, "Unaligned size\n");
2179 goto free_desc;
2180 }
2181
2182 ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
2183 if (ret < 0) {
2184 chan_err(chan, "Could not allocate lli\n");
2185 goto free_desc;
2186 }
2187
2188 desc->lli_current = 0;
2189 desc->txd.flags = dma_flags;
2190 desc->txd.tx_submit = d40_tx_submit;
2191
2192 dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
2193
2194 return desc;
2195 free_desc:
2196 d40_desc_free(chan, desc);
2197 return NULL;
2198}
2199
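/*
 * Common preparation path for memcpy, slave and cyclic transfers: allocate
 * a descriptor, build logical or physical LLIs from the scatterlists and
 * park the descriptor on the prepare queue until it is submitted.
 */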
2200static struct dma_async_tx_descriptor *
2201d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2202 struct scatterlist *sg_dst, unsigned int sg_len,
2203 enum dma_transfer_direction direction, unsigned long dma_flags)
2204{
2205 struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
2206 dma_addr_t src_dev_addr;
2207 dma_addr_t dst_dev_addr;
2208 struct d40_desc *desc;
2209 unsigned long flags;
2210 int ret;
2211
2212 if (!chan->phy_chan) {
2213 chan_err(chan, "Cannot prepare unallocated channel\n");
2214 return NULL;
2215 }
2216
2217 spin_lock_irqsave(&chan->lock, flags);
2218
2219 desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
2220 if (desc == NULL)
2221 goto unlock;
2222
2223 if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2224 desc->cyclic = true;
2225
2226 src_dev_addr = 0;
2227 dst_dev_addr = 0;
2228 if (direction == DMA_DEV_TO_MEM)
2229 src_dev_addr = chan->runtime_addr;
2230 else if (direction == DMA_MEM_TO_DEV)
2231 dst_dev_addr = chan->runtime_addr;
2232
2233 if (chan_is_logical(chan))
2234 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
2235 sg_len, src_dev_addr, dst_dev_addr);
2236 else
2237 ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
2238 sg_len, src_dev_addr, dst_dev_addr);
2239
2240 if (ret) {
2241 chan_err(chan, "Failed to prepare %s sg job: %d\n",
2242 chan_is_logical(chan) ? "log" : "phy", ret);
2243 goto free_desc;
2244 }
2245
 /*
 * Add the descriptor to the prepare queue so that it can still be
 * freed later from terminate_all.
 */
2250 list_add_tail(&desc->node, &chan->prepare_queue);
2251
2252 spin_unlock_irqrestore(&chan->lock, flags);
2253
2254 return &desc->txd;
2255 free_desc:
2256 d40_desc_free(chan, desc);
2257 unlock:
2258 spin_unlock_irqrestore(&chan->lock, flags);
2259 return NULL;
2260}
2261
2262bool stedma40_filter(struct dma_chan *chan, void *data)
2263{
2264 struct stedma40_chan_cfg *info = data;
2265 struct d40_chan *d40c =
2266 container_of(chan, struct d40_chan, chan);
2267 int err;
2268
2269 if (data) {
2270 err = d40_validate_conf(d40c, info);
2271 if (!err)
2272 d40c->dma_cfg = *info;
2273 } else
2274 err = d40_config_memcpy(d40c);
2275
2276 if (!err)
2277 d40c->configured = true;
2278
2279 return err == 0;
2280}
2281EXPORT_SYMBOL(stedma40_filter);
2282
2283static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2284{
2285 bool realtime = d40c->dma_cfg.realtime;
2286 bool highprio = d40c->dma_cfg.high_priority;
2287 u32 rtreg;
2288 u32 event = D40_TYPE_TO_EVENT(dev_type);
2289 u32 group = D40_TYPE_TO_GROUP(dev_type);
2290 u32 bit = BIT(event);
2291 u32 prioreg;
2292 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
2293
2294 rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
2295
 /*
 * Due to a hardware bug, in some cases a logical channel triggered by
 * a high priority destination event line can generate extra packet
 * transactions.
 *
 * The workaround is to not set the high priority level for the
 * destination event lines that trigger logical channels.
 */
2303 if (!src && chan_is_logical(d40c))
2304 highprio = false;
2305
2306 prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;
2307
2308
2309 if (!src)
2310 bit <<= 16;
2311
2312 writel(bit, d40c->base->virtbase + prioreg + group * 4);
2313 writel(bit, d40c->base->virtbase + rtreg + group * 4);
2314}
2315
2316static void d40_set_prio_realtime(struct d40_chan *d40c)
2317{
2318 if (d40c->base->rev < 3)
2319 return;
2320
2321 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
2322 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2323 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
2324
2325 if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
2326 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2327 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
2328}
2329
2330#define D40_DT_FLAGS_MODE(flags) ((flags >> 0) & 0x1)
2331#define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1)
2332#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
2333#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
2334#define D40_DT_FLAGS_HIGH_PRIO(flags) ((flags >> 4) & 0x1)
2335
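/*
 * Translate a devicetree DMA specifier into a channel request. The third
 * cell carries packed flags (mode, direction, endianness, fixed channel,
 * high priority) decoded by the D40_DT_FLAGS_* macros above.
 */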
2336static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
2337 struct of_dma *ofdma)
2338{
2339 struct stedma40_chan_cfg cfg;
2340 dma_cap_mask_t cap;
2341 u32 flags;
2342
2343 memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));
2344
2345 dma_cap_zero(cap);
2346 dma_cap_set(DMA_SLAVE, cap);
2347
2348 cfg.dev_type = dma_spec->args[0];
2349 flags = dma_spec->args[2];
2350
2351 switch (D40_DT_FLAGS_MODE(flags)) {
2352 case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
2353 case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
2354 }
2355
2356 switch (D40_DT_FLAGS_DIR(flags)) {
2357 case 0:
2358 cfg.dir = DMA_MEM_TO_DEV;
2359 cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2360 break;
2361 case 1:
2362 cfg.dir = DMA_DEV_TO_MEM;
2363 cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2364 break;
2365 }
2366
2367 if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
2368 cfg.phy_channel = dma_spec->args[1];
2369 cfg.use_fixed_channel = true;
2370 }
2371
2372 if (D40_DT_FLAGS_HIGH_PRIO(flags))
2373 cfg.high_priority = true;
2374
2375 return dma_request_channel(cap, stedma40_filter, &cfg);
2376}
2377
2378
2379static int d40_alloc_chan_resources(struct dma_chan *chan)
2380{
2381 int err;
2382 unsigned long flags;
2383 struct d40_chan *d40c =
2384 container_of(chan, struct d40_chan, chan);
2385 bool is_free_phy;
2386 spin_lock_irqsave(&d40c->lock, flags);
2387
2388 dma_cookie_init(chan);
2389
2390
2391 if (!d40c->configured) {
2392 err = d40_config_memcpy(d40c);
2393 if (err) {
2394 chan_err(d40c, "Failed to configure memcpy channel\n");
2395 goto mark_last_busy;
2396 }
2397 }
2398
2399 err = d40_allocate_channel(d40c, &is_free_phy);
2400 if (err) {
2401 chan_err(d40c, "Failed to allocate channel\n");
2402 d40c->configured = false;
2403 goto mark_last_busy;
2404 }
2405
2406 pm_runtime_get_sync(d40c->base->dev);
2407
2408 d40_set_prio_realtime(d40c);
2409
2410 if (chan_is_logical(d40c)) {
2411 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2412 d40c->lcpa = d40c->base->lcpa_base +
2413 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
2414 else
2415 d40c->lcpa = d40c->base->lcpa_base +
2416 d40c->dma_cfg.dev_type *
2417 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
2418
2419
2420 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2421 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2422 }
2423
2424 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2425 chan_is_logical(d40c) ? "logical" : "physical",
2426 d40c->phy_chan->num,
2427 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2428
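 /*
 * Only write the channel configuration to the hardware if the physical
 * channel was free when it was allocated; an already active physical
 * channel is left untouched here.
 */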
2435 if (is_free_phy)
2436 d40_config_write(d40c);
2437 mark_last_busy:
2438 pm_runtime_mark_last_busy(d40c->base->dev);
2439 pm_runtime_put_autosuspend(d40c->base->dev);
2440 spin_unlock_irqrestore(&d40c->lock, flags);
2441 return err;
2442}
2443
2444static void d40_free_chan_resources(struct dma_chan *chan)
2445{
2446 struct d40_chan *d40c =
2447 container_of(chan, struct d40_chan, chan);
2448 int err;
2449 unsigned long flags;
2450
2451 if (d40c->phy_chan == NULL) {
2452 chan_err(d40c, "Cannot free unallocated channel\n");
2453 return;
2454 }
2455
2456 spin_lock_irqsave(&d40c->lock, flags);
2457
2458 err = d40_free_dma(d40c);
2459
2460 if (err)
2461 chan_err(d40c, "Failed to free channel\n");
2462 spin_unlock_irqrestore(&d40c->lock, flags);
2463}
2464
2465static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
2466 dma_addr_t dst,
2467 dma_addr_t src,
2468 size_t size,
2469 unsigned long dma_flags)
2470{
2471 struct scatterlist dst_sg;
2472 struct scatterlist src_sg;
2473
2474 sg_init_table(&dst_sg, 1);
2475 sg_init_table(&src_sg, 1);
2476
2477 sg_dma_address(&dst_sg) = dst;
2478 sg_dma_address(&src_sg) = src;
2479
2480 sg_dma_len(&dst_sg) = size;
2481 sg_dma_len(&src_sg) = size;
2482
2483 return d40_prep_sg(chan, &src_sg, &dst_sg, 1,
2484 DMA_MEM_TO_MEM, dma_flags);
2485}
2486
2487static struct dma_async_tx_descriptor *
2488d40_prep_memcpy_sg(struct dma_chan *chan,
2489 struct scatterlist *dst_sg, unsigned int dst_nents,
2490 struct scatterlist *src_sg, unsigned int src_nents,
2491 unsigned long dma_flags)
2492{
2493 if (dst_nents != src_nents)
2494 return NULL;
2495
2496 return d40_prep_sg(chan, src_sg, dst_sg, src_nents,
2497 DMA_MEM_TO_MEM, dma_flags);
2498}
2499
2500static struct dma_async_tx_descriptor *
2501d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2502 unsigned int sg_len, enum dma_transfer_direction direction,
2503 unsigned long dma_flags, void *context)
2504{
2505 if (!is_slave_direction(direction))
2506 return NULL;
2507
2508 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2509}
2510
2511static struct dma_async_tx_descriptor *
2512dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2513 size_t buf_len, size_t period_len,
2514 enum dma_transfer_direction direction, unsigned long flags)
2515{
2516 unsigned int periods = buf_len / period_len;
2517 struct dma_async_tx_descriptor *txd;
2518 struct scatterlist *sg;
2519 int i;
2520
2521 sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
2522 if (!sg)
2523 return NULL;
2524
2525 for (i = 0; i < periods; i++) {
2526 sg_dma_address(&sg[i]) = dma_addr;
2527 sg_dma_len(&sg[i]) = period_len;
2528 dma_addr += period_len;
2529 }
2530
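 /*
 * Terminate with an extra entry whose page_link chains back to the head
 * of the list, so walking the scatterlist wraps around and the transfer
 * becomes cyclic.
 */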
2531 sg[periods].offset = 0;
2532 sg_dma_len(&sg[periods]) = 0;
2533 sg[periods].page_link =
2534 ((unsigned long)sg | 0x01) & ~0x02;
2535
2536 txd = d40_prep_sg(chan, sg, sg, periods, direction,
2537 DMA_PREP_INTERRUPT);
2538
2539 kfree(sg);
2540
2541 return txd;
2542}
2543
2544static enum dma_status d40_tx_status(struct dma_chan *chan,
2545 dma_cookie_t cookie,
2546 struct dma_tx_state *txstate)
2547{
2548 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2549 enum dma_status ret;
2550
2551 if (d40c->phy_chan == NULL) {
2552 chan_err(d40c, "Cannot read status of unallocated channel\n");
2553 return -EINVAL;
2554 }
2555
2556 ret = dma_cookie_status(chan, cookie, txstate);
2557 if (ret != DMA_COMPLETE && txstate)
2558 dma_set_residue(txstate, stedma40_residue(chan));
2559
2560 if (d40_is_paused(d40c))
2561 ret = DMA_PAUSED;
2562
2563 return ret;
2564}
2565
2566static void d40_issue_pending(struct dma_chan *chan)
2567{
2568 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2569 unsigned long flags;
2570
2571 if (d40c->phy_chan == NULL) {
2572 chan_err(d40c, "Channel is not allocated!\n");
2573 return;
2574 }
2575
2576 spin_lock_irqsave(&d40c->lock, flags);
2577
2578 list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2579
2580
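 /* If the channel is idle, start the first queued job; a busy channel picks up the queue when the running job completes */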
2581 if (!d40c->busy)
2582 (void) d40_queue_start(d40c);
2583
2584 spin_unlock_irqrestore(&d40c->lock, flags);
2585}
2586
2587static int d40_terminate_all(struct dma_chan *chan)
2588{
2589 unsigned long flags;
2590 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2591 int ret;
2592
2593 if (d40c->phy_chan == NULL) {
2594 chan_err(d40c, "Channel is not allocated!\n");
2595 return -EINVAL;
2596 }
2597
2598 spin_lock_irqsave(&d40c->lock, flags);
2599
2600 pm_runtime_get_sync(d40c->base->dev);
2601 ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2602 if (ret)
2603 chan_err(d40c, "Failed to stop channel\n");
2604
2605 d40_term_all(d40c);
2606 pm_runtime_mark_last_busy(d40c->base->dev);
2607 pm_runtime_put_autosuspend(d40c->base->dev);
2608 if (d40c->busy) {
2609 pm_runtime_mark_last_busy(d40c->base->dev);
2610 pm_runtime_put_autosuspend(d40c->base->dev);
2611 }
2612 d40c->busy = false;
2613
2614 spin_unlock_irqrestore(&d40c->lock, flags);
2615 return 0;
2616}
2617
2618static int
2619dma40_config_to_halfchannel(struct d40_chan *d40c,
2620 struct stedma40_half_channel_info *info,
2621 u32 maxburst)
2622{
2623 int psize;
2624
2625 if (chan_is_logical(d40c)) {
2626 if (maxburst >= 16)
2627 psize = STEDMA40_PSIZE_LOG_16;
2628 else if (maxburst >= 8)
2629 psize = STEDMA40_PSIZE_LOG_8;
2630 else if (maxburst >= 4)
2631 psize = STEDMA40_PSIZE_LOG_4;
2632 else
2633 psize = STEDMA40_PSIZE_LOG_1;
2634 } else {
2635 if (maxburst >= 16)
2636 psize = STEDMA40_PSIZE_PHY_16;
2637 else if (maxburst >= 8)
2638 psize = STEDMA40_PSIZE_PHY_8;
2639 else if (maxburst >= 4)
2640 psize = STEDMA40_PSIZE_PHY_4;
2641 else
2642 psize = STEDMA40_PSIZE_PHY_1;
2643 }
2644
2645 info->psize = psize;
2646 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2647
2648 return 0;
2649}
2650
2651
2652static int d40_set_runtime_config(struct dma_chan *chan,
2653 struct dma_slave_config *config)
2654{
2655 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2656 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2657 enum dma_slave_buswidth src_addr_width, dst_addr_width;
2658 dma_addr_t config_addr;
2659 u32 src_maxburst, dst_maxburst;
2660 int ret;
2661
2662 if (d40c->phy_chan == NULL) {
2663 chan_err(d40c, "Channel is not allocated!\n");
2664 return -EINVAL;
2665 }
2666
2667 src_addr_width = config->src_addr_width;
2668 src_maxburst = config->src_maxburst;
2669 dst_addr_width = config->dst_addr_width;
2670 dst_maxburst = config->dst_maxburst;
2671
2672 if (config->direction == DMA_DEV_TO_MEM) {
2673 config_addr = config->src_addr;
2674
2675 if (cfg->dir != DMA_DEV_TO_MEM)
2676 dev_dbg(d40c->base->dev,
2677 "channel was not configured for peripheral "
2678 "to memory transfer (%d) overriding\n",
2679 cfg->dir);
2680 cfg->dir = DMA_DEV_TO_MEM;
2681
2682
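 /* The memory side defaults to the device side settings when left unspecified */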
2683 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2684 dst_addr_width = src_addr_width;
2685 if (dst_maxburst == 0)
2686 dst_maxburst = src_maxburst;
2687
2688 } else if (config->direction == DMA_MEM_TO_DEV) {
2689 config_addr = config->dst_addr;
2690
2691 if (cfg->dir != DMA_MEM_TO_DEV)
2692 dev_dbg(d40c->base->dev,
2693 "channel was not configured for memory "
2694 "to peripheral transfer (%d) overriding\n",
2695 cfg->dir);
2696 cfg->dir = DMA_MEM_TO_DEV;
2697
2698
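 /* The memory side defaults to the device side settings when left unspecified */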
2699 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2700 src_addr_width = dst_addr_width;
2701 if (src_maxburst == 0)
2702 src_maxburst = dst_maxburst;
2703 } else {
2704 dev_err(d40c->base->dev,
2705 "unrecognized channel direction %d\n",
2706 config->direction);
2707 return -EINVAL;
2708 }
2709
2710 if (config_addr <= 0) {
2711 dev_err(d40c->base->dev, "no address supplied\n");
2712 return -EINVAL;
2713 }
2714
2715 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2716 dev_err(d40c->base->dev,
2717 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2718 src_maxburst,
2719 src_addr_width,
2720 dst_maxburst,
2721 dst_addr_width);
2722 return -EINVAL;
2723 }
2724
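 /* Bursts larger than 16 elements are clamped; the other side is rescaled so both sides still move the same number of bytes per burst */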
2725 if (src_maxburst > 16) {
2726 src_maxburst = 16;
2727 dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
2728 } else if (dst_maxburst > 16) {
2729 dst_maxburst = 16;
2730 src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
2731 }
2732
2733
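 /* Only power-of-two bus widths between 1 and 8 bytes are accepted */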
2734 if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2735 src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2736 dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2737 dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2738 !is_power_of_2(src_addr_width) ||
2739 !is_power_of_2(dst_addr_width))
2740 return -EINVAL;
2741
2742 cfg->src_info.data_width = src_addr_width;
2743 cfg->dst_info.data_width = dst_addr_width;
2744
2745 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2746 src_maxburst);
2747 if (ret)
2748 return ret;
2749
2750 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2751 dst_maxburst);
2752 if (ret)
2753 return ret;
2754
2755
2756 if (chan_is_logical(d40c))
2757 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2758 else
2759 d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
2760
2761
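 /* Remember the runtime configuration; the prepare routines use this address and direction */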
2762 d40c->runtime_addr = config_addr;
2763 d40c->runtime_direction = config->direction;
2764 dev_dbg(d40c->base->dev,
2765 "configured channel %s for %s, data width %d/%d, "
2766 "maxburst %d/%d elements, LE, no flow control\n",
2767 dma_chan_name(chan),
2768 (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
2769 src_addr_width, dst_addr_width,
2770 src_maxburst, dst_maxburst);
2771
2772 return 0;
2773}
2774
2775
2776
2777static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2778 struct d40_chan *chans, int offset,
2779 int num_chans)
2780{
2781 int i = 0;
2782 struct d40_chan *d40c;
2783
2784 INIT_LIST_HEAD(&dma->channels);
2785
2786 for (i = offset; i < offset + num_chans; i++) {
2787 d40c = &chans[i];
2788 d40c->base = base;
2789 d40c->chan.device = dma;
2790
2791 spin_lock_init(&d40c->lock);
2792
2793 d40c->log_num = D40_PHY_CHAN;
2794
2795 INIT_LIST_HEAD(&d40c->done);
2796 INIT_LIST_HEAD(&d40c->active);
2797 INIT_LIST_HEAD(&d40c->queue);
2798 INIT_LIST_HEAD(&d40c->pending_queue);
2799 INIT_LIST_HEAD(&d40c->client);
2800 INIT_LIST_HEAD(&d40c->prepare_queue);
2801
2802 tasklet_init(&d40c->tasklet, dma_tasklet,
2803 (unsigned long) d40c);
2804
2805 list_add_tail(&d40c->chan.device_node,
2806 &dma->channels);
2807 }
2808}
2809
2810static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2811{
2812 if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
2813 dev->device_prep_slave_sg = d40_prep_slave_sg;
2814
2815 if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2816 dev->device_prep_dma_memcpy = d40_prep_memcpy;
2817
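 /* The controller can only do memcpy on 32-bit (4 byte) aligned addresses */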
2822 dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
2823 }
2824
2825 if (dma_has_cap(DMA_SG, dev->cap_mask))
2826 dev->device_prep_dma_sg = d40_prep_memcpy_sg;
2827
2828 if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2829 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2830
2831 dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2832 dev->device_free_chan_resources = d40_free_chan_resources;
2833 dev->device_issue_pending = d40_issue_pending;
2834 dev->device_tx_status = d40_tx_status;
2835 dev->device_config = d40_set_runtime_config;
2836 dev->device_pause = d40_pause;
2837 dev->device_resume = d40_resume;
2838 dev->device_terminate_all = d40_terminate_all;
2839 dev->dev = base->dev;
2840}
2841
2842static int __init d40_dmaengine_init(struct d40_base *base,
2843 int num_reserved_chans)
2844{
 int err;
2846
2847 d40_chan_init(base, &base->dma_slave, base->log_chans,
2848 0, base->num_log_chans);
2849
2850 dma_cap_zero(base->dma_slave.cap_mask);
2851 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2852 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2853
2854 d40_ops_init(base, &base->dma_slave);
2855
2856 err = dma_async_device_register(&base->dma_slave);
2857
2858 if (err) {
2859 d40_err(base->dev, "Failed to register slave channels\n");
2860 goto exit;
2861 }
2862
2863 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2864 base->num_log_chans, base->num_memcpy_chans);
2865
2866 dma_cap_zero(base->dma_memcpy.cap_mask);
2867 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2868 dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
2869
2870 d40_ops_init(base, &base->dma_memcpy);
2871
2872 err = dma_async_device_register(&base->dma_memcpy);
2873
2874 if (err) {
2875 d40_err(base->dev,
2876 "Failed to register memcpy only channels\n");
2877 goto unregister_slave;
2878 }
2879
2880 d40_chan_init(base, &base->dma_both, base->phy_chans,
2881 0, num_reserved_chans);
2882
2883 dma_cap_zero(base->dma_both.cap_mask);
2884 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2885 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2886 dma_cap_set(DMA_SG, base->dma_both.cap_mask);
 dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
2888
2889 d40_ops_init(base, &base->dma_both);
2890 err = dma_async_device_register(&base->dma_both);
2891
2892 if (err) {
2893 d40_err(base->dev,
2894 "Failed to register logical and physical capable channels\n");
2895 goto unregister_memcpy;
2896 }
2897 return 0;
2898 unregister_memcpy:
2899 dma_async_device_unregister(&base->dma_memcpy);
2900 unregister_slave:
2901 dma_async_device_unregister(&base->dma_slave);
2902 exit:
2903 return err;
2904}
2905
2906
2907#ifdef CONFIG_PM_SLEEP
2908static int dma40_suspend(struct device *dev)
2909{
2910 struct platform_device *pdev = to_platform_device(dev);
2911 struct d40_base *base = platform_get_drvdata(pdev);
2912 int ret;
2913
2914 ret = pm_runtime_force_suspend(dev);
2915 if (ret)
2916 return ret;
2917
2918 if (base->lcpa_regulator)
2919 ret = regulator_disable(base->lcpa_regulator);
2920 return ret;
2921}
2922
2923static int dma40_resume(struct device *dev)
2924{
2925 struct platform_device *pdev = to_platform_device(dev);
2926 struct d40_base *base = platform_get_drvdata(pdev);
2927 int ret = 0;
2928
2929 if (base->lcpa_regulator) {
2930 ret = regulator_enable(base->lcpa_regulator);
2931 if (ret)
2932 return ret;
2933 }
2934
2935 return pm_runtime_force_resume(dev);
2936}
2937#endif
2938
2939#ifdef CONFIG_PM
2940static void dma40_backup(void __iomem *baseaddr, u32 *backup,
2941 u32 *regaddr, int num, bool save)
2942{
2943 int i;
2944
2945 for (i = 0; i < num; i++) {
2946 void __iomem *addr = baseaddr + regaddr[i];
2947
2948 if (save)
2949 backup[i] = readl_relaxed(addr);
2950 else
2951 writel_relaxed(backup[i], addr);
2952 }
2953}
2954
2955static void d40_save_restore_registers(struct d40_base *base, bool save)
2956{
2957 int i;
2958
2959
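 /* Save or restore the channel specific registers of every physical channel that is not reserved */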
2960 for (i = 0; i < base->num_phy_chans; i++) {
2961 void __iomem *addr;
2962 int idx;
2963
2964 if (base->phy_res[i].reserved)
2965 continue;
2966
2967 addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
2968 idx = i * ARRAY_SIZE(d40_backup_regs_chan);
2969
2970 dma40_backup(addr, &base->reg_val_backup_chan[idx],
2971 d40_backup_regs_chan,
2972 ARRAY_SIZE(d40_backup_regs_chan),
2973 save);
2974 }
2975
2976
2977 dma40_backup(base->virtbase, base->reg_val_backup,
2978 d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
2979 save);
2980
2981
2982 if (base->gen_dmac.backup)
2983 dma40_backup(base->virtbase, base->reg_val_backup_v4,
2984 base->gen_dmac.backup,
2985 base->gen_dmac.backup_size,
2986 save);
2987}
2988
2989static int dma40_runtime_suspend(struct device *dev)
2990{
2991 struct platform_device *pdev = to_platform_device(dev);
2992 struct d40_base *base = platform_get_drvdata(pdev);
2993
2994 d40_save_restore_registers(base, true);
2995
2996
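 /* Gate the global clocks using the mask built at init; rev 1 hardware is left untouched */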
2997 if (base->rev != 1)
2998 writel_relaxed(base->gcc_pwr_off_mask,
2999 base->virtbase + D40_DREG_GCC);
3000
3001 return 0;
3002}
3003
3004static int dma40_runtime_resume(struct device *dev)
3005{
3006 struct platform_device *pdev = to_platform_device(dev);
3007 struct d40_base *base = platform_get_drvdata(pdev);
3008
3009 d40_save_restore_registers(base, false);
3010
3011 writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
3012 base->virtbase + D40_DREG_GCC);
3013 return 0;
3014}
3015#endif
3016
3017static const struct dev_pm_ops dma40_pm_ops = {
3018 SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume)
3019 SET_RUNTIME_PM_OPS(dma40_runtime_suspend,
3020 dma40_runtime_resume,
3021 NULL)
3022};
3023
3024
3025
3026static int __init d40_phy_res_init(struct d40_base *base)
3027{
3028 int i;
3029 int num_phy_chans_avail = 0;
3030 u32 val[2];
3031 int odd_even_bit = -2;
3032 int gcc = D40_DREG_GCC_ENA;
3033
3034 val[0] = readl(base->virtbase + D40_DREG_PRSME);
3035 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
3036
3037 for (i = 0; i < base->num_phy_chans; i++) {
3038 base->phy_res[i].num = i;
3039 odd_even_bit += 2 * ((i % 2) == 0);
3040 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
3041
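 /* Channel is reserved by the platform; mark it as allocated and add its event groups to the clock mask */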
3042 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
3043 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
3044 base->phy_res[i].reserved = true;
3045 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3046 D40_DREG_GCC_SRC);
3047 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3048 D40_DREG_GCC_DST);
3049
3050
3051 } else {
3052 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
3053 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
3054 base->phy_res[i].reserved = false;
3055 num_phy_chans_avail++;
3056 }
3057 spin_lock_init(&base->phy_res[i].lock);
3058 }
3059
3060
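 /* Mark the channels disabled in the platform data as occupied */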
3061 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
3062 int chan = base->plat_data->disabled_channels[i];
3063
3064 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
3065 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
3066 base->phy_res[chan].reserved = true;
3067 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3068 D40_DREG_GCC_SRC);
3069 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3070 D40_DREG_GCC_DST);
3071 num_phy_chans_avail--;
3072 }
3073
3074
3075 for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
3076 int chan = base->plat_data->soft_lli_chans[i];
3077
3078 base->phy_res[chan].use_soft_lli = true;
3079 }
3080
3081 dev_info(base->dev, "%d of %d physical DMA channels available\n",
3082 num_phy_chans_avail, base->num_phy_chans);
3083
3084
3085 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
3086
3087 for (i = 0; i < base->num_phy_chans; i++) {
3088
3089 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
3090 (val[0] & 0x3) != 1)
3091 dev_info(base->dev,
3092 "[%s] INFO: channel %d is misconfigured (%d)\n",
3093 __func__, i, val[0] & 0x3);
3094
3095 val[0] = val[0] >> 2;
3096 }
3097
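 /*
 * Enable all clocks initially; gcc_pwr_off_mask records which event
 * groups (those of reserved channels) must stay enabled when the clocks
 * are gated at runtime suspend.
 */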
3104 writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3105 base->gcc_pwr_off_mask = gcc;
3106
3107 return num_phy_chans_avail;
3108}
3109
3110static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3111{
3112 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3113 struct clk *clk;
3114 void __iomem *virtbase;
3115 struct resource *res;
3116 struct d40_base *base;
3117 int num_log_chans;
3118 int num_phy_chans;
3119 int num_memcpy_chans;
3120 int clk_ret = -EINVAL;
3121 int i;
3122 u32 pid;
3123 u32 cid;
3124 u8 rev;
3125
3126 clk = clk_get(&pdev->dev, NULL);
3127 if (IS_ERR(clk)) {
3128 d40_err(&pdev->dev, "No matching clock found\n");
3129 goto check_prepare_enabled;
3130 }
3131
3132 clk_ret = clk_prepare_enable(clk);
3133 if (clk_ret) {
3134 d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
3135 goto disable_unprepare;
3136 }
3137
3138
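 /* Get the I/O memory resource for the DMAC register base */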
3139 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
3140 if (!res)
3141 goto disable_unprepare;
3142
 if (request_mem_region(res->start, resource_size(res),
 D40_NAME " I/O base") == NULL)
 goto disable_unprepare;
3146
3147 virtbase = ioremap(res->start, resource_size(res));
3148 if (!virtbase)
3149 goto release_region;
3150
3151
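 /* Read the AMBA PrimeCell peripheral ID and cell ID, one byte at a time */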
3152 for (pid = 0, i = 0; i < 4; i++)
3153 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
3154 & 255) << (i * 8);
3155 for (cid = 0, i = 0; i < 4; i++)
3156 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
3157 & 255) << (i * 8);
3158
3159 if (cid != AMBA_CID) {
3160 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
3161 goto unmap_io;
3162 }
3163 if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
3164 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
3165 AMBA_MANF_BITS(pid),
3166 AMBA_VENDOR_ST);
3167 goto unmap_io;
3168 }
3169
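 /* The hardware revision is taken from the PrimeCell revision field; revisions below 2 are not supported */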
3178 rev = AMBA_REV_BITS(pid);
3179 if (rev < 2) {
 d40_err(&pdev->dev, "hardware revision: %d is not supported\n", rev);
3181 goto unmap_io;
3182 }
3183
3184
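 /* Number of physical channels: from platform data if set, otherwise read from the hardware */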
3185 if (plat_data->num_of_phy_chans)
3186 num_phy_chans = plat_data->num_of_phy_chans;
3187 else
3188 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
3189
3190
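 /* Number of channels used for memcpy */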
3191 if (plat_data->num_of_memcpy_chans)
3192 num_memcpy_chans = plat_data->num_of_memcpy_chans;
3193 else
3194 num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);
3195
3196 num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
3197
3198 dev_info(&pdev->dev,
3199 "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
3200 rev, &res->start, num_phy_chans, num_log_chans);
3201
3202 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
3203 (num_phy_chans + num_log_chans + num_memcpy_chans) *
3204 sizeof(struct d40_chan), GFP_KERNEL);
3205
3206 if (base == NULL)
3207 goto unmap_io;
3208
3209 base->rev = rev;
3210 base->clk = clk;
3211 base->num_memcpy_chans = num_memcpy_chans;
3212 base->num_phy_chans = num_phy_chans;
3213 base->num_log_chans = num_log_chans;
3214 base->phy_start = res->start;
3215 base->phy_size = resource_size(res);
3216 base->virtbase = virtbase;
3217 base->plat_data = plat_data;
3218 base->dev = &pdev->dev;
3219 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
3220 base->log_chans = &base->phy_chans[num_phy_chans];
3221
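 /* Controllers with 14 physical channels use the v4b register layout, all others use v4a */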
3222 if (base->plat_data->num_of_phy_chans == 14) {
3223 base->gen_dmac.backup = d40_backup_regs_v4b;
3224 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
3225 base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
3226 base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
3227 base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
3228 base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
3229 base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
3230 base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
3231 base->gen_dmac.il = il_v4b;
3232 base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
3233 base->gen_dmac.init_reg = dma_init_reg_v4b;
3234 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
3235 } else {
3236 if (base->rev >= 3) {
3237 base->gen_dmac.backup = d40_backup_regs_v4a;
3238 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
3239 }
3240 base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
3241 base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
3242 base->gen_dmac.realtime_en = D40_DREG_RSEG1;
3243 base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
3244 base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
3245 base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
3246 base->gen_dmac.il = il_v4a;
3247 base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
3248 base->gen_dmac.init_reg = dma_init_reg_v4a;
3249 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
3250 }
3251
3252 base->phy_res = kcalloc(num_phy_chans,
3253 sizeof(*base->phy_res),
3254 GFP_KERNEL);
3255 if (!base->phy_res)
3256 goto free_base;
3257
3258 base->lookup_phy_chans = kcalloc(num_phy_chans,
3259 sizeof(*base->lookup_phy_chans),
3260 GFP_KERNEL);
3261 if (!base->lookup_phy_chans)
3262 goto free_phy_res;
3263
3264 base->lookup_log_chans = kcalloc(num_log_chans,
3265 sizeof(*base->lookup_log_chans),
3266 GFP_KERNEL);
3267 if (!base->lookup_log_chans)
3268 goto free_phy_chans;
3269
3270 base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans,
3271 sizeof(d40_backup_regs_chan),
3272 GFP_KERNEL);
3273 if (!base->reg_val_backup_chan)
3274 goto free_log_chans;
3275
3276 base->lcla_pool.alloc_map = kcalloc(num_phy_chans
3277 * D40_LCLA_LINK_PER_EVENT_GRP,
3278 sizeof(*base->lcla_pool.alloc_map),
3279 GFP_KERNEL);
3280 if (!base->lcla_pool.alloc_map)
3281 goto free_backup_chan;
3282
3283 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
3284 0, SLAB_HWCACHE_ALIGN,
3285 NULL);
3286 if (base->desc_slab == NULL)
3287 goto free_map;
3288
3289 return base;
3290 free_map:
3291 kfree(base->lcla_pool.alloc_map);
3292 free_backup_chan:
3293 kfree(base->reg_val_backup_chan);
3294 free_log_chans:
3295 kfree(base->lookup_log_chans);
3296 free_phy_chans:
3297 kfree(base->lookup_phy_chans);
3298 free_phy_res:
3299 kfree(base->phy_res);
3300 free_base:
3301 kfree(base);
3302 unmap_io:
3303 iounmap(virtbase);
3304 release_region:
3305 release_mem_region(res->start, resource_size(res));
3306 check_prepare_enabled:
3307 if (!clk_ret)
3308 disable_unprepare:
3309 clk_disable_unprepare(clk);
3310 if (!IS_ERR(clk))
3311 clk_put(clk);
3312 return NULL;
3313}
3314
3315static void __init d40_hw_init(struct d40_base *base)
3316{
3317
3318 int i;
3319 u32 prmseo[2] = {0, 0};
3320 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
3321 u32 pcmis = 0;
3322 u32 pcicr = 0;
3323 struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
3324 u32 reg_size = base->gen_dmac.init_reg_size;
3325
3326 for (i = 0; i < reg_size; i++)
3327 writel(dma_init_reg[i].val,
3328 base->virtbase + dma_init_reg[i].reg);
3329
3330
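 /* Build the default activation, interrupt and mode settings for every physical channel */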
3331 for (i = 0; i < base->num_phy_chans; i++) {
3332
3333 activeo[i % 2] = activeo[i % 2] << 2;
3334
3335 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
3336 == D40_ALLOC_PHY) {
3337 activeo[i % 2] |= 3;
3338 continue;
3339 }
3340
3341
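 /* Enable the interrupt for this channel */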
3342 pcmis = (pcmis << 1) | 1;
3343
3344
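 /* Clear any pending interrupt for this channel */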
3345 pcicr = (pcicr << 1) | 1;
3346
3347
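 /* Set the mode bits for this channel (written to PRMSE/PRMSO below) */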
3348 prmseo[i % 2] = prmseo[i % 2] << 2;
3349 prmseo[i % 2] |= 1;
3350
3351 }
3352
3353 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
3354 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
3355 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
3356 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
3357
3358
3359 writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
3360
3361
3362 writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
3363
3364
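 /* The init register table is only needed during init, so drop the references */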
3365 base->gen_dmac.init_reg = NULL;
3366 base->gen_dmac.init_reg_size = 0;
3367}
3368
3369static int __init d40_lcla_allocate(struct d40_base *base)
3370{
3371 struct d40_lcla_pool *pool = &base->lcla_pool;
3372 unsigned long *page_list;
3373 int i, j;
3374 int ret;
3375
3376
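 /*
 * The LCLA area must start on a 256 KiB (LCLA_ALIGNMENT) boundary.
 * Rather than permanently over-allocating, allocate page blocks
 * repeatedly and keep the first one that happens to be aligned.
 */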
3381 page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS,
3382 sizeof(*page_list),
3383 GFP_KERNEL);
3384 if (!page_list)
3385 return -ENOMEM;
3386
3387
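 /* 1 KiB of LCLA space is needed per physical channel */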
3388 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
3389
3390 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
3391 page_list[i] = __get_free_pages(GFP_KERNEL,
3392 base->lcla_pool.pages);
3393 if (!page_list[i]) {
3394
3395 d40_err(base->dev, "Failed to allocate %d pages.\n",
3396 base->lcla_pool.pages);
3397 ret = -ENOMEM;
3398
3399 for (j = 0; j < i; j++)
3400 free_pages(page_list[j], base->lcla_pool.pages);
3401 goto free_page_list;
3402 }
3403
3404 if ((virt_to_phys((void *)page_list[i]) &
3405 (LCLA_ALIGNMENT - 1)) == 0)
3406 break;
3407 }
3408
3409 for (j = 0; j < i; j++)
3410 free_pages(page_list[j], base->lcla_pool.pages);
3411
3412 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
3413 base->lcla_pool.base = (void *)page_list[i];
3414 } else {
3415
3416
3417
3418
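 /* No aligned block was found; fall back to a plain kmalloc that is large enough to align the base pointer by hand */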
3419 dev_warn(base->dev,
3420 "[%s] Failed to get %d pages @ 18 bit align.\n",
3421 __func__, base->lcla_pool.pages);
3422 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
3423 base->num_phy_chans +
3424 LCLA_ALIGNMENT,
3425 GFP_KERNEL);
3426 if (!base->lcla_pool.base_unaligned) {
3427 ret = -ENOMEM;
3428 goto free_page_list;
3429 }
3430
3431 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
3432 LCLA_ALIGNMENT);
3433 }
3434
3435 pool->dma_addr = dma_map_single(base->dev, pool->base,
3436 SZ_1K * base->num_phy_chans,
3437 DMA_TO_DEVICE);
3438 if (dma_mapping_error(base->dev, pool->dma_addr)) {
3439 pool->dma_addr = 0;
3440 ret = -ENOMEM;
3441 goto free_page_list;
3442 }
3443
3444 writel(virt_to_phys(base->lcla_pool.base),
3445 base->virtbase + D40_DREG_LCLA);
3446 ret = 0;
3447 free_page_list:
3448 kfree(page_list);
3449 return ret;
3450}
3451
3452static int __init d40_of_probe(struct platform_device *pdev,
3453 struct device_node *np)
3454{
3455 struct stedma40_platform_data *pdata;
3456 int num_phy = 0, num_memcpy = 0, num_disabled = 0;
3457 const __be32 *list;
3458
3459 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
3460 if (!pdata)
3461 return -ENOMEM;
3462
3463
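 /* "dma-channels" is optional; when absent the channel count is read from the hardware */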
3464 of_property_read_u32(np, "dma-channels", &num_phy);
3465 if (num_phy > 0)
3466 pdata->num_of_phy_chans = num_phy;
3467
3468 list = of_get_property(np, "memcpy-channels", &num_memcpy);
3469 num_memcpy /= sizeof(*list);
3470
3471 if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
3472 d40_err(&pdev->dev,
3473 "Invalid number of memcpy channels specified (%d)\n",
3474 num_memcpy);
3475 return -EINVAL;
3476 }
3477 pdata->num_of_memcpy_chans = num_memcpy;
3478
3479 of_property_read_u32_array(np, "memcpy-channels",
3480 dma40_memcpy_channels,
3481 num_memcpy);
3482
3483 list = of_get_property(np, "disabled-channels", &num_disabled);
3484 num_disabled /= sizeof(*list);
3485
3486 if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
3487 d40_err(&pdev->dev,
3488 "Invalid number of disabled channels specified (%d)\n",
3489 num_disabled);
3490 return -EINVAL;
3491 }
3492
3493 of_property_read_u32_array(np, "disabled-channels",
3494 pdata->disabled_channels,
3495 num_disabled);
3496 pdata->disabled_channels[num_disabled] = -1;
3497
3498 pdev->dev.platform_data = pdata;
3499
3500 return 0;
3501}
3502
3503static int __init d40_probe(struct platform_device *pdev)
3504{
3505 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3506 struct device_node *np = pdev->dev.of_node;
3507 int ret = -ENOENT;
3508 struct d40_base *base;
3509 struct resource *res;
3510 int num_reserved_chans;
3511 u32 val;
3512
3513 if (!plat_data) {
3514 if (np) {
3515 if (d40_of_probe(pdev, np)) {
3516 ret = -ENOMEM;
3517 goto report_failure;
3518 }
3519 } else {
3520 d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
3521 goto report_failure;
3522 }
3523 }
3524
3525 base = d40_hw_detect_init(pdev);
3526 if (!base)
3527 goto report_failure;
3528
3529 num_reserved_chans = d40_phy_res_init(base);
3530
3531 platform_set_drvdata(pdev, base);
3532
3533 spin_lock_init(&base->interrupt_lock);
3534 spin_lock_init(&base->execmd_lock);
3535
3536
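 /* Get the I/O memory resource for the logical channel parameter address (LCPA) */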
3537 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
3538 if (!res) {
3539 ret = -ENOENT;
3540 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
3541 goto destroy_cache;
3542 }
3543 base->lcpa_size = resource_size(res);
3544 base->phy_lcpa = res->start;
3545
3546 if (request_mem_region(res->start, resource_size(res),
3547 D40_NAME " I/O lcpa") == NULL) {
3548 ret = -EBUSY;
3549 d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
3550 goto destroy_cache;
3551 }
3552
3553
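 /* If the boot code already programmed an LCPA address, keep it and warn on mismatch; otherwise use the one from the resource */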
3554 val = readl(base->virtbase + D40_DREG_LCPA);
3555 if (res->start != val && val != 0) {
3556 dev_warn(&pdev->dev,
3557 "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
3558 __func__, val, &res->start);
3559 } else
3560 writel(res->start, base->virtbase + D40_DREG_LCPA);
3561
3562 base->lcpa_base = ioremap(res->start, resource_size(res));
3563 if (!base->lcpa_base) {
3564 ret = -ENOMEM;
3565 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
3566 goto destroy_cache;
3567 }
3568
3569 if (base->plat_data->use_esram_lcla) {
3570 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3571 "lcla_esram");
3572 if (!res) {
3573 ret = -ENOENT;
3574 d40_err(&pdev->dev,
3575 "No \"lcla_esram\" memory resource\n");
3576 goto destroy_cache;
3577 }
3578 base->lcla_pool.base = ioremap(res->start,
3579 resource_size(res));
3580 if (!base->lcla_pool.base) {
3581 ret = -ENOMEM;
3582 d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
3583 goto destroy_cache;
3584 }
3585 writel(res->start, base->virtbase + D40_DREG_LCLA);
3586
3587 } else {
3588 ret = d40_lcla_allocate(base);
3589 if (ret) {
3590 d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
3591 goto destroy_cache;
3592 }
3593 }
3594
3595 spin_lock_init(&base->lcla_pool.lock);
3596
3597 base->irq = platform_get_irq(pdev, 0);
3598
3599 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
3600 if (ret) {
3601 d40_err(&pdev->dev, "No IRQ defined\n");
3602 goto destroy_cache;
3603 }
3604
3605 if (base->plat_data->use_esram_lcla) {
3606
3607 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
3608 if (IS_ERR(base->lcpa_regulator)) {
3609 d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
3610 ret = PTR_ERR(base->lcpa_regulator);
3611 base->lcpa_regulator = NULL;
3612 goto destroy_cache;
3613 }
3614
3615 ret = regulator_enable(base->lcpa_regulator);
3616 if (ret) {
3617 d40_err(&pdev->dev,
3618 "Failed to enable lcpa_regulator\n");
3619 regulator_put(base->lcpa_regulator);
3620 base->lcpa_regulator = NULL;
3621 goto destroy_cache;
3622 }
3623 }
3624
3625 writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3626
3627 pm_runtime_irq_safe(base->dev);
3628 pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
3629 pm_runtime_use_autosuspend(base->dev);
3630 pm_runtime_mark_last_busy(base->dev);
3631 pm_runtime_set_active(base->dev);
3632 pm_runtime_enable(base->dev);
3633
3634 ret = d40_dmaengine_init(base, num_reserved_chans);
3635 if (ret)
3636 goto destroy_cache;
3637
3638 base->dev->dma_parms = &base->dma_parms;
3639 ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
3640 if (ret) {
3641 d40_err(&pdev->dev, "Failed to set dma max seg size\n");
3642 goto destroy_cache;
3643 }
3644
3645 d40_hw_init(base);
3646
3647 if (np) {
3648 ret = of_dma_controller_register(np, d40_xlate, NULL);
3649 if (ret)
3650 dev_err(&pdev->dev,
3651 "could not register of_dma_controller\n");
3652 }
3653
3654 dev_info(base->dev, "initialized\n");
3655 return 0;
3656 destroy_cache:
3657 kmem_cache_destroy(base->desc_slab);
3658 if (base->virtbase)
3659 iounmap(base->virtbase);
3660
3661 if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
3662 iounmap(base->lcla_pool.base);
3663 base->lcla_pool.base = NULL;
3664 }
3665
3666 if (base->lcla_pool.dma_addr)
3667 dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
3668 SZ_1K * base->num_phy_chans,
3669 DMA_TO_DEVICE);
3670
3671 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
3672 free_pages((unsigned long)base->lcla_pool.base,
3673 base->lcla_pool.pages);
3674
3675 kfree(base->lcla_pool.base_unaligned);
3676
3677 if (base->phy_lcpa)
3678 release_mem_region(base->phy_lcpa,
3679 base->lcpa_size);
3680 if (base->phy_start)
3681 release_mem_region(base->phy_start,
3682 base->phy_size);
3683 if (base->clk) {
3684 clk_disable_unprepare(base->clk);
3685 clk_put(base->clk);
3686 }
3687
3688 if (base->lcpa_regulator) {
3689 regulator_disable(base->lcpa_regulator);
3690 regulator_put(base->lcpa_regulator);
3691 }
3692
3693 kfree(base->lcla_pool.alloc_map);
3694 kfree(base->lookup_log_chans);
3695 kfree(base->lookup_phy_chans);
3696 kfree(base->phy_res);
3697 kfree(base);
3698 report_failure:
3699 d40_err(&pdev->dev, "probe failed\n");
3700 return ret;
3701}
3702
3703static const struct of_device_id d40_match[] = {
3704 { .compatible = "stericsson,dma40", },
3705 {}
3706};
3707
3708static struct platform_driver d40_driver = {
3709 .driver = {
3710 .name = D40_NAME,
3711 .pm = &dma40_pm_ops,
3712 .of_match_table = d40_match,
3713 },
3714};
3715
3716static int __init stedma40_init(void)
3717{
3718 return platform_driver_probe(&d40_driver, d40_probe);
3719}
3720subsys_initcall(stedma40_init);
3721