/* Qualcomm Technologies HIDMA DMA engine low level code */

#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/iopoll.h>
#include <linux/kfifo.h>
#include <linux/bitops.h>

#include "hidma.h"

#define HIDMA_EVRE_SIZE				16

#define HIDMA_TRCA_CTRLSTS_REG			0x000
#define HIDMA_TRCA_RING_LOW_REG			0x008
#define HIDMA_TRCA_RING_HIGH_REG		0x00C
#define HIDMA_TRCA_RING_LEN_REG			0x010
#define HIDMA_TRCA_DOORBELL_REG			0x400

#define HIDMA_EVCA_CTRLSTS_REG			0x000
#define HIDMA_EVCA_INTCTRL_REG			0x004
#define HIDMA_EVCA_RING_LOW_REG			0x008
#define HIDMA_EVCA_RING_HIGH_REG		0x00C
#define HIDMA_EVCA_RING_LEN_REG			0x010
#define HIDMA_EVCA_WRITE_PTR_REG		0x020
#define HIDMA_EVCA_DOORBELL_REG			0x400

#define HIDMA_EVCA_IRQ_STAT_REG			0x100
#define HIDMA_EVCA_IRQ_CLR_REG			0x108
#define HIDMA_EVCA_IRQ_EN_REG			0x110

#define HIDMA_EVRE_CFG_IDX			0

#define HIDMA_EVRE_ERRINFO_BIT_POS		24
#define HIDMA_EVRE_CODE_BIT_POS			28

#define HIDMA_EVRE_ERRINFO_MASK			GENMASK(3, 0)
#define HIDMA_EVRE_CODE_MASK			GENMASK(3, 0)

#define HIDMA_CH_CONTROL_MASK			GENMASK(7, 0)
#define HIDMA_CH_STATE_MASK			GENMASK(7, 0)
#define HIDMA_CH_STATE_BIT_POS			0x8

#define HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS		0
#define HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS		1
#define HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS	9
#define HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS	10
#define HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS	11
#define HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS	14

#define ENABLE_IRQS (BIT(HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS)	| \
		     BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS)	| \
		     BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS))

#define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size)	\
do {							\
	iter += size;					\
	if (iter >= ring_size)				\
		iter -= ring_size;			\
} while (0)

#define HIDMA_CH_STATE(val)	\
	((val >> HIDMA_CH_STATE_BIT_POS) & HIDMA_CH_STATE_MASK)

#define HIDMA_ERR_INT_MASK				\
	(BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS)	| \
	 BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS)	| \
	 BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)		| \
	 BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)	| \
	 BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS))

enum ch_command {
	HIDMA_CH_DISABLE = 0,
	HIDMA_CH_ENABLE = 1,
	HIDMA_CH_SUSPEND = 2,
	HIDMA_CH_RESET = 9,
};

enum ch_state {
	HIDMA_CH_DISABLED = 0,
	HIDMA_CH_ENABLED = 1,
	HIDMA_CH_RUNNING = 2,
	HIDMA_CH_SUSPENDED = 3,
	HIDMA_CH_STOPPED = 4,
};

enum err_code {
	HIDMA_EVRE_STATUS_COMPLETE = 1,
	HIDMA_EVRE_STATUS_ERROR = 4,
};

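/*
 * Channel control: a command from enum ch_command is written to bits
 * 23:16 of the control/status register, and the hardware reports the
 * resulting state (enum ch_state) in the byte extracted by
 * HIDMA_CH_STATE().
 */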
static int hidma_is_chan_enabled(int state)
{
	switch (state) {
	case HIDMA_CH_ENABLED:
	case HIDMA_CH_RUNNING:
		return true;
	default:
		return false;
	}
}

void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch)
{
	struct hidma_tre *tre;

	if (tre_ch >= lldev->nr_tres) {
		dev_err(lldev->dev, "invalid TRE number in free:%d", tre_ch);
		return;
	}

	tre = &lldev->trepool[tre_ch];
	if (atomic_read(&tre->allocated) != true) {
		dev_err(lldev->dev, "trying to free an unused TRE:%d", tre_ch);
		return;
	}

	atomic_set(&tre->allocated, 0);
}

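/*
 * Reserve a free TRE from the pool for the given client and pre-fill its
 * config word with the channel index and BIT(16), which requests an
 * interrupt at the end of the block (IEOB). On success the reserved TRE
 * index is returned in *tre_ch.
 */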
int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
		     void (*callback)(void *data), void *data, u32 *tre_ch)
{
	unsigned int i;
	struct hidma_tre *tre;
	u32 *tre_local;

	if (!tre_ch || !lldev)
		return -EINVAL;

	/* need to have at least one empty spot in the queue */
	for (i = 0; i < lldev->nr_tres - 1; i++) {
		if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1))
			break;
	}

	if (i == (lldev->nr_tres - 1))
		return -ENOMEM;

	tre = &lldev->trepool[i];
	tre->dma_sig = sig;
	tre->dev_name = dev_name;
	tre->callback = callback;
	tre->data = data;
	tre->idx = i;
	tre->status = 0;
	tre->queued = 0;
	tre->err_code = 0;
	tre->err_info = 0;
	tre->lldev = lldev;
	tre_local = &tre->tre_local[0];
	tre_local[HIDMA_TRE_CFG_IDX] = (lldev->chidx & 0xFF) << 8;
	tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16);	/* set IEOB */
	*tre_ch = i;
	if (callback)
		callback(data);
	return 0;
}

/*
 * Multiple TREs may be queued and waiting in the pending queue.
 */
static void hidma_ll_tre_complete(unsigned long arg)
{
	struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
	struct hidma_tre *tre;

	while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) {
		/* call the user if it has been read by the hardware */
		if (tre->callback)
			tre->callback(tre->data);
	}
}

static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info,
				u8 err_code)
{
	struct hidma_tre *tre;
	unsigned long flags;
	u32 tre_iterator;

	spin_lock_irqsave(&lldev->lock, flags);

	tre_iterator = lldev->tre_processed_off;
	tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
	if (!tre) {
		spin_unlock_irqrestore(&lldev->lock, flags);
		dev_warn(lldev->dev, "tre_index [%d] and tre out of sync\n",
			 tre_iterator / HIDMA_TRE_SIZE);
		return -EINVAL;
	}
	lldev->pending_tre_list[tre->tre_index] = NULL;

	/*
	 * Keep track of pending TREs that SW is expecting to receive
	 * from HW. We got one now. Decrement our counter.
	 */
	if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
		dev_warn(lldev->dev, "tre count mismatch on completion");
		atomic_set(&lldev->pending_tre_count, 0);
	}

	HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
				 lldev->tre_ring_size);
	lldev->tre_processed_off = tre_iterator;
	spin_unlock_irqrestore(&lldev->lock, flags);

	tre->err_info = err_info;
	tre->err_code = err_code;
	tre->queued = 0;

	kfifo_put(&lldev->handoff_fifo, tre);
	tasklet_schedule(&lldev->task);

	return 0;
}

/*
 * Called from the interrupt handler to consume completed EVREs from the
 * event ring. Returns the number of completions processed on this run;
 * 0 means no event was pending or the hardware reported a bad offset.
 */
static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
{
	u32 evre_ring_size = lldev->evre_ring_size;
	u32 err_info, err_code, evre_write_off;
	u32 evre_iterator;
	u32 num_completed = 0;

	evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
	evre_iterator = lldev->evre_processed_off;

	if ((evre_write_off > evre_ring_size) ||
	    (evre_write_off % HIDMA_EVRE_SIZE)) {
		dev_err(lldev->dev, "HW reports invalid EVRE write offset\n");
		return 0;
	}

	/*
	 * By the time control reaches here the number of EVREs and TREs
	 * may not match. Only consume the ones that hardware told us.
	 */
	while ((evre_iterator != evre_write_off)) {
		u32 *current_evre = lldev->evre_ring + evre_iterator;
		u32 cfg;

		cfg = current_evre[HIDMA_EVRE_CFG_IDX];
		err_info = cfg >> HIDMA_EVRE_ERRINFO_BIT_POS;
		err_info &= HIDMA_EVRE_ERRINFO_MASK;
		err_code =
		    (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;

		if (hidma_post_completed(lldev, err_info, err_code))
			break;

		HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
					 evre_ring_size);

		/*
		 * Read the new event descriptor written by the HW.
		 * As we are processing the delivered events, other events
		 * get queued to the SW for processing.
		 */
		evre_write_off =
		    readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
		num_completed++;

		/*
		 * An error interrupt might have arrived while we are
		 * processing the completions; stop consuming events if
		 * the channel got disabled.
		 */
		if (!hidma_ll_isenabled(lldev))
			break;
	}

	if (num_completed) {
		u32 evre_read_off = (lldev->evre_processed_off +
				     HIDMA_EVRE_SIZE * num_completed);
		evre_read_off = evre_read_off % evre_ring_size;
		writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);

		/* record the last processed tre offset */
		lldev->evre_processed_off = evre_read_off;
	}

	return num_completed;
}

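/*
 * Post every still-pending TRE as completed with the given error info and
 * error code, e.g. when the channel is shut down after an error interrupt.
 */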
void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
			       u8 err_code)
{
	while (atomic_read(&lldev->pending_tre_count)) {
		if (hidma_post_completed(lldev, err_info, err_code))
			break;
	}
}

static int hidma_ll_reset(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_RESET << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	/*
	 * Delay 10ms after reset to allow DMA logic to quiesce.
	 * Do a polled read up to 1ms and 10ms maximum.
	 */
	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "transfer channel did not reset\n");
		return ret;
	}

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_RESET << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	/*
	 * Delay 10ms after reset to allow DMA logic to quiesce.
	 * Do a polled read up to 1ms and 10ms maximum.
	 */
	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "event channel did not reset\n");
		return ret;
	}

	lldev->trch_state = HIDMA_CH_DISABLED;
	lldev->evch_state = HIDMA_CH_DISABLED;
	return 0;
}

/*
 * The interrupt handler for HIDMA will try to consume as many pending
 * EVREs from the event queue as possible. Each EVRE has an associated
 * TRE that holds the user interface parameters. The EVRE reports the
 * result of the transaction, and hardware guarantees ordering between
 * EVREs and TREs, so the last processed offset identifies which TRE
 * belongs to which EVRE.
 *
 * This handler does one pass over the queued EVREs. Further EVREs may be
 * delivered while it runs; for any left unprocessed, hardware keeps the
 * interrupt asserted until all interrupt bits are cleared.
 *
 * Hardware guarantees that by the time the interrupt is observed, all
 * data transactions in flight have been delivered to their respective
 * places and are visible to the CPU.
 */
static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
{
	if (cause & HIDMA_ERR_INT_MASK) {
		dev_err(lldev->dev, "error 0x%x, disabling...\n",
			cause);

		/* Clear out pending interrupts */
		writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

		/* No further submissions. */
		hidma_ll_disable(lldev);

		/* Driver completes the txn and intimates the client. */
		hidma_cleanup_pending_tre(lldev, 0xFF,
					  HIDMA_EVRE_STATUS_ERROR);
		return;
	}

	/*
	 * This ISR is fine tuned for this particular hardware. Relaxed
	 * read and write accessors are used for performance reasons due
	 * to interrupt delivery guarantees. Do not copy this code blindly
	 * and expect that to work.
	 *
	 * Try to consume as many EVREs as possible.
	 */
	hidma_handle_tre_completion(lldev);

	/* We consumed TREs or there are pending TREs or EVREs. */
	writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
}

irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;
	u32 status;
	u32 enable;
	u32 cause;

	status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	cause = status & enable;

	while (cause) {
		hidma_ll_int_handler_internal(lldev, cause);

		/*
		 * Another interrupt might have arrived while we are
		 * processing this one. Read the new cause.
		 */
		status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
		enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
		cause = status & enable;
	}

	return IRQ_HANDLED;
}
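
/*
 * MSI flavor of the interrupt handler: the caller derives the cause from
 * the MSI vector number, so there is no IRQ status register to read here.
 */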
irqreturn_t hidma_ll_inthandler_msi(int chirq, void *arg, int cause)
{
	struct hidma_lldev *lldev = arg;

	hidma_ll_int_handler_internal(lldev, cause);
	return IRQ_HANDLED;
}

int hidma_ll_enable(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_ENABLE << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "event channel did not get enabled\n");
		return ret;
	}

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_ENABLE << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "transfer channel did not get enabled\n");
		return ret;
	}

	lldev->trch_state = HIDMA_CH_ENABLED;
	lldev->evch_state = HIDMA_CH_ENABLED;

	/* enable irqs */
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	return 0;
}

void hidma_ll_start(struct hidma_lldev *lldev)
{
	unsigned long irqflags;

	spin_lock_irqsave(&lldev->lock, irqflags);
	writel(lldev->tre_write_offset, lldev->trca + HIDMA_TRCA_DOORBELL_REG);
	spin_unlock_irqrestore(&lldev->lock, irqflags);
}

bool hidma_ll_isenabled(struct hidma_lldev *lldev)
{
	u32 val;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	lldev->trch_state = HIDMA_CH_STATE(val);
	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	lldev->evch_state = HIDMA_CH_STATE(val);

	/* both channels have to be enabled before calling this function */
	if (hidma_is_chan_enabled(lldev->trch_state) &&
	    hidma_is_chan_enabled(lldev->evch_state))
		return true;

	return false;
}

void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
{
	struct hidma_tre *tre;
	unsigned long flags;

	tre = &lldev->trepool[tre_ch];

	/* copy the TRE into its location in the TRE ring */
	spin_lock_irqsave(&lldev->lock, flags);
	tre->tre_index = lldev->tre_write_offset / HIDMA_TRE_SIZE;
	lldev->pending_tre_list[tre->tre_index] = tre;
	memcpy(lldev->tre_ring + lldev->tre_write_offset,
	       &tre->tre_local[0], HIDMA_TRE_SIZE);
	tre->err_code = 0;
	tre->err_info = 0;
	tre->queued = 1;
	atomic_inc(&lldev->pending_tre_count);
	lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
				  % lldev->tre_ring_size;
	spin_unlock_irqrestore(&lldev->lock, flags);
}
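
/*
 * Typical submission sequence for this low level interface (a sketch of
 * how the functions in this file fit together; the exact upper layer
 * code lives in the hidma dmaengine driver):
 *
 *	rc = hidma_ll_request(lldev, sig, "name", callback, data, &tre_ch);
 *	hidma_ll_set_transfer_params(lldev, tre_ch, src, dest, len, flags,
 *				     txntype);
 *	hidma_ll_queue_request(lldev, tre_ch);
 *	hidma_ll_start(lldev);		// rings the transfer channel doorbell
 */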

/*
 * Note that even though we stop this channel, if there is a pending
 * transaction in flight it will complete. HW will report the completion.
 */
int hidma_ll_disable(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	/* The channel needs to be in working state */
	if (!hidma_ll_isenabled(lldev))
		return 0;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_SUSPEND << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	/*
	 * Start the wait right after the suspend is confirmed.
	 * Do a polled read up to 1ms and 10ms maximum.
	 */
	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
				 1000, 10000);
	if (ret)
		return ret;

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_SUSPEND << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	/*
	 * Start the wait right after the suspend is confirmed.
	 * Do a polled read up to 1ms and 10ms maximum.
	 */
	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
				 1000, 10000);
	if (ret)
		return ret;

	lldev->trch_state = HIDMA_CH_SUSPENDED;
	lldev->evch_state = HIDMA_CH_SUSPENDED;

	/* disable interrupts */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	return 0;
}

void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
				  dma_addr_t src, dma_addr_t dest, u32 len,
				  u32 flags, u32 txntype)
{
	struct hidma_tre *tre;
	u32 *tre_local;

	if (tre_ch >= lldev->nr_tres) {
		dev_err(lldev->dev, "invalid TRE number in transfer params:%d",
			tre_ch);
		return;
	}

	tre = &lldev->trepool[tre_ch];
	if (atomic_read(&tre->allocated) != true) {
		dev_err(lldev->dev, "trying to set params on an unused TRE:%d",
			tre_ch);
		return;
	}

	tre_local = &tre->tre_local[0];
	tre_local[HIDMA_TRE_CFG_IDX] &= ~GENMASK(7, 0);
	tre_local[HIDMA_TRE_CFG_IDX] |= txntype;
	tre_local[HIDMA_TRE_LEN_IDX] = len;
	tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
	tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
	tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest);
	tre_local[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest);
	tre->int_flags = flags;
}

/*
 * Called during initialization and after an error condition
 * to restore hardware state.
 */
int hidma_ll_setup(struct hidma_lldev *lldev)
{
	int rc;
	u64 addr;
	u32 val;
	u32 nr_tres = lldev->nr_tres;

	atomic_set(&lldev->pending_tre_count, 0);
	lldev->tre_processed_off = 0;
	lldev->evre_processed_off = 0;
	lldev->tre_write_offset = 0;

	/* disable interrupts */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	/* clear all pending interrupts */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

	rc = hidma_ll_reset(lldev);
	if (rc)
		return rc;

	/*
	 * Clear all pending interrupts again.
	 * Otherwise, we observe reset complete interrupts.
	 */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

	/* disable interrupts again after reset */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	addr = lldev->tre_dma;
	writel(lower_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_LOW_REG);
	writel(upper_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_HIGH_REG);
	writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG);

	addr = lldev->evre_dma;
	writel(lower_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_LOW_REG);
	writel(upper_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_HIGH_REG);
	writel(HIDMA_EVRE_SIZE * nr_tres,
	       lldev->evca + HIDMA_EVCA_RING_LEN_REG);

	/* configure interrupts */
	hidma_ll_setup_irq(lldev, lldev->msi_support);

	return hidma_ll_enable(lldev);
}

void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi)
{
	u32 val;

	lldev->msi_support = msi;

	/* disable and clear all interrupts first */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	/* support IRQ by default */
	val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
	val &= ~0xF;
	if (!lldev->msi_support)
		val = val | 0x1;
	writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);

	/* clear all pending interrupts and enable them */
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
}

struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
				  void __iomem *trca, void __iomem *evca,
				  u8 chidx)
{
	u32 required_bytes;
	struct hidma_lldev *lldev;
	int rc;
	size_t sz;

	if (!trca || !evca || !dev || !nr_tres)
		return NULL;

	/* need at least four TREs */
	if (nr_tres < 4)
		return NULL;

	/* need an extra space */
	nr_tres += 1;

	lldev = devm_kzalloc(dev, sizeof(struct hidma_lldev), GFP_KERNEL);
	if (!lldev)
		return NULL;

	lldev->evca = evca;
	lldev->trca = trca;
	lldev->dev = dev;
	sz = sizeof(struct hidma_tre);
	lldev->trepool = devm_kcalloc(lldev->dev, nr_tres, sz, GFP_KERNEL);
	if (!lldev->trepool)
		return NULL;

	required_bytes = sizeof(lldev->pending_tre_list[0]);
	lldev->pending_tre_list = devm_kcalloc(dev, nr_tres, required_bytes,
					       GFP_KERNEL);
	if (!lldev->pending_tre_list)
		return NULL;

	sz = (HIDMA_TRE_SIZE + 1) * nr_tres;
	lldev->tre_ring = dmam_alloc_coherent(dev, sz, &lldev->tre_dma,
					      GFP_KERNEL);
	if (!lldev->tre_ring)
		return NULL;

	memset(lldev->tre_ring, 0, (HIDMA_TRE_SIZE + 1) * nr_tres);
	lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres;
	lldev->nr_tres = nr_tres;

	/* the TRE ring has to be TRE_SIZE aligned */
	if (!IS_ALIGNED(lldev->tre_dma, HIDMA_TRE_SIZE)) {
		u8 tre_ring_shift;

		tre_ring_shift = lldev->tre_dma % HIDMA_TRE_SIZE;
		tre_ring_shift = HIDMA_TRE_SIZE - tre_ring_shift;
		lldev->tre_dma += tre_ring_shift;
		lldev->tre_ring += tre_ring_shift;
	}

	sz = (HIDMA_EVRE_SIZE + 1) * nr_tres;
	lldev->evre_ring = dmam_alloc_coherent(dev, sz, &lldev->evre_dma,
					       GFP_KERNEL);
	if (!lldev->evre_ring)
		return NULL;

	memset(lldev->evre_ring, 0, (HIDMA_EVRE_SIZE + 1) * nr_tres);
	lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres;

	/* the EVRE ring has to be EVRE_SIZE aligned */
	if (!IS_ALIGNED(lldev->evre_dma, HIDMA_EVRE_SIZE)) {
		u8 evre_ring_shift;

		evre_ring_shift = lldev->evre_dma % HIDMA_EVRE_SIZE;
		evre_ring_shift = HIDMA_EVRE_SIZE - evre_ring_shift;
		lldev->evre_dma += evre_ring_shift;
		lldev->evre_ring += evre_ring_shift;
	}
	lldev->nr_tres = nr_tres;
	lldev->chidx = chidx;

	sz = nr_tres * sizeof(struct hidma_tre *);
	rc = kfifo_alloc(&lldev->handoff_fifo, sz, GFP_KERNEL);
	if (rc)
		return NULL;

	rc = hidma_ll_setup(lldev);
	if (rc)
		return NULL;

	spin_lock_init(&lldev->lock);
	tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
	lldev->initialized = 1;
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	return lldev;
}

int hidma_ll_uninit(struct hidma_lldev *lldev)
{
	u32 required_bytes;
	int rc = 0;
	u32 val;

	if (!lldev)
		return -ENODEV;

	if (!lldev->initialized)
		return 0;

	lldev->initialized = 0;

	required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
	tasklet_kill(&lldev->task);
	memset(lldev->trepool, 0, required_bytes);
	lldev->trepool = NULL;
	atomic_set(&lldev->pending_tre_count, 0);
	lldev->tre_write_offset = 0;

	rc = hidma_ll_reset(lldev);

	/*
	 * Clear all pending interrupts again.
	 * Otherwise, we observe reset complete interrupts.
	 */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	return rc;
}
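
/*
 * Translate the error code recorded in the TRE into a dmaengine
 * status value for the upper layer.
 */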
enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch)
{
	enum dma_status ret = DMA_ERROR;
	struct hidma_tre *tre;
	unsigned long flags;
	u8 err_code;

	spin_lock_irqsave(&lldev->lock, flags);

	tre = &lldev->trepool[tre_ch];
	err_code = tre->err_code;

	if (err_code & HIDMA_EVRE_STATUS_COMPLETE)
		ret = DMA_COMPLETE;
	else if (err_code & HIDMA_EVRE_STATUS_ERROR)
		ret = DMA_ERROR;
	else
		ret = DMA_IN_PROGRESS;
	spin_unlock_irqrestore(&lldev->lock, flags);

	return ret;
}