/*
 * Qualcomm Technologies HIDMA DMA engine low level code
 *
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/iopoll.h>
#include <linux/kfifo.h>
#include <linux/bitops.h>

#include "hidma.h"

#define HIDMA_EVRE_SIZE 16	/* each EVRE is 16 bytes */

#define HIDMA_TRCA_CTRLSTS_REG 0x000
#define HIDMA_TRCA_RING_LOW_REG 0x008
#define HIDMA_TRCA_RING_HIGH_REG 0x00C
#define HIDMA_TRCA_RING_LEN_REG 0x010
#define HIDMA_TRCA_DOORBELL_REG 0x400

#define HIDMA_EVCA_CTRLSTS_REG 0x000
#define HIDMA_EVCA_INTCTRL_REG 0x004
#define HIDMA_EVCA_RING_LOW_REG 0x008
#define HIDMA_EVCA_RING_HIGH_REG 0x00C
#define HIDMA_EVCA_RING_LEN_REG 0x010
#define HIDMA_EVCA_WRITE_PTR_REG 0x020
#define HIDMA_EVCA_DOORBELL_REG 0x400

#define HIDMA_EVCA_IRQ_STAT_REG 0x100
#define HIDMA_EVCA_IRQ_CLR_REG 0x108
#define HIDMA_EVCA_IRQ_EN_REG 0x110

#define HIDMA_EVRE_CFG_IDX 0

#define HIDMA_EVRE_ERRINFO_BIT_POS 24
#define HIDMA_EVRE_CODE_BIT_POS 28

#define HIDMA_EVRE_ERRINFO_MASK GENMASK(3, 0)
#define HIDMA_EVRE_CODE_MASK GENMASK(3, 0)

#define HIDMA_CH_CONTROL_MASK GENMASK(7, 0)
#define HIDMA_CH_STATE_MASK GENMASK(7, 0)
#define HIDMA_CH_STATE_BIT_POS 0x8

#define HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS 0
#define HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS 1
#define HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS 9
#define HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS 10
#define HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS 11
#define HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS 14

#define ENABLE_IRQS (BIT(HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS) | \
		     BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS) | \
		     BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) | \
		     BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS) | \
		     BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS) | \
		     BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS))

#define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size)	\
do {							\
	iter += size;					\
	if (iter >= ring_size)				\
		iter -= ring_size;			\
} while (0)
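
/*
 * Example: with HIDMA_TRE_SIZE == 16 and a four-entry ring
 * (ring_size == 64), advancing an iterator at offset 48 by one entry
 * yields 64, which wraps back to 0. Ring offsets therefore always stay
 * in the half-open range [0, ring_size).
 */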

#define HIDMA_CH_STATE(val) \
	((val >> HIDMA_CH_STATE_BIT_POS) & HIDMA_CH_STATE_MASK)

#define HIDMA_ERR_INT_MASK \
	(BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS) | \
	 BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) | \
	 BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS) | \
	 BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS) | \
	 BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS))

enum ch_command {
	HIDMA_CH_DISABLE = 0,
	HIDMA_CH_ENABLE = 1,
	HIDMA_CH_SUSPEND = 2,
	HIDMA_CH_RESET = 9,
};
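
/*
 * Channel commands are written to bits [23:16] of the channel CTRLSTS
 * register (the HIDMA_CH_CONTROL_MASK << 16 field used throughout this
 * file); the resulting channel state is read back from bits [15:8] via
 * HIDMA_CH_STATE().
 */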

enum ch_state {
	HIDMA_CH_DISABLED = 0,
	HIDMA_CH_ENABLED = 1,
	HIDMA_CH_RUNNING = 2,
	HIDMA_CH_SUSPENDED = 3,
	HIDMA_CH_STOPPED = 4,
};

enum err_code {
	HIDMA_EVRE_STATUS_COMPLETE = 1,
	HIDMA_EVRE_STATUS_ERROR = 4,
};
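
/*
 * Completion codes delivered by the hardware in bits [31:28] of the
 * EVRE config word. hidma_ll_status() tests them as bit flags:
 * COMPLETE maps to DMA_COMPLETE, ERROR to DMA_ERROR, and anything else
 * is treated as still in progress.
 */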

static int hidma_is_chan_enabled(int state)
{
	switch (state) {
	case HIDMA_CH_ENABLED:
	case HIDMA_CH_RUNNING:
		return true;
	default:
		return false;
	}
}

void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch)
{
	struct hidma_tre *tre;

	if (tre_ch >= lldev->nr_tres) {
		dev_err(lldev->dev, "invalid TRE number in free:%d", tre_ch);
		return;
	}

	tre = &lldev->trepool[tre_ch];
	if (atomic_read(&tre->allocated) != true) {
		dev_err(lldev->dev, "trying to free an unused TRE:%d", tre_ch);
		return;
	}

	atomic_set(&tre->allocated, 0);
}

int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
		     void (*callback)(void *data), void *data, u32 *tre_ch)
{
	unsigned int i;
	struct hidma_tre *tre;
	u32 *tre_local;

	if (!tre_ch || !lldev)
		return -EINVAL;

	/* need to have at least one empty spot in the queue */
	for (i = 0; i < lldev->nr_tres - 1; i++) {
		if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1))
			break;
	}

	if (i == (lldev->nr_tres - 1))
		return -ENOMEM;

	tre = &lldev->trepool[i];
	tre->dma_sig = sig;
	tre->dev_name = dev_name;
	tre->callback = callback;
	tre->data = data;
	tre->idx = i;
	tre->status = 0;
	tre->queued = 0;
	tre->err_code = 0;
	tre->err_info = 0;
	tre->lldev = lldev;
	tre_local = &tre->tre_local[0];
	tre_local[HIDMA_TRE_CFG_IDX] = (lldev->chidx & 0xFF) << 8;
	tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16);	/* set IEOB */
	*tre_ch = i;
	if (callback)
		callback(data);
	return 0;
}
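
/*
 * Typical client flow, as a sketch (the dmaengine glue in hidma.c is
 * the real caller; HIDMA_TRE_MEMCPY is the memcpy transaction type
 * defined in hidma.h):
 *
 *	u32 tre_ch;
 *
 *	if (!hidma_ll_request(lldev, sig, "client", cb, data, &tre_ch)) {
 *		hidma_ll_set_transfer_params(lldev, tre_ch, src, dest,
 *					     len, 0, HIDMA_TRE_MEMCPY);
 *		hidma_ll_queue_request(lldev, tre_ch);
 *		hidma_ll_start(lldev);
 *	}
 *
 * hidma_ll_start() rings the transfer-channel doorbell; the result is
 * reported later through the callback passed to hidma_ll_request().
 */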

/*
 * Multiple TREs may be queued and waiting in the pending queue.
 */
static void hidma_ll_tre_complete(unsigned long arg)
{
	struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
	struct hidma_tre *tre;

	while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) {
		/* call the user if it has been read by the hardware */
		if (tre->callback)
			tre->callback(tre->data);
	}
}

static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info,
				u8 err_code)
{
	struct hidma_tre *tre;
	unsigned long flags;
	u32 tre_iterator;

	spin_lock_irqsave(&lldev->lock, flags);

	tre_iterator = lldev->tre_processed_off;
	tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
	if (!tre) {
		spin_unlock_irqrestore(&lldev->lock, flags);
		dev_warn(lldev->dev, "tre_index [%d] and tre out of sync\n",
			 tre_iterator / HIDMA_TRE_SIZE);
		return -EINVAL;
	}
	lldev->pending_tre_list[tre->tre_index] = NULL;

	/*
	 * Keep track of pending TREs that SW is expecting to receive
	 * from HW. We got one now. Decrement our counter.
	 */
	if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
		dev_warn(lldev->dev, "tre count mismatch on completion");
		atomic_set(&lldev->pending_tre_count, 0);
	}

	HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
				 lldev->tre_ring_size);
	lldev->tre_processed_off = tre_iterator;
	spin_unlock_irqrestore(&lldev->lock, flags);

	tre->err_info = err_info;
	tre->err_code = err_code;
	tre->queued = 0;

	kfifo_put(&lldev->handoff_fifo, tre);
	tasklet_schedule(&lldev->task);

	return 0;
}
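
/*
 * Design note: completions are recorded under lldev->lock, but the
 * user callback is deliberately not invoked here. The TRE is handed
 * off through a kfifo to hidma_ll_tre_complete(), which runs in
 * tasklet context, so client callbacks never execute with the
 * spinlock held or inside the interrupt handler proper.
 */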

/*
 * Called to handle the interrupt for the channel.
 * Return a positive number if TRE or EVRE were consumed on this run.
 * Return a positive number if there are pending TREs or EVREs.
 * Return 0 if there is nothing to consume or no pending TREs/EVREs found.
 */
static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
{
	u32 evre_ring_size = lldev->evre_ring_size;
	u32 err_info, err_code, evre_write_off;
	u32 evre_iterator;
	u32 num_completed = 0;

	evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
	evre_iterator = lldev->evre_processed_off;

	if ((evre_write_off > evre_ring_size) ||
	    (evre_write_off % HIDMA_EVRE_SIZE)) {
		dev_err(lldev->dev, "HW reports invalid EVRE write offset\n");
		return 0;
	}

	/*
	 * By the time control reaches here the number of EVREs and TREs
	 * may not match. Only consume the ones that hardware told us.
	 */
	while (evre_iterator != evre_write_off) {
		u32 *current_evre = lldev->evre_ring + evre_iterator;
		u32 cfg;

		cfg = current_evre[HIDMA_EVRE_CFG_IDX];
		err_info = cfg >> HIDMA_EVRE_ERRINFO_BIT_POS;
		err_info &= HIDMA_EVRE_ERRINFO_MASK;
		err_code =
		    (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;

		if (hidma_post_completed(lldev, err_info, err_code))
			break;

		HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
					 evre_ring_size);

		/*
		 * Read the new event descriptor written by the HW.
		 * As we are processing the delivered events, other events
		 * get queued to the SW for processing.
		 */
		evre_write_off =
		    readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
		num_completed++;

		/*
		 * An error interrupt might have arrived while we are
		 * processing this one; stop if the channel got disabled.
		 */
		if (!hidma_ll_isenabled(lldev))
			break;
	}

	if (num_completed) {
		u32 evre_read_off = (lldev->evre_processed_off +
				     HIDMA_EVRE_SIZE * num_completed);
		evre_read_off = evre_read_off % evre_ring_size;
		writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);

		/* record the last processed tre offset */
		lldev->evre_processed_off = evre_read_off;
	}

	return num_completed;
}
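
/*
 * Worked example of the EVRE decode above: a config word of 0x10000000
 * has bits [31:28] == 1 (HIDMA_EVRE_STATUS_COMPLETE) and bits [27:24]
 * == 0 (no error info), i.e. a clean completion; 0x4F000000 would be
 * HIDMA_EVRE_STATUS_ERROR with err_info 0xF.
 */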

void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
			       u8 err_code)
{
	while (atomic_read(&lldev->pending_tre_count)) {
		if (hidma_post_completed(lldev, err_info, err_code))
			break;
	}
}

static int hidma_ll_reset(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_RESET << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	/*
	 * Delay 10ms after reset to allow DMA logic to quiesce.
	 * Do a polled read up to 1ms and 10ms maximum.
	 */
	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "transfer channel did not reset\n");
		return ret;
	}

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_RESET << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	/*
	 * Delay 10ms after reset to allow DMA logic to quiesce.
	 * Do a polled read up to 1ms and 10ms maximum.
	 */
	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "event channel did not reset\n");
		return ret;
	}

	lldev->trch_state = HIDMA_CH_DISABLED;
	lldev->evch_state = HIDMA_CH_DISABLED;
	return 0;
}
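
/*
 * CTRLSTS layout as used above: software writes a command from enum
 * ch_command into bits [23:16] and polls the state field in bits
 * [15:8] (HIDMA_CH_STATE()) until the hardware acknowledges, e.g.
 * HIDMA_CH_RESET eventually reads back as HIDMA_CH_DISABLED.
 */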

/*
 * The interrupt handler for HIDMA will try to consume as many pending
 * EVRE from the event queue as possible. Each EVRE has an associated
 * TRE that holds the user interface parameters. EVRE reports the
 * result of the transaction. Hardware guarantees ordering between EVREs
 * and TREs. We use the last processed offset to figure out which TRE is
 * associated with which EVRE. The last processed offset is the same for
 * both the TRE and EVRE queues, as both rings use 16 byte entries and
 * move in lockstep.
 *
 * For example, after TRE0 and TRE1 have been executed, the hardware has
 * written two EVREs into the event ring and advanced its write pointer:
 *
 *	TRE ring:  [ TRE0  | TRE1  | TRE2 | ... ]
 *	EVRE ring: [ EVRE0 | EVRE1 | ...        ]
 *	           ^ SW processed offset
 *	                           ^ HW write pointer
 *
 * Software walks the EVRE ring from the processed offset up to the
 * write pointer, completes the matching TREs from the pending list in
 * order, and finally rings the EVRE doorbell so the hardware can reuse
 * the consumed event entries.
 *
 * If an error interrupt is seen, the channel is disabled and every TRE
 * still pending is completed with an error code, so clients are always
 * notified of the outcome.
 */
static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
{
	unsigned long irqflags;

	if (cause & HIDMA_ERR_INT_MASK) {
		dev_err(lldev->dev, "error 0x%x, disabling...\n",
			cause);

		/* Clear out pending interrupts */
		writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

		/* No further submissions. */
		hidma_ll_disable(lldev);

		/* Driver completes the txn and intimates the client. */
		hidma_cleanup_pending_tre(lldev, 0xFF,
					  HIDMA_EVRE_STATUS_ERROR);

		return;
	}

	spin_lock_irqsave(&lldev->lock, irqflags);
	writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	spin_unlock_irqrestore(&lldev->lock, irqflags);

	/*
	 * Clearing the IRQ status above does not discard events: the
	 * hardware keeps producing EVREs and advancing its write
	 * pointer on its own. hidma_handle_tre_completion() walks the
	 * event ring up to that write pointer, so anything that lands
	 * while we are here is either consumed in this pass or raises
	 * a fresh interrupt that brings us back.
	 */
	hidma_handle_tre_completion(lldev);
}

irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;
	u32 status;
	u32 enable;
	u32 cause;

	status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	cause = status & enable;

	while (cause) {
		hidma_ll_int_handler_internal(lldev, cause);

		/*
		 * Another interrupt might have arrived while we are
		 * processing this one. Read the new cause.
		 */
		status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
		enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
		cause = status & enable;
	}

	return IRQ_HANDLED;
}

irqreturn_t hidma_ll_inthandler_msi(int chirq, void *arg, int cause)
{
	struct hidma_lldev *lldev = arg;

	hidma_ll_int_handler_internal(lldev, cause);
	return IRQ_HANDLED;
}

int hidma_ll_enable(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_ENABLE << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "event channel did not get enabled\n");
		return ret;
	}

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_ENABLE << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
				 1000, 10000);
	if (ret) {
		dev_err(lldev->dev, "transfer channel did not get enabled\n");
		return ret;
	}

	lldev->trch_state = HIDMA_CH_ENABLED;
	lldev->evch_state = HIDMA_CH_ENABLED;

	/* enable irqs now */
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	return 0;
}
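
/*
 * Note the bring-up order above: the event channel is enabled before
 * the transfer channel, so there is never a window where a transfer
 * can run without a place to report its completion. hidma_ll_disable()
 * tears down in the reverse order (transfer channel first) so that
 * completions for in-flight work can still be delivered.
 */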

void hidma_ll_start(struct hidma_lldev *lldev)
{
	unsigned long irqflags;

	spin_lock_irqsave(&lldev->lock, irqflags);
	writel(lldev->tre_write_offset, lldev->trca + HIDMA_TRCA_DOORBELL_REG);
	spin_unlock_irqrestore(&lldev->lock, irqflags);
}

bool hidma_ll_isenabled(struct hidma_lldev *lldev)
{
	u32 val;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	lldev->trch_state = HIDMA_CH_STATE(val);
	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	lldev->evch_state = HIDMA_CH_STATE(val);

	/* both channels have to be enabled before calling this function */
	if (hidma_is_chan_enabled(lldev->trch_state) &&
	    hidma_is_chan_enabled(lldev->evch_state))
		return true;

	return false;
}

void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
{
	struct hidma_tre *tre;
	unsigned long flags;

	tre = &lldev->trepool[tre_ch];

	/* copy the TRE into its location in the TRE ring */
	spin_lock_irqsave(&lldev->lock, flags);
	tre->tre_index = lldev->tre_write_offset / HIDMA_TRE_SIZE;
	lldev->pending_tre_list[tre->tre_index] = tre;
	memcpy(lldev->tre_ring + lldev->tre_write_offset,
	       &tre->tre_local[0], HIDMA_TRE_SIZE);
	tre->err_code = 0;
	tre->err_info = 0;
	tre->queued = 1;
	atomic_inc(&lldev->pending_tre_count);
	lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
	    % lldev->tre_ring_size;
	spin_unlock_irqrestore(&lldev->lock, flags);
}
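
/*
 * Queueing only stages work: the TRE is copied into the ring and the
 * software write offset advances, but the hardware does not see it
 * until hidma_ll_start() publishes tre_write_offset through the
 * transfer-channel doorbell. This lets a caller batch several
 * hidma_ll_queue_request() calls behind a single doorbell write.
 */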

/*
 * Note that even though we stop this channel, if there is a pending
 * transaction in flight it will complete and follow the callback.
 * This request will prevent further requests from being made.
 */
int hidma_ll_disable(struct hidma_lldev *lldev)
{
	u32 val;
	int ret;

	/* nothing to do if the channels are already out of service */
	if (!hidma_ll_isenabled(lldev))
		return 0;

	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_SUSPEND << 16;
	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

	/*
	 * Start the wait right after the suspend is confirmed.
	 * Do a polled read up to 1ms and 10ms maximum.
	 */
	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
				 1000, 10000);
	if (ret)
		return ret;

	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
	val |= HIDMA_CH_SUSPEND << 16;
	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

	/*
	 * Start the wait right after the suspend is confirmed.
	 * Do a polled read up to 1ms and 10ms maximum.
	 */
	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
				 1000, 10000);
	if (ret)
		return ret;

	lldev->trch_state = HIDMA_CH_SUSPENDED;
	lldev->evch_state = HIDMA_CH_SUSPENDED;

	/* disable interrupts */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	return 0;
}

void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
				  dma_addr_t src, dma_addr_t dest, u32 len,
				  u32 flags, u32 txntype)
{
	struct hidma_tre *tre;
	u32 *tre_local;

	if (tre_ch >= lldev->nr_tres) {
		dev_err(lldev->dev, "invalid TRE number in transfer params:%d",
			tre_ch);
		return;
	}

	tre = &lldev->trepool[tre_ch];
	if (atomic_read(&tre->allocated) != true) {
		dev_err(lldev->dev, "trying to set params on an unused TRE:%d",
			tre_ch);
		return;
	}

	tre_local = &tre->tre_local[0];
	tre_local[HIDMA_TRE_CFG_IDX] &= ~GENMASK(7, 0);
	tre_local[HIDMA_TRE_CFG_IDX] |= txntype;
	tre_local[HIDMA_TRE_LEN_IDX] = len;
	tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
	tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
	tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest);
	tre_local[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest);
	tre->int_flags = flags;
}
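
/*
 * Shadow-TRE layout, per the HIDMA_TRE_*_IDX indices from hidma.h:
 * word 0 holds the config (transaction type in bits [7:0], channel
 * index in bits [15:8], IEOB in bit 16 as set by hidma_ll_request()),
 * the HIDMA_TRE_LEN_IDX word holds the byte count, and the remaining
 * words carry the 64-bit source and destination addresses split into
 * low/high halves.
 */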

/*
 * Called during initialization and after an error condition
 * to restore hardware state.
 */
int hidma_ll_setup(struct hidma_lldev *lldev)
{
	int rc;
	u64 addr;
	u32 val;
	u32 nr_tres = lldev->nr_tres;

	atomic_set(&lldev->pending_tre_count, 0);
	lldev->tre_processed_off = 0;
	lldev->evre_processed_off = 0;
	lldev->tre_write_offset = 0;

	/* disable interrupts */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	/* clear all pending interrupts */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

	rc = hidma_ll_reset(lldev);
	if (rc)
		return rc;

	/*
	 * Clear all pending interrupts again.
	 * Otherwise, we observe reset complete interrupts.
	 */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

	/* disable interrupts again after reset */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	addr = lldev->tre_dma;
	writel(lower_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_LOW_REG);
	writel(upper_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_HIGH_REG);
	writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG);

	addr = lldev->evre_dma;
	writel(lower_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_LOW_REG);
	writel(upper_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_HIGH_REG);
	writel(HIDMA_EVRE_SIZE * nr_tres,
	       lldev->evca + HIDMA_EVCA_RING_LEN_REG);

	/* configure interrupts (wired IRQ or MSI) */
	hidma_ll_setup_irq(lldev, lldev->msi_support);

	return hidma_ll_enable(lldev);
}

void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi)
{
	u32 val;

	lldev->msi_support = msi;

	/* disable interrupts again after reset */
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

	/* support IRQ by default */
	val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
	val &= ~0xF;
	if (!lldev->msi_support)
		val = val | 0x1;
	writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);

	/* clear all pending interrupts and enable them */
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
}
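
/*
 * The low nibble of INTCTRL selects the interrupt delivery mode: the
 * code above sets bit 0 for a wired interrupt line and leaves the
 * nibble clear when completions arrive via MSI, matching the two entry
 * points hidma_ll_inthandler() and hidma_ll_inthandler_msi().
 */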

struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
				  void __iomem *trca, void __iomem *evca,
				  u8 chidx)
{
	u32 required_bytes;
	struct hidma_lldev *lldev;
	int rc;
	size_t sz;

	if (!trca || !evca || !dev || !nr_tres)
		return NULL;

	/* need at least four TREs */
	if (nr_tres < 4)
		return NULL;

	/* need an extra space */
	nr_tres += 1;

	lldev = devm_kzalloc(dev, sizeof(struct hidma_lldev), GFP_KERNEL);
	if (!lldev)
		return NULL;

	lldev->evca = evca;
	lldev->trca = trca;
	lldev->dev = dev;
	sz = sizeof(struct hidma_tre);
	lldev->trepool = devm_kcalloc(lldev->dev, nr_tres, sz, GFP_KERNEL);
	if (!lldev->trepool)
		return NULL;

	required_bytes = sizeof(lldev->pending_tre_list[0]);
	lldev->pending_tre_list = devm_kcalloc(dev, nr_tres, required_bytes,
					       GFP_KERNEL);
	if (!lldev->pending_tre_list)
		return NULL;

	sz = (HIDMA_TRE_SIZE + 1) * nr_tres;
	lldev->tre_ring = dmam_alloc_coherent(dev, sz, &lldev->tre_dma,
					      GFP_KERNEL);
	if (!lldev->tre_ring)
		return NULL;

	memset(lldev->tre_ring, 0, (HIDMA_TRE_SIZE + 1) * nr_tres);
	lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres;
	lldev->nr_tres = nr_tres;

	/* the TRE ring has to be TRE_SIZE aligned */
	if (!IS_ALIGNED(lldev->tre_dma, HIDMA_TRE_SIZE)) {
		u8 tre_ring_shift;

		tre_ring_shift = lldev->tre_dma % HIDMA_TRE_SIZE;
		tre_ring_shift = HIDMA_TRE_SIZE - tre_ring_shift;
		lldev->tre_dma += tre_ring_shift;
		lldev->tre_ring += tre_ring_shift;
	}

	sz = (HIDMA_EVRE_SIZE + 1) * nr_tres;
	lldev->evre_ring = dmam_alloc_coherent(dev, sz, &lldev->evre_dma,
					       GFP_KERNEL);
	if (!lldev->evre_ring)
		return NULL;

	memset(lldev->evre_ring, 0, (HIDMA_EVRE_SIZE + 1) * nr_tres);
	lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres;

	/* the EVRE ring has to be EVRE_SIZE aligned */
	if (!IS_ALIGNED(lldev->evre_dma, HIDMA_EVRE_SIZE)) {
		u8 evre_ring_shift;

		evre_ring_shift = lldev->evre_dma % HIDMA_EVRE_SIZE;
		evre_ring_shift = HIDMA_EVRE_SIZE - evre_ring_shift;
		lldev->evre_dma += evre_ring_shift;
		lldev->evre_ring += evre_ring_shift;
	}
	lldev->chidx = chidx;

	sz = nr_tres * sizeof(struct hidma_tre *);
	rc = kfifo_alloc(&lldev->handoff_fifo, sz, GFP_KERNEL);
	if (rc)
		return NULL;

	/* make the lock usable before any interrupt can be delivered */
	spin_lock_init(&lldev->lock);

	rc = hidma_ll_setup(lldev);
	if (rc)
		return NULL;

	tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
	lldev->initialized = 1;
	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	return lldev;
}
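
/*
 * Sizing example for hidma_ll_init(): a caller asking for 64 TREs gets
 * one extra ring slot (nr_tres = 65). Each ring then allocates
 * (entry_size + 1) * 65 bytes, i.e. 17 * 65 = 1105 bytes for 16-byte
 * entries, so the 65 spare bytes comfortably absorb the up-to-15-byte
 * shift applied when realigning the DMA address to the entry size.
 */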

int hidma_ll_uninit(struct hidma_lldev *lldev)
{
	u32 required_bytes;
	int rc = 0;
	u32 val;

	if (!lldev)
		return -ENODEV;

	if (!lldev->initialized)
		return 0;

	lldev->initialized = 0;

	required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
	tasklet_kill(&lldev->task);
	memset(lldev->trepool, 0, required_bytes);
	lldev->trepool = NULL;
	atomic_set(&lldev->pending_tre_count, 0);
	lldev->tre_write_offset = 0;

	rc = hidma_ll_reset(lldev);

	/*
	 * Clear all pending interrupts again.
	 * Otherwise, we observe reset complete interrupts.
	 */
	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
	return rc;
}

enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch)
{
	enum dma_status ret = DMA_ERROR;
	struct hidma_tre *tre;
	unsigned long flags;
	u8 err_code;

	spin_lock_irqsave(&lldev->lock, flags);

	tre = &lldev->trepool[tre_ch];
	err_code = tre->err_code;

	if (err_code & HIDMA_EVRE_STATUS_COMPLETE)
		ret = DMA_COMPLETE;
	else if (err_code & HIDMA_EVRE_STATUS_ERROR)
		ret = DMA_ERROR;
	else
		ret = DMA_IN_PROGRESS;
	spin_unlock_irqrestore(&lldev->lock, flags);

	return ret;
}