/*
 * DPAA2 DPIO service layer: presents QBMan software portals to DPAA2 object
 * drivers for frame enqueue/dequeue, buffer pool release/acquire and
 * FQDAN/CDAN notification handling.
 */

#include <linux/types.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "dpio.h"
#include "qbman-portal.h"

struct dpaa2_io {
	struct dpaa2_io_desc dpio_desc;
	struct qbman_swp_desc swp_desc;
	struct qbman_swp *swp;
	struct list_head node;
	/* protect against multiple management commands */
	spinlock_t lock_mgmt_cmd;
	/* protect the notifications list */
	spinlock_t lock_notifications;
	struct list_head notifications;
	struct device *dev;
};

struct dpaa2_io_store {
	unsigned int max;
	dma_addr_t paddr;
	struct dpaa2_dq *vaddr;
	void *alloced_addr;    /* unaligned value from kmalloc() */
	unsigned int idx;      /* position of the next-to-be-returned entry */
	struct qbman_swp *swp; /* portal used to issue VDQCR */
	struct device *dev;    /* device used for DMA mapping */
};

/* keep a per-cpu array of DPIOs for fast access */
static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
static DEFINE_SPINLOCK(dpio_list_lock);

static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
						     int cpu)
{
	if (d)
		return d;

	if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
		return NULL;

	/*
	 * If cpu == -1, choose the current cpu, with no guarantees about
	 * potentially being migrated away.
	 */
	if (cpu < 0)
		cpu = smp_processor_id();

	/* If a specific cpu was requested, pick it up immediately */
	return dpio_by_cpu[cpu];
}

static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
{
	if (d)
		return d;

	d = service_select_by_cpu(d, -1);
	if (d)
		return d;

	/* fall back to round-robin over all registered portals */
	spin_lock(&dpio_list_lock);
	d = list_entry(dpio_list.next, struct dpaa2_io, node);
	list_del(&d->node);
	list_add_tail(&d->node, &dpio_list);
	spin_unlock(&dpio_list_lock);

	return d;
}
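
/**
 * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
 * @cpu: the cpu to select the service for, or DPAA2_IO_ANY_CPU
 *
 * The cpu-affine service is returned if it exists.  If DPAA2_IO_ANY_CPU is
 * passed, the next available service is picked in round-robin fashion.
 *
 * Return the selected dpaa2_io service, or NULL if no suitable service is
 * registered for the requested cpu.
 */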
struct dpaa2_io *dpaa2_io_service_select(int cpu)
{
	if (cpu == DPAA2_IO_ANY_CPU)
		return service_select(NULL);

	return service_select_by_cpu(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
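
/**
 * dpaa2_io_create() - create a dpaa2_io object.
 * @desc: the dpaa2_io descriptor
 * @dev: the actual DPIO device
 *
 * Activates a "struct dpaa2_io" corresponding to the given config of an actual
 * DPIO object.
 *
 * Return a valid dpaa2_io object for success, or NULL for failure.
 */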
struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
				 struct device *dev)
{
	struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	/* check if CPU is out of range (-1 means any cpu) */
	if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
		kfree(obj);
		return NULL;
	}

	obj->dpio_desc = *desc;
	obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
	obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
	obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
	obj->swp = qbman_swp_init(&obj->swp_desc);

	if (!obj->swp) {
		kfree(obj);
		return NULL;
	}

	INIT_LIST_HEAD(&obj->node);
	spin_lock_init(&obj->lock_mgmt_cmd);
	spin_lock_init(&obj->lock_notifications);
	INIT_LIST_HEAD(&obj->notifications);

	/* For now only enable DQRR interrupts */
	qbman_swp_interrupt_set_trigger(obj->swp,
					QBMAN_SWP_INTERRUPT_DQRI);
	qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
	if (obj->dpio_desc.receives_notifications)
		qbman_swp_push_set(obj->swp, 0, 1);

	spin_lock(&dpio_list_lock);
	list_add_tail(&obj->node, &dpio_list);
	if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
		dpio_by_cpu[desc->cpu] = obj;
	spin_unlock(&dpio_list_lock);

	obj->dev = dev;

	return obj;
}
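
/**
 * dpaa2_io_down() - release the dpaa2_io object.
 * @d: the dpaa2_io object to be released.
 *
 * The "struct dpaa2_io" type can represent an individual DPIO object (as
 * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
 * which can be used to group/encapsulate multiple DPIO objects. In all cases,
 * each handle obtained should be released using this function.
 */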
void dpaa2_io_down(struct dpaa2_io *d)
{
	spin_lock(&dpio_list_lock);
	/* a per-cpu slot is only claimed in dpaa2_io_create() for a valid cpu */
	if (d->dpio_desc.cpu != DPAA2_IO_ANY_CPU)
		dpio_by_cpu[d->dpio_desc.cpu] = NULL;
	list_del(&d->node);
	spin_unlock(&dpio_list_lock);

	kfree(d);
}

#define DPAA_POLL_MAX 32
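
/**
 * dpaa2_io_irq() - ISR for DPIO interrupts
 * @obj: the given DPIO object.
 *
 * Return IRQ_HANDLED for success or IRQ_NONE if there was no pending
 * interrupt.
 */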
irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
{
	const struct dpaa2_dq *dq;
	int max = 0;
	struct qbman_swp *swp;
	u32 status;

	swp = obj->swp;
	status = qbman_swp_interrupt_read_status(swp);
	if (!status)
		return IRQ_NONE;

	/* consume DQRR entries, but give up after DPAA_POLL_MAX of them */
	dq = qbman_swp_dqrr_next(swp);
	while (dq) {
		if (qbman_result_is_SCN(dq)) {
			struct dpaa2_io_notification_ctx *ctx;
			u64 q64;

			q64 = qbman_result_SCN_ctx(dq);
			ctx = (void *)(uintptr_t)q64;
			ctx->cb(ctx);
		} else {
			pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
		}
		qbman_swp_dqrr_consume(swp, dq);
		++max;
		if (max > DPAA_POLL_MAX)
			goto done;
		dq = qbman_swp_dqrr_next(swp);
	}
done:
	qbman_swp_interrupt_clear_status(swp, status);
	qbman_swp_interrupt_set_inhibit(swp, 0);
	return IRQ_HANDLED;
}
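
/**
 * dpaa2_io_get_cpu() - get the cpu associated with a given DPIO object
 * @d: the given DPIO object.
 *
 * Return the cpu associated with the DPIO object.
 */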
int dpaa2_io_get_cpu(struct dpaa2_io *d)
{
	return d->dpio_desc.cpu;
}
EXPORT_SYMBOL(dpaa2_io_get_cpu);
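
/**
 * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
 *                               notifications on the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests the register.
 *
 * The caller should make the MC command to attach a DPAA2 object to
 * a DPIO after this function completes successfully.  In that way:
 *    (a) The DPIO service is "ready" to handle a notification arrival
 *        (which might happen before the "attach" command to MC has
 *        returned control of execution back to the caller);
 *    (b) The DPIO service can provide back to the caller the 64-bit context
 *        that gets set in the DQRR entry when the notification arrives.
 *
 * Return 0 for success, -ENODEV if no service is available for the desired
 * cpu, or -EINVAL if the device link cannot be created.
 */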
int dpaa2_io_service_register(struct dpaa2_io *d,
			      struct dpaa2_io_notification_ctx *ctx,
			      struct device *dev)
{
	struct device_link *link;
	unsigned long irqflags;

	d = service_select_by_cpu(d, ctx->desired_cpu);
	if (!d)
		return -ENODEV;

	link = device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		return -EINVAL;

	ctx->dpio_id = d->dpio_desc.dpio_id;
	ctx->qman64 = (u64)(uintptr_t)ctx;
	ctx->dpio_private = d;
	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_add(&ctx->node, &d->notifications);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);

	/* Enable the generation of CDAN notifications */
	if (ctx->is_cdan)
		return qbman_swp_CDAN_set_context_enable(d->swp,
							 (u16)ctx->id,
							 ctx->qman64);
	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_register);
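
/**
 * dpaa2_io_service_deregister() - The opposite of 'register'.
 * @service: the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests to be deregistered.
 *
 * This function should be called only after sending the MC command to
 * detach the notification-producing device from the DPIO.
 */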
void dpaa2_io_service_deregister(struct dpaa2_io *service,
				 struct dpaa2_io_notification_ctx *ctx,
				 struct device *dev)
{
	struct dpaa2_io *d = ctx->dpio_private;
	unsigned long irqflags;

	if (ctx->is_cdan)
		qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);

	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_del(&ctx->node);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);
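
/**
 * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 *
 * Once an FQDAN/CDAN has been produced, the corresponding FQ/channel is
 * considered "disarmed". That is, the user can issue pull dequeue operations
 * on that traffic source for as long as it likes. Eventually it may wish to
 * "rearm" that source to allow it to produce another FQDAN/CDAN, which is
 * what this function achieves.
 *
 * Return 0 for success.
 */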
int dpaa2_io_service_rearm(struct dpaa2_io *d,
			   struct dpaa2_io_notification_ctx *ctx)
{
	unsigned long irqflags;
	int err;

	d = service_select_by_cpu(d, ctx->desired_cpu);
	if (unlikely(!d))
		return -ENODEV;

	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	if (ctx->is_cdan)
		err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
	else
		err = qbman_swp_fq_schedule(d->swp, ctx->id);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
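
/**
 * dpaa2_io_service_pull_fq() - pull dequeue functions from a frame queue.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */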
int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
			     struct dpaa2_io_store *s)
{
	struct qbman_pull_desc pd;
	int err;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
	qbman_pull_desc_set_fq(&pd, fqid);

	d = service_select(d);
	if (!d)
		return -ENODEV;

	s->swp = d->swp;
	err = qbman_swp_pull(d->swp, &pd);
	if (err)
		s->swp = NULL;

	return err;
}
EXPORT_SYMBOL(dpaa2_io_service_pull_fq);
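
/**
 * dpaa2_io_service_pull_channel() - pull dequeue functions from a channel.
 * @d: the given DPIO service.
 * @channelid: the given channel id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */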
int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
				  struct dpaa2_io_store *s)
{
	struct qbman_pull_desc pd;
	int err;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
	qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);

	d = service_select(d);
	if (!d)
		return -ENODEV;

	s->swp = d->swp;
	err = qbman_swp_pull(d->swp, &pd);
	if (err)
		s->swp = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);
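
/**
 * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */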
int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
				u32 fqid,
				const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_fq(&ed, fqid);

	return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
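
/**
 * dpaa2_io_service_enqueue_multiple_fq() - Enqueue multiple frames
 * to a frame queue using one fqid.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @fd: the frame descriptors which are enqueued.
 * @nb: the number of frames to be enqueued.
 *
 * Return the number of frames successfully enqueued, or -ENODEV if there is
 * no dpio service.
 */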
int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d,
					 u32 fqid,
					 const struct dpaa2_fd *fd,
					 int nb)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_fq(&ed, fqid);

	return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);
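
/**
 * dpaa2_io_service_enqueue_multiple_desc_fq() - Enqueue multiple frames
 * to different frame queues using a list of fqids.
 * @d: the given DPIO service.
 * @fqid: the given list of frame queue ids.
 * @fd: the frame descriptors which are enqueued.
 * @nb: the number of frames to be enqueued, at most 32.
 *
 * Return the number of frames successfully enqueued, -ENOMEM if the enqueue
 * descriptors cannot be allocated, or -ENODEV if there is no dpio service.
 */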
int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
					      u32 *fqid,
					      const struct dpaa2_fd *fd,
					      int nb)
{
	struct qbman_eq_desc *ed;
	int i, ret;

	/* room for up to 32 enqueue descriptors; callers must not exceed it */
	ed = kcalloc(32, sizeof(struct qbman_eq_desc), GFP_KERNEL);
	if (!ed)
		return -ENOMEM;

	d = service_select(d);
	if (!d) {
		ret = -ENODEV;
		goto out;
	}

	for (i = 0; i < nb; i++) {
		qbman_eq_desc_clear(&ed[i]);
		qbman_eq_desc_set_no_orp(&ed[i], 0);
		qbman_eq_desc_set_fq(&ed[i], fqid[i]);
	}

	ret = qbman_swp_enqueue_multiple_desc(d->swp, &ed[0], fd, nb);
out:
	kfree(ed);
	return ret;
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_desc_fq);
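
/**
 * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a queuing destination.
 * @d: the given DPIO service.
 * @qdid: the given queuing destination id.
 * @prio: the given queuing priority.
 * @qdbin: the given queuing destination bin.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */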
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
				u32 qdid, u8 prio, u16 qdbin,
				const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);

	return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);
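
/**
 * dpaa2_io_service_release() - Release buffers to a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffers to be released.
 * @num_buffers: the number of the buffers to be released.
 *
 * Return 0 for success, and negative error code for failure.
 */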
int dpaa2_io_service_release(struct dpaa2_io *d,
			     u16 bpid,
			     const u64 *buffers,
			     unsigned int num_buffers)
{
	struct qbman_release_desc rd;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_release_desc_clear(&rd);
	qbman_release_desc_set_bpid(&rd, bpid);

	return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_release);
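
/**
 * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffer addresses for acquired buffers.
 * @num_buffers: the expected number of the buffers to acquire.
 *
 * Return a negative error code if the command failed, otherwise the number of
 * buffers acquired, which may be less than the number requested.
 */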
int dpaa2_io_service_acquire(struct dpaa2_io *d,
			     u16 bpid,
			     u64 *buffers,
			     unsigned int num_buffers)
{
	unsigned long irqflags;
	int err;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
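
/**
 * dpaa2_io_store_create() - Create the dma memory storage for dequeue results.
 * @max_frames: the maximum number of dequeued frame results, must be <= 32.
 * @dev: the device to allow mapping/unmapping the DMAable region.
 *
 * The size of the storage is "max_frames * sizeof(struct dpaa2_dq)".
 * The 'dpaa2_io_store' returned is a DPIO-service-managed object.
 *
 * Return a pointer to the dpaa2_io_store struct for successfully created
 * storage memory, or NULL on error.
 */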
struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
					     struct device *dev)
{
	struct dpaa2_io_store *ret;
	size_t size;

	if (!max_frames || (max_frames > 32))
		return NULL;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->max = max_frames;
	/* over-allocate so the result storage can be aligned to 64 bytes */
	size = max_frames * sizeof(struct dpaa2_dq) + 64;
	ret->alloced_addr = kzalloc(size, GFP_KERNEL);
	if (!ret->alloced_addr) {
		kfree(ret);
		return NULL;
	}

	ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
	ret->paddr = dma_map_single(dev, ret->vaddr,
				    sizeof(struct dpaa2_dq) * max_frames,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, ret->paddr)) {
		kfree(ret->alloced_addr);
		kfree(ret);
		return NULL;
	}

	ret->idx = 0;
	ret->dev = dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_create);
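
/**
 * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
 *                            results.
 * @s: the storage memory to be destroyed.
 */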
void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
{
	dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
			 DMA_FROM_DEVICE);
	kfree(s->alloced_addr);
	kfree(s);
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);
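
/**
 * dpaa2_io_store_next() - Determine when the next dequeue result is available.
 * @s: the dpaa2_io_store object.
 * @is_last: indicate whether this is the last frame in the pull command.
 *
 * When an object driver performs dequeues to a dpaa2_io_store, this function
 * can be used to determine when the next frame result is available. Once
 * this function returns non-NULL, a subsequent call to it will try to find
 * the next dequeue result.
 *
 * Note that if a pull-dequeue has a NULL result because the target FQ/channel
 * was empty, then this function will also return NULL rather than expecting
 * the caller to always check for this. As such, "is_last" can be used to
 * differentiate between "end-of-empty-dequeue" and "still-waiting".
 *
 * Return the dequeue result for a valid dequeue result, or NULL for an empty
 * dequeue.
 */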
struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
{
	int match;
	struct dpaa2_dq *ret = &s->vaddr[s->idx];

	match = qbman_result_has_new_result(s->swp, ret);
	if (!match) {
		*is_last = 0;
		return NULL;
	}

	s->idx++;

	if (dpaa2_dq_is_pull_complete(ret)) {
		*is_last = 1;
		s->idx = 0;
		/*
		 * If we get an empty dequeue result to terminate a zero-results
		 * vdqcr, return NULL to the caller rather than expecting him to
		 * check non-NULL results every time.
		 */
		if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
			ret = NULL;
	} else {
		prefetch(&s->vaddr[s->idx]);
		*is_last = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_next);
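
/**
 * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
 * @d: the given DPIO object.
 * @fqid: the id of the frame queue to be queried.
 * @fcnt: the queried frame count.
 * @bcnt: the queried byte count.
 *
 * Knowing the FQ count at run-time can be useful in debugging situations.
 * The instantaneous frame count and byte count are returned.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */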
int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
			    u32 *fcnt, u32 *bcnt)
{
	struct qbman_fq_query_np_rslt state;
	struct qbman_swp *swp;
	unsigned long irqflags;
	int ret;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	swp = d->swp;
	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	ret = qbman_fq_query_state(swp, fqid, &state);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
	if (ret)
		return ret;
	*fcnt = qbman_fq_state_frame_count(&state);
	*bcnt = qbman_fq_state_byte_count(&state);

	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_fq_count);
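
/**
 * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
 * buffer pool.
 * @d: the given DPIO object.
 * @bpid: the index of the buffer pool to be queried.
 * @num: the queried number of buffers in the buffer pool.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */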
int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
{
	struct qbman_bp_query_rslt state;
	struct qbman_swp *swp;
	unsigned long irqflags;
	int ret;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	swp = d->swp;
	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	ret = qbman_bp_query(swp, bpid, &state);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
	if (ret)
		return ret;
	*num = qbman_bp_info_num_free_bufs(&state);
	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);