#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/fsl/dpaa2-global.h>

#include "qbman-portal.h"

/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((u32)0x80)

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE 0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI 0x800
#define QBMAN_CINH_SWP_EQCR_CI 0x840
#define QBMAN_CINH_SWP_EQAR 0x8c0
#define QBMAN_CINH_SWP_CR_RT 0x900
#define QBMAN_CINH_SWP_VDQCR_RT 0x940
#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
#define QBMAN_CINH_SWP_DQPI 0xa00
#define QBMAN_CINH_SWP_DCAP 0xac0
#define QBMAN_CINH_SWP_SDQCR 0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
#define QBMAN_CINH_SWP_RCR_PI 0xc00
#define QBMAN_CINH_SWP_RAR 0xcc0
#define QBMAN_CINH_SWP_ISR 0xe00
#define QBMAN_CINH_SWP_IER 0xe40
#define QBMAN_CINH_SWP_ISDR 0xe80
#define QBMAN_CINH_SWP_IIR 0xec0

/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR 0x600
#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR 0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840
#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840

/* CENA register offsets in memory-backed mode */
#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR_MEM 0x1600
#define QBMAN_CENA_SWP_RR_MEM 0x1680
#define QBMAN_CENA_SWP_VDQCR_MEM 0x1780

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)

/* Token written into pull commands and checked in the dequeue responses */
#define QMAN_DQ_TOKEN_VALID 1

/* SDQCR attribute codes */
#define QB_SDQCR_FC_SHIFT 29
#define QB_SDQCR_FC_MASK 0x1
#define QB_SDQCR_DCT_SHIFT 24
#define QB_SDQCR_DCT_MASK 0x3
#define QB_SDQCR_TOK_SHIFT 16
#define QB_SDQCR_TOK_MASK 0xff
#define QB_SDQCR_SRC_SHIFT 0
#define QB_SDQCR_SRC_MASK 0xffff

/* Opaque token placed in static (push) dequeue commands */
#define QMAN_SDQCR_TOKEN 0xbb

#define QBMAN_EQCR_DCA_IDXMASK 0x0f
#define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31)

#define EQ_DESC_SIZE_WITHOUT_FD 29
#define EQ_DESC_SIZE_FD_START 32

enum qbman_sdqcr_dct {
        qbman_sdqcr_dct_null = 0,
        qbman_sdqcr_dct_prio_ics,
        qbman_sdqcr_dct_active_ics,
        qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
        qbman_sdqcr_fc_one = 0,
        qbman_sdqcr_fc_up_to_3 = 1
};

/*
 * Internal portal-access routines.  The qbman_swp_*_ptr hooks below default
 * to the "direct" variants and are switched to the "memory-backed" variants
 * by qbman_swp_init() on QMan revision 5000 and later.
 */
static int qbman_swp_enqueue_direct(struct qbman_swp *s,
                                    const struct qbman_eq_desc *d,
                                    const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
                                      const struct qbman_eq_desc *d,
                                      const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
                                             const struct qbman_eq_desc *d,
                                             const struct dpaa2_fd *fd,
                                             uint32_t *flags,
                                             int num_frames);
static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                                               const struct qbman_eq_desc *d,
                                               const struct dpaa2_fd *fd,
                                               uint32_t *flags,
                                               int num_frames);
static int
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
                                       const struct qbman_eq_desc *d,
                                       const struct dpaa2_fd *fd,
                                       int num_frames);
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
                                             const struct qbman_eq_desc *d,
                                             const struct dpaa2_fd *fd,
                                             int num_frames);
static int qbman_swp_pull_direct(struct qbman_swp *s,
                                 struct qbman_pull_desc *d);
static int qbman_swp_pull_mem_back(struct qbman_swp *s,
                                   struct qbman_pull_desc *d);

const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);

static int qbman_swp_release_direct(struct qbman_swp *s,
                                    const struct qbman_release_desc *d,
                                    const u64 *buffers,
                                    unsigned int num_buffers);
static int qbman_swp_release_mem_back(struct qbman_swp *s,
                                      const struct qbman_release_desc *d,
                                      const u64 *buffers,
                                      unsigned int num_buffers);

/* Function pointers dispatching to the portal-access variant in use */
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
                             const struct qbman_eq_desc *d,
                             const struct dpaa2_fd *fd)
        = qbman_swp_enqueue_direct;

int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
                                      const struct qbman_eq_desc *d,
                                      const struct dpaa2_fd *fd,
                                      uint32_t *flags,
                                      int num_frames)
        = qbman_swp_enqueue_multiple_direct;

int
(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
                                       const struct qbman_eq_desc *d,
                                       const struct dpaa2_fd *fd,
                                       int num_frames)
        = qbman_swp_enqueue_multiple_desc_direct;

int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
        = qbman_swp_pull_direct;

const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
        = qbman_swp_dqrr_next_direct;

int (*qbman_swp_release_ptr)(struct qbman_swp *s,
                             const struct qbman_release_desc *d,
                             const u64 *buffers,
                             unsigned int num_buffers)
        = qbman_swp_release_direct;

/* Portal access helpers */

static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
{
        return readl_relaxed(p->addr_cinh + offset);
}

static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
                                        u32 value)
{
        writel_relaxed(value, p->addr_cinh + offset);
}

static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
{
        return p->addr_cena + offset;
}

#define QBMAN_CINH_SWP_CFG 0xd00

#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT 16
#define SWP_CFG_CPBS_SHIFT 15
#define SWP_CFG_WN_SHIFT 14
#define SWP_CFG_RPM_SHIFT 12
#define SWP_CFG_DCM_SHIFT 10
#define SWP_CFG_EPM_SHIFT 8
#define SWP_CFG_VPM_SHIFT 7
#define SWP_CFG_CPM_SHIFT 6
#define SWP_CFG_SD_SHIFT 5
#define SWP_CFG_SP_SHIFT 4
#define SWP_CFG_SE_SHIFT 3
#define SWP_CFG_DP_SHIFT 2
#define SWP_CFG_DE_SHIFT 1
#define SWP_CFG_EP_SHIFT 0

static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
                                    u8 epm, int sd, int sp, int se,
                                    int dp, int de, int ep)
{
        return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
                est << SWP_CFG_EST_SHIFT |
                wn << SWP_CFG_WN_SHIFT |
                rpm << SWP_CFG_RPM_SHIFT |
                dcm << SWP_CFG_DCM_SHIFT |
                epm << SWP_CFG_EPM_SHIFT |
                sd << SWP_CFG_SD_SHIFT |
                sp << SWP_CFG_SP_SHIFT |
                se << SWP_CFG_SE_SHIFT |
                dp << SWP_CFG_DP_SHIFT |
                de << SWP_CFG_DE_SHIFT |
                ep << SWP_CFG_EP_SHIFT);
}

#define QMAN_RT_MODE 0x00000100

static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
{
        /* 'first' is included, 'last' is excluded */
        if (first <= last)
                return last - first;
        else
                return (2 * ringsize) - (first - last);
}
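
/*
 * Worked example for qm_cyc_diff() (illustrative only): the EQCR indices
 * carry an extra wrap bit, so for a ring size of 8 they run over 0..15.
 * With first = 2 and last = 6 the cyclic distance is simply 6 - 2 = 4;
 * with first = 14 and last = 3 it is (2 * 8) - (14 - 3) = 5.
 */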

/**
 * qbman_swp_init() - Create a functional object representing the given
 *                    QBMan portal descriptor.
 * @d: the given qbman swp descriptor
 *
 * Return qbman_swp portal for success, NULL if the object cannot
 * be created.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
        struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
        u32 reg;
        u32 mask_size;
        u32 eqcr_pi;

        if (!p)
                return NULL;

        spin_lock_init(&p->access_spinlock);

        p->desc = d;
        p->mc.valid_bit = QB_VALID_BIT;
        p->sdq = 0;
        p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
        p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
        p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
                p->mr.valid_bit = QB_VALID_BIT;

        atomic_set(&p->vdq.available, 1);
        p->vdq.valid_bit = QB_VALID_BIT;
        p->dqrr.next_idx = 0;
        p->dqrr.valid_bit = QB_VALID_BIT;

        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
                p->dqrr.dqrr_size = 4;
                p->dqrr.reset_bug = 1;
        } else {
                p->dqrr.dqrr_size = 8;
                p->dqrr.reset_bug = 0;
        }

        p->addr_cena = d->cena_bar;
        p->addr_cinh = d->cinh_bar;

        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
                reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
                                        1,
                                        0,
                                        3,
                                        2,
                                        2,
                                        1,
                                        1,
                                        1,
                                        1,
                                        0,
                                        0);
        } else {
                /* A memory-backed portal's cache-enabled area must start out
                 * zeroed so that no stale valid bits are seen.
                 */
                memset(p->addr_cena, 0, 64 * 1024);
                reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
                                        1,
                                        1,
                                        3,
                                        2,
                                        0,
                                        1,
                                        1,
                                        1,
                                        1,
                                        0,
                                        0);
                reg |= 1 << SWP_CFG_CPBS_SHIFT |
                       1 << SWP_CFG_VPM_SHIFT |
                       1 << SWP_CFG_CPM_SHIFT;
        }

        qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
        reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
        if (!reg) {
                pr_err("qbman: the portal is not enabled!\n");
                kfree(p);
                return NULL;
        }

        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
                qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
                qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
        }

        /*
         * SDQCR needs to be initialized to 0 when no channels are being
         * dequeued from, or else the QMan HW will indicate an error.  The
         * value computed above is applied once dequeues from a specific
         * channel are enabled via qbman_swp_push_set().
         */
        qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);

        p->eqcr.pi_ring_size = 8;
        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
                p->eqcr.pi_ring_size = 32;
                qbman_swp_enqueue_ptr =
                        qbman_swp_enqueue_mem_back;
                qbman_swp_enqueue_multiple_ptr =
                        qbman_swp_enqueue_multiple_mem_back;
                qbman_swp_enqueue_multiple_desc_ptr =
                        qbman_swp_enqueue_multiple_desc_mem_back;
                qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
                qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
                qbman_swp_release_ptr = qbman_swp_release_mem_back;
        }

        for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
                p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
        eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
        p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
        p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
        p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
                        & p->eqcr.pi_ci_mask;
        p->eqcr.available = p->eqcr.pi_ring_size;

        return p;
}
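
/*
 * Illustrative sketch (not part of this driver): a caller such as the dpio
 * service is expected to map the portal's cache-enabled and cache-inhibited
 * regions, fill in the qbman_swp_desc fields used above (cena_bar, cinh_bar,
 * qman_version) and then create the portal object.  cena_va, cinh_va and
 * qman_rev are hypothetical caller-side names.
 *
 *	struct qbman_swp_desc pd = {
 *		.cena_bar = cena_va,      // ioremapped cache-enabled area
 *		.cinh_bar = cinh_va,      // ioremapped cache-inhibited area
 *		.qman_version = qman_rev,
 *	};
 *	struct qbman_swp *swp = qbman_swp_init(&pd);
 *
 *	if (!swp)
 *		return -ENODEV;           // no memory or portal not enabled
 */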

/**
 * qbman_swp_finish() - Destroy the portal object created by qbman_swp_init().
 * @p: the qbman_swp object to be destroyed
 */
void qbman_swp_finish(struct qbman_swp *p)
{
        kfree(p);
}

/**
 * qbman_swp_interrupt_read_status() - Get the interrupt status (SWP_ISR).
 * @p: the given software portal
 */
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
        return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
}

/**
 * qbman_swp_interrupt_clear_status() - Clear interrupt status bits.
 * @p: the given software portal
 * @mask: the status bits to be cleared
 */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
{
        qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
}

/**
 * qbman_swp_interrupt_get_trigger() - Get the interrupt enable mask (SWP_IER).
 * @p: the given software portal
 */
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
        return qbman_read_register(p, QBMAN_CINH_SWP_IER);
}

/**
 * qbman_swp_interrupt_set_trigger() - Enable interrupt sources for the portal.
 * @p: the given software portal
 * @mask: the interrupt sources to enable
 */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
{
        qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
}

/**
 * qbman_swp_interrupt_get_inhibit() - Read the interrupt inhibit state.
 * @p: the given software portal
 */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
        return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
}

/**
 * qbman_swp_interrupt_set_inhibit() - Inhibit or allow portal interrupts.
 * @p: the given software portal
 * @inhibit: non-zero to inhibit all interrupts, zero to allow them
 */
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
        qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}

/*
 * Management command support: a command buffer is claimed with
 * qbman_swp_mc_start(), filled in by the caller, committed with
 * qbman_swp_mc_submit() and its result polled with qbman_swp_mc_result().
 */

void *qbman_swp_mc_start(struct qbman_swp *p)
{
        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
                return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
        else
                return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
}

/*
 * Commit a command by writing the verb byte with the valid bit; in
 * memory-backed mode the ring-tracking register is also poked.
 */
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
{
        u8 *v = cmd;

        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
                dma_wmb();
                *v = cmd_verb | p->mc.valid_bit;
        } else {
                *v = cmd_verb | p->mc.valid_bit;
                dma_wmb();
                qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
        }
}

/*
 * Check for a completed management-command response; returns NULL if the
 * response is not yet available.
 */
void *qbman_swp_mc_result(struct qbman_swp *p)
{
        u32 *ret, verb;

        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
                ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
                /*
                 * Remove the valid bit - the command has completed if the
                 * rest of the verb is non-zero.
                 */
                verb = ret[0] & ~QB_VALID_BIT;
                if (!verb)
                        return NULL;
                p->mc.valid_bit ^= QB_VALID_BIT;
        } else {
                ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
                /* Command completed only once the valid bit has toggled */
                if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
                        return NULL;

                verb = ret[0] & ~QB_VALID_BIT;
                if (!verb)
                        return NULL;
                p->mr.valid_bit ^= QB_VALID_BIT;
        }

        return ret;
}
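
/*
 * Typical management-command cycle, as used by the acquire/query helpers
 * later in this file (sketch only; qbman_swp_mc_complete() is assumed to be
 * the qbman-portal.h helper that couples qbman_swp_mc_submit() with polling
 * qbman_swp_mc_result()):
 *
 *	cmd = qbman_swp_mc_start(s);
 *	if (!cmd)
 *		return -EBUSY;
 *	// ...fill in the command-specific fields...
 *	rslt = qbman_swp_mc_complete(s, cmd, verb);
 *	if (!rslt)
 *		return -EIO;	// no response from QBMan
 */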

#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
enum qb_enqueue_commands {
        enqueue_empty = 0,
        enqueue_response_always = 1,
        enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7

/**
 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 *                         default/starting state.
 * @d: the given enqueue descriptor
 */
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
        memset(d, 0, sizeof(*d));
}

/**
 * qbman_eq_desc_set_no_orp() - Set an enqueue descriptor without ORP.
 * @d: the enqueue descriptor
 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 *                   rejections returned on a FQ
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
        d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
        if (respond_success)
                d->verb |= enqueue_response_always;
        else
                d->verb |= enqueue_rejects_to_fq;
}

/*
 * Exactly one of the following descriptor "targets" should be set; calling
 * either setter replaces the effect of any prior call to the other:
 *  - enqueue to a frame queue
 *  - enqueue to a queuing destination
 */

/**
 * qbman_eq_desc_set_fq() - Set the FQ for the enqueue command.
 * @d:    the enqueue descriptor
 * @fqid: the id of the frame queue to be enqueued to
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
{
        d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
        d->tgtid = cpu_to_le32(fqid);
}

/**
 * qbman_eq_desc_set_qd() - Set the queuing destination for the enqueue command.
 * @d:       the enqueue descriptor
 * @qdid:    the id of the queuing destination to be enqueued to
 * @qd_bin:  the queuing destination bin
 * @qd_prio: the queuing destination priority
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
                          u32 qd_bin, u32 qd_prio)
{
        d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
        d->tgtid = cpu_to_le32(qdid);
        d->qdbin = cpu_to_le16(qd_bin);
        d->qpri = qd_prio;
}
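
/*
 * Example enqueue (illustrative sketch): build a descriptor and push one
 * frame descriptor to a frame queue.  qbman_swp_enqueue() is assumed to be
 * the qbman-portal.h wrapper that dispatches through qbman_swp_enqueue_ptr;
 * swp, fqid and fd are caller-provided.
 *
 *	struct qbman_eq_desc ed;
 *
 *	qbman_eq_desc_clear(&ed);
 *	qbman_eq_desc_set_no_orp(&ed, 0);	// rejections go back to the FQ
 *	qbman_eq_desc_set_fq(&ed, fqid);
 *	if (qbman_swp_enqueue(swp, &ed, fd))	// -EBUSY when the EQCR is full
 *		...retry later...
 */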

#define EQAR_IDX(eqar)     ((eqar) & 0x7)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

#define QB_RT_BIT ((u32)0x100)

/**
 * qbman_swp_enqueue_direct() - Issue an enqueue command.
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Return 0 for a successful enqueue, -EBUSY if the EQCR is not ready.
 */
static
int qbman_swp_enqueue_direct(struct qbman_swp *s,
                             const struct qbman_eq_desc *d,
                             const struct dpaa2_fd *fd)
{
        int flags = 0;
        int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);

        if (ret >= 0)
                ret = 0;
        else
                ret = -EBUSY;
        return ret;
}

/**
 * qbman_swp_enqueue_mem_back() - Issue an enqueue command.
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Return 0 for a successful enqueue, -EBUSY if the EQCR is not ready.
 */
static
int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
                               const struct qbman_eq_desc *d,
                               const struct dpaa2_fd *fd)
{
        int flags = 0;
        int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);

        if (ret >= 0)
                ret = 0;
        else
                ret = -EBUSY;
        return ret;
}

/**
 * qbman_swp_enqueue_multiple_direct() - Issue a multi-frame enqueue command
 *                                       using one enqueue descriptor.
 * @s:          the software portal used for enqueue
 * @d:          the enqueue descriptor
 * @fd:         table of frame descriptors to be enqueued
 * @flags:      table of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of frame descriptors to be enqueued
 *
 * Return the number of frame descriptors enqueued.
 */
static
int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
                                      const struct qbman_eq_desc *d,
                                      const struct dpaa2_fd *fd,
                                      uint32_t *flags,
                                      int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = (uint32_t *)d;
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        spin_lock(&s->access_spinlock);
        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;

        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
                s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
                s->eqcr.ci &= full_mask;

                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available) {
                        spin_unlock(&s->access_spinlock);
                        return 0;
                }
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                /* Skip copying the verb */
                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
                memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
                       &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        dma_wmb();

        /* Set the verb byte, substituting in the valid bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Advance the producer index past the entries written above */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (size_t)s->addr_cena;
        for (i = 0; i < num_enqueued; i++)
                eqcr_pi++;
        s->eqcr.pi = eqcr_pi & full_mask;
        spin_unlock(&s->access_spinlock);

        return num_enqueued;
}

/**
 * qbman_swp_enqueue_multiple_mem_back() - Issue a multi-frame enqueue command
 *                                         using one enqueue descriptor.
 * @s:          the software portal used for enqueue
 * @d:          the enqueue descriptor
 * @fd:         table of frame descriptors to be enqueued
 * @flags:      table of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of frame descriptors to be enqueued
 *
 * Return the number of frame descriptors enqueued.
 */
static
int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                                        const struct qbman_eq_desc *d,
                                        const struct dpaa2_fd *fd,
                                        uint32_t *flags,
                                        int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = (uint32_t *)(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
        unsigned long irq_flags;

        spin_lock(&s->access_spinlock);
        local_irq_save(irq_flags);

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
                s->eqcr.ci = *p & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available) {
                        local_irq_restore(irq_flags);
                        spin_unlock(&s->access_spinlock);
                        return 0;
                }
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                /* Skip copying the verb */
                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
                memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
                       &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        /* Set the verb byte, substituting in the valid bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        dma_wmb();
        qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
                             (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
        local_irq_restore(irq_flags);
        spin_unlock(&s->access_spinlock);

        return num_enqueued;
}

static
int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
                                           const struct qbman_eq_desc *d,
                                           const struct dpaa2_fd *fd,
                                           int num_frames)
{
        uint32_t *p;
        const uint32_t *cl;
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
                s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;

        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = (uint32_t *)(&d[i]);

                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
                memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
                       &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        dma_wmb();

        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = (uint32_t *)(&d[i]);
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++)
                eqcr_pi++;
        s->eqcr.pi = eqcr_pi & full_mask;

        return num_enqueued;
}

static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
                                             const struct qbman_eq_desc *d,
                                             const struct dpaa2_fd *fd,
                                             int num_frames)
{
        uint32_t *p;
        const uint32_t *cl;
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
                s->eqcr.ci = *p & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;

        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = (uint32_t *)(&d[i]);

                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
                memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
                       &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = (uint32_t *)(&d[i]);
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        s->eqcr.pi = eqcr_pi & full_mask;

        dma_wmb();
        qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
                             (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);

        return num_enqueued;
}

/**
 * qbman_swp_push_get() - Get the push dequeue setup.
 * @s:           the software portal object
 * @channel_idx: the channel index to query
 * @enabled:     returns whether push dequeue is enabled for the given channel
 */
void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
{
        u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

        WARN_ON(channel_idx > 15);
        *enabled = src | (1 << channel_idx);
}

/**
 * qbman_swp_push_set() - Enable or disable push dequeue.
 * @s:           the software portal object
 * @channel_idx: the channel index (0 to 15)
 * @enable:      enable or disable push dequeue
 */
void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
{
        u16 dqsrc;

        WARN_ON(channel_idx > 15);
        if (enable)
                s->sdq |= 1 << channel_idx;
        else
                s->sdq &= ~(1 << channel_idx);

        /*
         * Read back the complete source map.  If no channels remain enabled,
         * SDQCR must be written as 0 or else QMan will assert errors.
         */
        dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
        if (dqsrc != 0)
                qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
        else
                qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
}
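
/*
 * Example (sketch): turn static/push dequeue on and off for channel index 0.
 * As implemented above, SDQCR is only written with a non-zero source mask;
 * when the last channel is disabled the register is cleared instead.
 *
 *	qbman_swp_push_set(swp, 0, 1);	// frames now arrive via the DQRR
 *	...
 *	qbman_swp_push_set(swp, 0, 0);	// stop push dequeues again
 */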

/* Volatile (pull) dequeue command attribute codes */
#define QB_VDQCR_VERB_DCT_SHIFT 0
#define QB_VDQCR_VERB_DT_SHIFT 2
#define QB_VDQCR_VERB_RLS_SHIFT 4
#define QB_VDQCR_VERB_WAE_SHIFT 5

enum qb_pull_dt_e {
        qb_pull_dt_channel,
        qb_pull_dt_workqueue,
        qb_pull_dt_framequeue
};

/**
 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 *                           default/starting state.
 * @d: the pull dequeue descriptor to be cleared
 */
void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
        memset(d, 0, sizeof(*d));
}

/**
 * qbman_pull_desc_set_storage() - Set the pull dequeue storage.
 * @d:            the pull dequeue descriptor to be set
 * @storage:      the pointer of the memory to store the dequeue result
 * @storage_phys: the physical address of the storage memory
 * @stash:        whether to stash the dequeue result to the cache
 *
 * If called with 'storage' as NULL, the pull dequeues will produce results
 * to DQRR.  If 'storage' is non-NULL, results are produced to the given
 * memory location (using the DMA address the caller provides in
 * 'storage_phys'), and 'stash' controls whether those writes carry a
 * cache-warming attribute.
 */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
                                 struct dpaa2_dq *storage,
                                 dma_addr_t storage_phys,
                                 int stash)
{
        /* Save the virtual address */
        d->rsp_addr_virt = (u64)(uintptr_t)storage;

        if (!storage) {
                d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
                return;
        }
        d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
        if (stash)
                d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
        else
                d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

        d->rsp_addr = cpu_to_le64(storage_phys);
}

/**
 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued.
 * @d:         the pull dequeue descriptor to be set
 * @numframes: number of frames to be set, must be between 1 and 16, inclusive
 */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
{
        d->numf = numframes - 1;
}

/*
 * Exactly one of the following descriptor "targets" should be set; calling
 * any one of these will replace the effect of any prior call:
 *  - pull dequeue from the given frame queue (FQ)
 *  - pull dequeue from any FQ in the given work queue (WQ)
 *  - pull dequeue from any FQ in any WQ in the given channel
 */

/**
 * qbman_pull_desc_set_fq() - Set the fqid from which the dequeue command
 *                            dequeues.
 * @d:    the pull dequeue descriptor to be set
 * @fqid: the frame queue index of the given FQ
 */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
{
        d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
        d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
        d->dq_src = cpu_to_le32(fqid);
}

/**
 * qbman_pull_desc_set_wq() - Set the wqid from which the dequeue command
 *                            dequeues.
 * @d:    the pull dequeue descriptor to be set
 * @wqid: composed of channel id and wqid within the channel
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
                            enum qbman_pull_type_e dct)
{
        d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
        d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
        d->dq_src = cpu_to_le32(wqid);
}

/**
 * qbman_pull_desc_set_channel() - Set the channel id from which the dequeue
 *                                 command dequeues.
 * @d:    the pull dequeue descriptor to be set
 * @chid: the channel id to be dequeued from
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
                                 enum qbman_pull_type_e dct)
{
        d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
        d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
        d->dq_src = cpu_to_le32(chid);
}
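
/*
 * Example volatile dequeue (illustrative sketch): pull up to 8 frames from a
 * FQ into a caller-provided dpaa2_dq response array.  qbman_swp_pull() is
 * assumed to be the qbman-portal.h wrapper dispatching through
 * qbman_swp_pull_ptr; "storage" and "storage_phys" are the caller's response
 * buffer and its DMA address, and the completion-polling idiom shown here is
 * only one possible way callers may consume the storage.
 *
 *	struct qbman_pull_desc pd;
 *
 *	qbman_pull_desc_clear(&pd);
 *	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
 *	qbman_pull_desc_set_numframes(&pd, 8);
 *	qbman_pull_desc_set_fq(&pd, fqid);
 *	if (qbman_swp_pull(swp, &pd))
 *		return -EBUSY;			// previous pull still running
 *	while (!qbman_result_has_new_result(swp, storage))
 *		cpu_relax();			// first response not written yet
 */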

/**
 * qbman_swp_pull_direct() - Issue the pull dequeue command.
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do a pull dequeue.
 */
static
int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
{
        struct qbman_pull_desc *p;

        if (!atomic_dec_and_test(&s->vdq.available)) {
                atomic_inc(&s->vdq.available);
                return -EBUSY;
        }
        s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
        if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
        else
                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
        p->numf = d->numf;
        p->tok = QMAN_DQ_TOKEN_VALID;
        p->dq_src = d->dq_src;
        p->rsp_addr = d->rsp_addr;
        p->rsp_addr_virt = d->rsp_addr_virt;
        dma_wmb();
        /* Set the verb byte, substituting in the valid bit */
        p->verb = d->verb | s->vdq.valid_bit;
        s->vdq.valid_bit ^= QB_VALID_BIT;

        return 0;
}

/**
 * qbman_swp_pull_mem_back() - Issue the pull dequeue command.
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do a pull dequeue.
 */
static
int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
{
        struct qbman_pull_desc *p;

        if (!atomic_dec_and_test(&s->vdq.available)) {
                atomic_inc(&s->vdq.available);
                return -EBUSY;
        }
        s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
        if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
        else
                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
        p->numf = d->numf;
        p->tok = QMAN_DQ_TOKEN_VALID;
        p->dq_src = d->dq_src;
        p->rsp_addr = d->rsp_addr;
        p->rsp_addr_virt = d->rsp_addr_virt;

        /* Set the verb byte, substituting in the valid bit */
        p->verb = d->verb | s->vdq.valid_bit;
        s->vdq.valid_bit ^= QB_VALID_BIT;
        dma_wmb();
        qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);

        return 0;
}

#define QMAN_DQRR_PI_MASK 0xf

/**
 * qbman_swp_dqrr_next_direct() - Get the next valid DQRR entry.
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries.  Each entry is
 * returned only once, so repeated calls yield a sequence of entries without
 * requiring that they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
        u32 verb;
        u32 response_verb;
        u32 flags;
        struct dpaa2_dq *p;

        /*
         * Before the valid bit can be trusted we have to handle the DQRR
         * reset bug workaround (reset_bug is set for QMan revisions below
         * 4100): consult the cache-inhibited producer index instead.
         */
        if (unlikely(s->dqrr.reset_bug)) {
                u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
                        QMAN_DQRR_PI_MASK;

                /* There are new entries if pi != next_idx */
                if (pi == s->dqrr.next_idx)
                        return NULL;

                /*
                 * Once next_idx has reached the last ring index, every entry
                 * has been written at least once and valid-bit checking is
                 * reliable again, so the workaround can be dropped.
                 */
                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
                        pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
                                 s->dqrr.next_idx, pi);
                        s->dqrr.reset_bug = 0;
                }
                prefetch(qbman_get_cmd(s,
                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
        }

        p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
        verb = p->dq.verb;

        /* Nothing there if the valid bit has the wrong polarity */
        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
                prefetch(qbman_get_cmd(s,
                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
                return NULL;
        }

        /*
         * There is something there.  Advance next_idx (wrapping at the ring
         * size, which flips the expected valid bit) before returning it.
         */
        s->dqrr.next_idx++;
        s->dqrr.next_idx &= s->dqrr.dqrr_size - 1;
        if (!s->dqrr.next_idx)
                s->dqrr.valid_bit ^= QB_VALID_BIT;

        /*
         * If this is the final response to a volatile dequeue command,
         * indicate that the VDQCR is available again.
         */
        flags = p->dq.stat;
        response_verb = verb & QBMAN_RESULT_MASK;
        if ((response_verb == QBMAN_RESULT_DQ) &&
            (flags & DPAA2_DQ_STAT_VOLATILE) &&
            (flags & DPAA2_DQ_STAT_EXPIRED))
                atomic_inc(&s->vdq.available);

        prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

        return p;
}

const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
{
        u32 verb;
        u32 response_verb;
        u32 flags;
        struct dpaa2_dq *p;

        if (unlikely(s->dqrr.reset_bug)) {
                u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
                        QMAN_DQRR_PI_MASK;

                if (pi == s->dqrr.next_idx)
                        return NULL;

                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
                        pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
                                 s->dqrr.next_idx, pi);
                        s->dqrr.reset_bug = 0;
                }
                prefetch(qbman_get_cmd(s,
                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
        }

        p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
        verb = p->dq.verb;

        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
                prefetch(qbman_get_cmd(s,
                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
                return NULL;
        }

        s->dqrr.next_idx++;
        s->dqrr.next_idx &= s->dqrr.dqrr_size - 1;
        if (!s->dqrr.next_idx)
                s->dqrr.valid_bit ^= QB_VALID_BIT;

        flags = p->dq.stat;
        response_verb = verb & QBMAN_RESULT_MASK;
        if ((response_verb == QBMAN_RESULT_DQ) &&
            (flags & DPAA2_DQ_STAT_VOLATILE) &&
            (flags & DPAA2_DQ_STAT_EXPIRED))
                atomic_inc(&s->vdq.available);

        prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

        return p;
}

void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
        qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
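
/*
 * Example DQRR poll loop (illustrative sketch): qbman_swp_dqrr_next() is
 * assumed to be the qbman-portal.h wrapper dispatching through
 * qbman_swp_dqrr_next_ptr, and dpaa2_dq_fd() is the dpaa2-global.h accessor
 * for the frame descriptor inside a dequeue result.
 *
 *	const struct dpaa2_dq *dq;
 *
 *	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
 *		...process dpaa2_dq_fd(dq)...
 *		qbman_swp_dqrr_consume(swp, dq);	// ack the DQRR entry
 *	}
 */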

/**
 * qbman_result_has_new_result() - Check and get the dequeue response from the
 *                                 storage memory set in the pull dequeue
 *                                 command.
 * @s:  the software portal object
 * @dq: the dequeue result read from the memory
 *
 * Return 1 if a valid dequeue result was retrieved, 0 otherwise.
 */
int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
        if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
                return 0;

        /*
         * Set the token back to 0 so a change back to QMAN_DQ_TOKEN_VALID can
         * be detected on the next pass.  Const is cast away here because
         * users are meant to treat dequeue responses as read-only.
         */
        ((struct dpaa2_dq *)dq)->dq.tok = 0;

        /*
         * The VDQCR becomes available again when the current result sits in
         * the first storage location of the busy command.
         */
        if (s->vdq.storage == dq) {
                s->vdq.storage = NULL;
                atomic_inc(&s->vdq.available);
        }

        return 1;
}

/**
 * qbman_release_desc_clear() - Clear the contents of a descriptor to
 *                              default/starting state.
 * @d: the release descriptor to be cleared
 */
void qbman_release_desc_clear(struct qbman_release_desc *d)
{
        memset(d, 0, sizeof(*d));
        d->verb = 1 << 5; /* Release Command Valid */
}

/**
 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to.
 * @d:    the release descriptor to be set
 * @bpid: the bpid value to be set
 */
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
{
        d->bpid = cpu_to_le16(bpid);
}

/**
 * qbman_release_desc_set_rcdi() - Determine whether or not the portal's RCDI
 *                                 interrupt source should be asserted after
 *                                 the release command is completed.
 * @d:      the release descriptor to be set
 * @enable: enable (1) or disable (0) the RCDI interrupt
 */
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
        if (enable)
                d->verb |= 1 << 6;
        else
                d->verb &= ~(1 << 6);
}
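
/*
 * Example buffer release (illustrative sketch): return two buffers to buffer
 * pool "bpid".  qbman_swp_release() is assumed to be the qbman-portal.h
 * wrapper dispatching through qbman_swp_release_ptr; addr0 and addr1 are
 * hypothetical buffer addresses.
 *
 *	struct qbman_release_desc rd;
 *	u64 bufs[2] = { addr0, addr1 };
 *
 *	qbman_release_desc_clear(&rd);
 *	qbman_release_desc_set_bpid(&rd, bpid);
 *	if (qbman_swp_release(swp, &rd, bufs, 2))	// -EBUSY if RCR not ready
 *		...retry later...
 */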

#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

/**
 * qbman_swp_release_direct() - Issue a buffer release command.
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     pointer to the buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
int qbman_swp_release_direct(struct qbman_swp *s,
                             const struct qbman_release_desc *d,
                             const u64 *buffers, unsigned int num_buffers)
{
        int i;
        struct qbman_release_desc *p;
        u32 rar;

        if (!num_buffers || (num_buffers > 7))
                return -EINVAL;

        rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
        if (!RAR_SUCCESS(rar))
                return -EBUSY;

        /* Start the release command */
        p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

        /* Copy the caller's buffer pointers to the command */
        for (i = 0; i < num_buffers; i++)
                p->buf[i] = cpu_to_le64(buffers[i]);
        p->bpid = d->bpid;

        /*
         * Set the verb byte, substituting in the valid bit and the number
         * of buffers.
         */
        dma_wmb();
        p->verb = d->verb | RAR_VB(rar) | num_buffers;

        return 0;
}

/**
 * qbman_swp_release_mem_back() - Issue a buffer release command.
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     pointer to the buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
int qbman_swp_release_mem_back(struct qbman_swp *s,
                               const struct qbman_release_desc *d,
                               const u64 *buffers, unsigned int num_buffers)
{
        int i;
        struct qbman_release_desc *p;
        u32 rar;

        if (!num_buffers || (num_buffers > 7))
                return -EINVAL;

        rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
        if (!RAR_SUCCESS(rar))
                return -EBUSY;

        /* Start the release command */
        p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));

        /* Copy the caller's buffer pointers to the command */
        for (i = 0; i < num_buffers; i++)
                p->buf[i] = cpu_to_le64(buffers[i]);
        p->bpid = d->bpid;

        p->verb = d->verb | RAR_VB(rar) | num_buffers;
        dma_wmb();
        qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
                             RAR_IDX(rar) * 4, QMAN_RT_MODE);

        return 0;
}

struct qbman_acquire_desc {
        u8 verb;
        u8 reserved;
        __le16 bpid;
        u8 num;
        u8 reserved2[59];
};

struct qbman_acquire_rslt {
        u8 verb;
        u8 rslt;
        __le16 reserved;
        u8 num;
        u8 reserved2[3];
        __le64 buf[7];
};

/**
 * qbman_swp_acquire() - Try to acquire buffers from a buffer pool.
 * @s:           the software portal object
 * @bpid:        the buffer pool id
 * @buffers:     array to receive the acquired buffer addresses
 * @num_buffers: number of buffers to acquire, must be less than 8
 *
 * Return the number of buffers acquired, or a negative error code if the
 * acquire command fails.
 */
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
                      unsigned int num_buffers)
{
        struct qbman_acquire_desc *p;
        struct qbman_acquire_rslt *r;
        int i;

        if (!num_buffers || (num_buffers > 7))
                return -EINVAL;

        /* Start the management command */
        p = qbman_swp_mc_start(s);

        if (!p)
                return -EBUSY;

        /* Encode the caller-provided attributes */
        p->bpid = cpu_to_le16(bpid);
        p->num = num_buffers;

        /* Complete the management command */
        r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
        if (unlikely(!r)) {
                pr_err("qbman: acquire from BPID %d failed, no response\n",
                       bpid);
                return -EIO;
        }

        /* Decode the outcome */
        WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);

        /* Determine success or failure */
        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
                       bpid, r->rslt);
                return -EIO;
        }

        WARN_ON(r->num > num_buffers);

        /* Copy the acquired buffers to the caller's array */
        for (i = 0; i < r->num; i++)
                buffers[i] = le64_to_cpu(r->buf[i]);

        return (int)r->num;
}
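
/*
 * Example acquire (illustrative sketch): pull up to 7 buffers back out of
 * pool "bpid"; the return value is the number actually acquired, or a
 * negative errno.
 *
 *	u64 bufs[7];
 *	int n = qbman_swp_acquire(swp, bpid, bufs, 7);
 *
 *	if (n < 0)
 *		...portal busy or the acquire command failed...
 */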

struct qbman_alt_fq_state_desc {
        u8 verb;
        u8 reserved[3];
        __le32 fqid;
        u8 reserved2[56];
};

struct qbman_alt_fq_state_rslt {
        u8 verb;
        u8 rslt;
        u8 reserved[62];
};

#define ALT_FQ_FQID_MASK 0x00FFFFFF

int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
                           u8 alt_fq_verb)
{
        struct qbman_alt_fq_state_desc *p;
        struct qbman_alt_fq_state_rslt *r;

        p = qbman_swp_mc_start(s);
        if (!p)
                return -EBUSY;

        p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);

        r = qbman_swp_mc_complete(s, p, alt_fq_verb);
        if (unlikely(!r)) {
                pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
                       alt_fq_verb);
                return -EIO;
        }

        WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);

        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
                       fqid, r->verb, r->rslt);
                return -EIO;
        }

        return 0;
}

struct qbman_cdan_ctrl_desc {
        u8 verb;
        u8 reserved;
        __le16 ch;
        u8 we;
        u8 ctrl;
        __le16 reserved2;
        __le64 cdan_ctx;
        u8 reserved3[48];
};

struct qbman_cdan_ctrl_rslt {
        u8 verb;
        u8 rslt;
        __le16 ch;
        u8 reserved[60];
};

int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
                       u8 we_mask, u8 cdan_en,
                       u64 ctx)
{
        struct qbman_cdan_ctrl_desc *p = NULL;
        struct qbman_cdan_ctrl_rslt *r = NULL;

        p = qbman_swp_mc_start(s);
        if (!p)
                return -EBUSY;

        p->ch = cpu_to_le16(channelid);
        p->we = we_mask;
        if (cdan_en)
                p->ctrl = 1;
        else
                p->ctrl = 0;
        p->cdan_ctx = cpu_to_le64(ctx);

        r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
        if (unlikely(!r)) {
                pr_err("qbman: wqchan config failed, no response\n");
                return -EIO;
        }

        WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);

        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
                       channelid, r->rslt);
                return -EIO;
        }

        return 0;
}

#define QBMAN_RESPONSE_VERB_MASK 0x7f
#define QBMAN_FQ_QUERY_NP 0x45
#define QBMAN_BP_QUERY 0x32

struct qbman_fq_query_desc {
        u8 verb;
        u8 reserved[3];
        __le32 fqid;
        u8 reserved2[56];
};

int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
                         struct qbman_fq_query_np_rslt *r)
{
        struct qbman_fq_query_desc *p;
        void *resp;

        p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
        if (!p)
                return -EBUSY;

        p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
        resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
        if (!resp) {
                pr_err("qbman: Query FQID %d NP fields failed, no response\n",
                       fqid);
                return -EIO;
        }
        *r = *(struct qbman_fq_query_np_rslt *)resp;

        WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);

        if (r->rslt != QBMAN_MC_RSLT_OK) {
                pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
                       p->fqid, r->rslt);
                return -EIO;
        }

        return 0;
}

u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
{
        return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
}

u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
{
        return le32_to_cpu(r->byte_cnt);
}

struct qbman_bp_query_desc {
        u8 verb;
        u8 reserved;
        __le16 bpid;
        u8 reserved2[60];
};

int qbman_bp_query(struct qbman_swp *s, u16 bpid,
                   struct qbman_bp_query_rslt *r)
{
        struct qbman_bp_query_desc *p;
        void *resp;

        p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
        if (!p)
                return -EBUSY;

        p->bpid = cpu_to_le16(bpid);
        resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
        if (!resp) {
                pr_err("qbman: Query BPID %d fields failed, no response\n",
                       bpid);
                return -EIO;
        }
        *r = *(struct qbman_bp_query_rslt *)resp;

        WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);

        if (r->rslt != QBMAN_MC_RSLT_OK) {
                pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
                       bpid, r->rslt);
                return -EIO;
        }

        return 0;
}

u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
{
        return le32_to_cpu(a->fill);
}