1
2
3
4
5
6
7
8#include <asm/cacheflush.h>
9#include <linux/io.h>
10#include <linux/slab.h>
11#include <linux/spinlock.h>
12#include <soc/fsl/dpaa2-global.h>
13
14#include "qbman-portal.h"
15
16
/* "valid bit" toggled in ring entries/responses to mark newly-written content */
#define QB_VALID_BIT ((u32)0x80)

/* QBMan management command verbs */
#define QBMAN_MC_ACQUIRE 0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* CINH (cache-inhibited) register offsets within the software portal */
#define QBMAN_CINH_SWP_EQCR_PI 0x800
#define QBMAN_CINH_SWP_EQCR_CI 0x840
#define QBMAN_CINH_SWP_EQAR 0x8c0
#define QBMAN_CINH_SWP_CR_RT 0x900
#define QBMAN_CINH_SWP_VDQCR_RT 0x940
#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
#define QBMAN_CINH_SWP_DQPI 0xa00
#define QBMAN_CINH_SWP_DCAP 0xac0
#define QBMAN_CINH_SWP_SDQCR 0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
#define QBMAN_CINH_SWP_RCR_PI 0xc00
#define QBMAN_CINH_SWP_RAR 0xcc0
#define QBMAN_CINH_SWP_ISR 0xe00
#define QBMAN_CINH_SWP_IER 0xe40
#define QBMAN_CINH_SWP_ISDR 0xe80
#define QBMAN_CINH_SWP_IIR 0xec0
41
42
/* CENA (cache-enabled) ring/command offsets within the software portal */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR 0x600
#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR 0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840
#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840

/* CENA offsets used by the memory-backed portal mode (QMan >= 5000) */
#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR_MEM 0x1600
#define QBMAN_CENA_SWP_RR_MEM 0x1680
#define QBMAN_CENA_SWP_VDQCR_MEM 0x1780

/* Recover a DQRR ring index from a DQRR entry pointer (entries are 64 B) */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)

/* Token written into dequeue commands so responses can be recognised */
#define QMAN_DQ_TOKEN_VALID 1

/* SDQCR (static dequeue command register) attribute codes */
#define QB_SDQCR_FC_SHIFT 29
#define QB_SDQCR_FC_MASK 0x1
#define QB_SDQCR_DCT_SHIFT 24
#define QB_SDQCR_DCT_MASK 0x3
#define QB_SDQCR_TOK_SHIFT 16
#define QB_SDQCR_TOK_MASK 0xff
#define QB_SDQCR_SRC_SHIFT 0
#define QB_SDQCR_SRC_MASK 0xffff

/* Token placed in the SDQCR so push-dequeue responses can be recognised */
#define QMAN_SDQCR_TOKEN 0xbb

/* DCA (discrete consumption ack) encoding carried in per-frame enqueue flags */
#define QBMAN_EQCR_DCA_IDXMASK 0x0f
#define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31)

/* Enqueue descriptor layout constants used when copying into the EQCR */
#define EQ_DESC_SIZE_WITHOUT_FD 29
#define EQ_DESC_SIZE_FD_START 32
83
/* SDQCR dequeue command type (DCT field) */
enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};
90
/* SDQCR frame count (FC field): dequeue one frame or up to three per command */
enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};
95
96
/*
 * Forward declarations for the two portal-operation families.  The "direct"
 * variants drive the classic cache-enabled portal interface; the "mem_back"
 * variants drive the memory-backed portal interface.  qbman_swp_init()
 * selects between them based on the QMan hardware revision (>= QMAN_REV_5000
 * uses mem_back).
 */
static int qbman_swp_enqueue_direct(struct qbman_swp *s,
				    const struct qbman_eq_desc *d,
				    const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     uint32_t *flags,
					     int num_frames);
static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					       const struct qbman_eq_desc *d,
					       const struct dpaa2_fd *fd,
					       uint32_t *flags,
					       int num_frames);
static int
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct dpaa2_fd *fd,
				       int num_frames);
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     int num_frames);
static int qbman_swp_pull_direct(struct qbman_swp *s,
				 struct qbman_pull_desc *d);
static int qbman_swp_pull_mem_back(struct qbman_swp *s,
				   struct qbman_pull_desc *d);

const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);

static int qbman_swp_release_direct(struct qbman_swp *s,
				    const struct qbman_release_desc *d,
				    const u64 *buffers,
				    unsigned int num_buffers);
static int qbman_swp_release_mem_back(struct qbman_swp *s,
				      const struct qbman_release_desc *d,
				      const u64 *buffers,
				      unsigned int num_buffers);
139
140
/*
 * Indirection pointers for the portal operations.  They default to the
 * "direct" implementations and are re-pointed to the mem_back variants by
 * qbman_swp_init() when the portal hardware is QMAN_REV_5000 or newer.
 */
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd)
	= qbman_swp_enqueue_direct;

int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames)
	= qbman_swp_enqueue_multiple_direct;

int
(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct dpaa2_fd *fd,
				       int num_frames)
	= qbman_swp_enqueue_multiple_desc_direct;

int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
	= qbman_swp_pull_direct;

const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
	= qbman_swp_dqrr_next_direct;

int (*qbman_swp_release_ptr)(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers,
			     unsigned int num_buffers)
	= qbman_swp_release_direct;
171
172
173
/* Read a portal register in the cache-inhibited (CINH) area */
static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
{
	return readl_relaxed(p->addr_cinh + offset);
}
178
/* Write a portal register in the cache-inhibited (CINH) area */
static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
					u32 value)
{
	writel_relaxed(value, p->addr_cinh + offset);
}
184
/* Return a pointer into the cache-enabled (CENA) command/response area */
static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
{
	return p->addr_cena + offset;
}
189
/* Portal configuration register and the bit positions of its fields */
#define QBMAN_CINH_SWP_CFG 0xd00

#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT 16
#define SWP_CFG_CPBS_SHIFT 15
#define SWP_CFG_WN_SHIFT 14
#define SWP_CFG_RPM_SHIFT 12
#define SWP_CFG_DCM_SHIFT 10
#define SWP_CFG_EPM_SHIFT 8
#define SWP_CFG_VPM_SHIFT 7
#define SWP_CFG_CPM_SHIFT 6
#define SWP_CFG_SD_SHIFT 5
#define SWP_CFG_SP_SHIFT 4
#define SWP_CFG_SE_SHIFT 3
#define SWP_CFG_DP_SHIFT 2
#define SWP_CFG_DE_SHIFT 1
#define SWP_CFG_EP_SHIFT 0
207
208static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
209 u8 epm, int sd, int sp, int se,
210 int dp, int de, int ep)
211{
212 return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
213 est << SWP_CFG_EST_SHIFT |
214 wn << SWP_CFG_WN_SHIFT |
215 rpm << SWP_CFG_RPM_SHIFT |
216 dcm << SWP_CFG_DCM_SHIFT |
217 epm << SWP_CFG_EPM_SHIFT |
218 sd << SWP_CFG_SD_SHIFT |
219 sp << SWP_CFG_SP_SHIFT |
220 se << SWP_CFG_SE_SHIFT |
221 dp << SWP_CFG_DP_SHIFT |
222 de << SWP_CFG_DE_SHIFT |
223 ep << SWP_CFG_EP_SHIFT);
224}
225
226#define QMAN_RT_MODE 0x00000100
227
228static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
229{
230
231 if (first <= last)
232 return last - first;
233 else
234 return (2 * ringsize) - (first - last);
235}
236
237
238
239
240
241
242
243
244
/**
 * qbman_swp_init() - Create a functional object representing the given
 *                    QBMan portal descriptor.
 * @d: the given qbman swp descriptor
 *
 * Return qbman_swp portal for success, NULL if the object cannot
 * be created.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
	struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
	u32 reg;
	u32 mask_size;
	u32 eqcr_pi;

	if (!p)
		return NULL;

	spin_lock_init(&p->access_spinlock);

	p->desc = d;
	p->mc.valid_bit = QB_VALID_BIT;
	/* Pre-compute the SDQCR value used when push dequeue is enabled */
	p->sdq = 0;
	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
	p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
		p->mr.valid_bit = QB_VALID_BIT;

	atomic_set(&p->vdq.available, 1);
	p->vdq.valid_bit = QB_VALID_BIT;
	p->dqrr.next_idx = 0;
	p->dqrr.valid_bit = QB_VALID_BIT;

	/* Pre-4100 portals have a smaller DQRR and need the reset-bug
	 * workaround in qbman_swp_dqrr_next_*().
	 */
	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
		p->dqrr.dqrr_size = 4;
		p->dqrr.reset_bug = 1;
	} else {
		p->dqrr.dqrr_size = 8;
		p->dqrr.reset_bug = 0;
	}

	p->addr_cena = d->cena_bar;
	p->addr_cinh = d->cinh_bar;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {

		reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
			1, /* wn */
			0, /* est */
			3, /* rpm */
			2, /* dcm */
			2, /* epm */
			1, /* sd */
			1, /* sp */
			1, /* se */
			1, /* dp */
			0, /* de */
			0); /* ep */
	} else {
		/* Memory-backed portal: zero the CENA area before use */
		memset(p->addr_cena, 0, 64 * 1024);
		reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
			1, /* wn */
			1, /* est */
			3, /* rpm */
			2, /* dcm */
			0, /* epm */
			1, /* sd */
			1, /* sp */
			1, /* se */
			1, /* dp */
			0, /* de */
			0); /* ep */
		reg |= 1 << SWP_CFG_CPBS_SHIFT |
		       1 << SWP_CFG_VPM_SHIFT |
		       1 << SWP_CFG_CPM_SHIFT;
	}

	/* Write the configuration and read it back to confirm the portal
	 * has been enabled (a disabled portal reads back as zero).
	 */
	qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
	reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
	if (!reg) {
		pr_err("qbman: the portal is not enabled!\n");
		kfree(p);
		return NULL;
	}

	/* Memory-backed portals: switch EQCR/RCR producer-index registers
	 * to run-time (RT) mode.
	 */
	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
		qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
		qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
	}

	/*
	 * SDQCR needs to be initialized to 0 when no channels are being
	 * dequeued from.  The value pre-computed above is applied later,
	 * when dequeues from a specific channel are enabled.
	 */
	qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);

	p->eqcr.pi_ring_size = 8;
	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
		p->eqcr.pi_ring_size = 32;
		qbman_swp_enqueue_ptr =
			qbman_swp_enqueue_mem_back;
		qbman_swp_enqueue_multiple_ptr =
			qbman_swp_enqueue_multiple_mem_back;
		qbman_swp_enqueue_multiple_desc_ptr =
			qbman_swp_enqueue_multiple_desc_mem_back;
		qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
		qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
		qbman_swp_release_ptr = qbman_swp_release_mem_back;
	}

	/* Build a mask of (2 * ring_size - 1): PI/CI counters carry one
	 * extra wrap bit beyond the ring index.
	 */
	for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
		p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
	eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
	p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
	p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
	p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
			& p->eqcr.pi_ci_mask;
	p->eqcr.available = p->eqcr.pi_ring_size;

	return p;
}
360
361
362
363
364
365
/**
 * qbman_swp_finish() - Create and destroy a functional object representing
 *                      the given QBMan portal descriptor.
 * @p: the qbman_swp object to be destroyed
 */
void qbman_swp_finish(struct qbman_swp *p)
{
	kfree(p);
}
370
371
372
373
374
375
376
/**
 * qbman_swp_interrupt_read_status() - Get the interrupt status bits (ISR)
 * @p: the given software portal
 */
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
}
381
382
383
384
385
386
/**
 * qbman_swp_interrupt_clear_status() - Clear interrupt status bits (write-1-to-clear ISR)
 * @p:    the given software portal
 * @mask: the status bits to be cleared
 */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
}
391
392
393
394
395
396
397
/**
 * qbman_swp_interrupt_get_trigger() - Get the interrupt enable mask (IER)
 * @p: the given software portal
 */
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IER);
}
402
403
404
405
406
407
/**
 * qbman_swp_interrupt_set_trigger() - Set the interrupt enable mask (IER)
 * @p:    the given software portal
 * @mask: the interrupt sources to enable
 */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
}
412
413
414
415
416
417
418
/**
 * qbman_swp_interrupt_get_inhibit() - Read the portal's interrupt-inhibit
 *                                     register (IIR)
 * @p: the given software portal
 */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
}
423
424
425
426
427
428
429void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
430{
431 qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
432}
433
434
435
436
437
438
439
440
441
442
443void *qbman_swp_mc_start(struct qbman_swp *p)
444{
445 if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
446 return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
447 else
448 return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
449}
450
451
452
453
454
/*
 * Submit a previously started management command.  @cmd_verb has the
 * portal's valid bit OR'd in.  On pre-5000 portals the verb-byte write
 * (after the barrier) hands the command to hardware; on memory-backed
 * portals the verb is written first and the CR_RT register write triggers
 * processing.
 */
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
{
	u8 *v = cmd;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		dma_wmb(); /* command body must be visible before the verb */
		*v = cmd_verb | p->mc.valid_bit;
	} else {
		*v = cmd_verb | p->mc.valid_bit;
		dma_wmb(); /* full command must be visible before the doorbell */
		qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
	}
}
468
469
470
471
472
/*
 * Poll for the response to a previously submitted management command.
 * Returns a pointer to the response, or NULL if none is available yet.
 */
void *qbman_swp_mc_result(struct qbman_swp *p)
{
	u32 *ret, verb;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
		/*
		 * Remove the valid bit - the command has completed iff
		 * the remainder of the verb is non-zero.
		 */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mc.valid_bit ^= QB_VALID_BIT;
	} else {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
		/* The response is ready only once its valid bit matches ours */
		if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
			return NULL;
		/* Command completed if the rest of the verb is non-zero */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mr.valid_bit ^= QB_VALID_BIT;
	}

	return ret;
}
500
/* Enqueue-descriptor verb encoding */
#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
/* Response/rejection handling selected in the low verb bits */
enum qb_enqueue_commands {
	enqueue_empty = 0,
	enqueue_response_always = 1,
	enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
512
513
514
515
516
/**
 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 *                         default/starting state.
 * @d: the given enqueue descriptor
 */
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
	memset(d, 0, sizeof(*d));
}
521
522
523
524
525
526
527
528void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
529{
530 d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
531 if (respond_success)
532 d->verb |= enqueue_response_always;
533 else
534 d->verb |= enqueue_rejects_to_fq;
535}
536
537
538
539
540
541
542
543
544
545
546
547
548
/**
 * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
 * @d:    the enqueue descriptor
 * @fqid: the id of the frame queue to be enqueued
 *
 * Clears the target-type bit so the target id is interpreted as an FQID.
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
{
	d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
	d->tgtid = cpu_to_le32(fqid);
}
554
555
556
557
558
559
560
561
/**
 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
 * @d:       the enqueue descriptor
 * @qdid:    the id of the queuing destination to be enqueued
 * @qd_bin:  the queuing destination bin
 * @qd_prio: the queuing destination priority
 *
 * Sets the target-type bit so the target id is interpreted as a QD.
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
			  u32 qd_bin, u32 qd_prio)
{
	d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
	d->tgtid = cpu_to_le32(qdid);
	d->qdbin = cpu_to_le16(qd_bin);
	d->qpri = qd_prio;
}
570
/* Fields of the EQAR (enqueue-acquire) register result */
#define EQAR_IDX(eqar) ((eqar) & 0x7)
#define EQAR_VB(eqar) ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

/* "run-time" bit OR'd into producer-index doorbell writes */
#define QB_RT_BIT ((u32)0x100)
576
577
578
579
580
581
582
583
584
585
586
587static
588int qbman_swp_enqueue_direct(struct qbman_swp *s,
589 const struct qbman_eq_desc *d,
590 const struct dpaa2_fd *fd)
591{
592 int flags = 0;
593 int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);
594
595 if (ret >= 0)
596 ret = 0;
597 else
598 ret = -EBUSY;
599 return ret;
600}
601
602
603
604
605
606
607
608
609
610
611
612
613static
614int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
615 const struct qbman_eq_desc *d,
616 const struct dpaa2_fd *fd)
617{
618 int flags = 0;
619 int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);
620
621 if (ret >= 0)
622 ret = 0;
623 else
624 ret = -EBUSY;
625 return ret;
626}
627
628
629
630
631
632
633
634
635
636
637
638
/**
 * qbman_swp_enqueue_multiple_direct() - Issue a multi-frame enqueue command
 * using one enqueue descriptor.
 * @s:          the software portal used for enqueue
 * @d:          the enqueue descriptor
 * @fd:         table of frame descriptors to be enqueued
 * @flags:      table of QBMAN_ENQUEUE_FLAG_DCA flags, ignored if NULL
 * @num_frames: number of frames to be enqueued
 *
 * Return the number of frames enqueued; may be 0 if the EQCR had no space.
 */
static
int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)d;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	spin_lock(&s->access_spinlock);
	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;

	/* No known free slots: refresh the consumer index from hardware */
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		/* NOTE(review): 'p' is assigned but never read here; the CI
		 * is actually refreshed via the CINH register below.
		 */
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.ci &= full_mask;

		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			spin_unlock(&s->access_spinlock);
			return 0;
		}
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring (everything except the verb byte) */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Entry bodies must be visible before hardware sees the verbs */
	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		/* The valid bit flips each time the ring index wraps */
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Advance the stored producer index past the new entries */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++)
		eqcr_pi++;
	s->eqcr.pi = eqcr_pi & full_mask;
	spin_unlock(&s->access_spinlock);

	return num_enqueued;
}
710
711
712
713
714
715
716
717
718
719
720
721
/**
 * qbman_swp_enqueue_multiple_mem_back() - Issue a multi-frame enqueue command
 * using one enqueue descriptor (memory-backed portal variant).
 * @s:          the software portal used for enqueue
 * @d:          the enqueue descriptor
 * @fd:         table of frame descriptors to be enqueued
 * @flags:      table of QBMAN_ENQUEUE_FLAG_DCA flags, ignored if NULL
 * @num_frames: number of frames to be enqueued
 *
 * Return the number of frames enqueued; may be 0 if the EQCR had no space.
 */
static
int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					const struct qbman_eq_desc *d,
					const struct dpaa2_fd *fd,
					uint32_t *flags,
					int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	unsigned long irq_flags;

	spin_lock(&s->access_spinlock);
	local_irq_save(irq_flags);

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	/* No known free slots: refresh the consumer index from the
	 * memory-backed CI word.
	 */
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
		s->eqcr.ci = *p & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			local_irq_restore(irq_flags);
			spin_unlock(&s->access_spinlock);
			return 0;
		}
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring (everything except the verb byte) */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		/* The valid bit flips each time the ring index wraps */
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	/* Entries must be visible before ringing the PI doorbell */
	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
				(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
	local_irq_restore(irq_flags);
	spin_unlock(&s->access_spinlock);

	return num_enqueued;
}
792
793
794
795
796
797
798
799
800
801
802
/**
 * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi-frame enqueue
 * command using multiple enqueue descriptors (one per frame).
 * @s:          the software portal used for enqueue
 * @d:          table of enqueue descriptors
 * @fd:         table of frame descriptors to be enqueued
 * @num_frames: number of frames to be enqueued
 *
 * Return the number of frames enqueued; may be 0 if the EQCR had no space.
 */
static
int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct dpaa2_fd *fd,
					   int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	/* No known free slots: refresh the consumer index from hardware */
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		/* NOTE(review): 'p' is assigned but never read here; the CI
		 * is actually refreshed via the CINH register below.
		 */
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring (everything except the verb byte) */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Entry bodies must be visible before hardware sees the verbs */
	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		/* The valid bit flips each time the ring index wraps */
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Advance the stored producer index past the new entries */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++)
		eqcr_pi++;
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}
862
863
864
865
866
867
868
869
870
871
872
/**
 * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi-frame enqueue
 * command using multiple enqueue descriptors (memory-backed portal variant).
 * @s:          the software portal used for enqueue
 * @d:          table of enqueue descriptors
 * @fd:         table of frame descriptors to be enqueued
 * @num_frames: number of frames to be enqueued
 *
 * Return the number of frames enqueued; may be 0 if the EQCR had no space.
 */
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	/* No known free slots: refresh the consumer index from the
	 * memory-backed CI word.
	 */
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
		s->eqcr.ci = *p & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring (everything except the verb byte) */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		/* The valid bit flips each time the ring index wraps */
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	s->eqcr.pi = eqcr_pi & full_mask;

	/* Entries must be visible before ringing the PI doorbell */
	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
				(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);

	return num_enqueued;
}
930
931
932
933
934
935
936
937
938
939
940void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
941{
942 u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
943
944 WARN_ON(channel_idx > 15);
945 *enabled = src | (1 << channel_idx);
946}
947
948
949
950
951
952
953
/**
 * qbman_swp_push_set() - Enable or disable push dequeue
 * @s:           the software portal object
 * @channel_idx: the channel index (0..15)
 * @enable:      enable or disable push dequeue
 */
void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
{
	u16 dqsrc;

	WARN_ON(channel_idx > 15);
	if (enable)
		s->sdq |= 1 << channel_idx;
	else
		s->sdq &= ~(1 << channel_idx);

	/*
	 * Write the complete source map to the SDQCR.  If no channels remain
	 * enabled the SDQCR must be written as 0 rather than with the other
	 * pre-computed attribute fields still set.
	 */
	dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
	if (dqsrc != 0)
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
	else
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
}
973
/* VDQCR (volatile dequeue command) verb bit positions */
#define QB_VDQCR_VERB_DCT_SHIFT 0
#define QB_VDQCR_VERB_DT_SHIFT 2
#define QB_VDQCR_VERB_RLS_SHIFT 4
#define QB_VDQCR_VERB_WAE_SHIFT 5

/* Dequeue-target type encoded in the DT field */
enum qb_pull_dt_e {
	qb_pull_dt_channel,
	qb_pull_dt_workqueue,
	qb_pull_dt_framequeue
};
984
985
986
987
988
989
/**
 * qbman_pull_desc_clear() - Clear the contents of a pull descriptor to
 *                           default/starting state
 * @d: the pull dequeue descriptor to be cleared
 */
void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
	memset(d, 0, sizeof(*d));
}
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
/**
 * qbman_pull_desc_set_storage() - Set the pull dequeue storage
 * @d:            the pull dequeue descriptor to be set
 * @storage:      the pointer of the memory to store the dequeue result
 * @storage_phys: the DMA address of the storage memory
 * @stash:        whether the write-allocate (cache-warming) attribute is set
 *
 * If called with 'storage' as NULL, pull dequeues produce results to DQRR
 * (the RLS bit is cleared).  Otherwise results are written to the given
 * memory location via 'storage_phys', with 'stash' controlling the WAE bit.
 */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct dpaa2_dq *storage,
				 dma_addr_t storage_phys,
				 int stash)
{
	/* save the virtual address so the response can be found later */
	d->rsp_addr_virt = (u64)(uintptr_t)storage;

	if (!storage) {
		d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
		return;
	}
	d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
	if (stash)
		d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
	else
		d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

	d->rsp_addr = cpu_to_le64(storage_phys);
}
1028
1029
1030
1031
1032
1033
/**
 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
 * @d:         the pull dequeue descriptor to be set
 * @numframes: number of frames to be dequeued; the hardware field stores
 *             numframes - 1, so callers must pass a value >= 1
 */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
{
	d->numf = numframes - 1;
}
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
/**
 * qbman_pull_desc_set_fq() - Set the frame queue as the pull dequeue source
 * @d:    the pull dequeue descriptor to be set
 * @fqid: the frame queue index of the given FQ
 */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
{
	d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(fqid);
}
1057
1058
1059
1060
1061
1062
/**
 * qbman_pull_desc_set_wq() - Set the work queue as the pull dequeue source
 * @d:    the pull dequeue descriptor to be set
 * @wqid: composed of channel id and work-queue index within the channel
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
			    enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(wqid);
}
1070
1071
1072
1073
1074
1075
1076
/**
 * qbman_pull_desc_set_channel() - Set the channel as the pull dequeue source
 * @d:    the pull dequeue descriptor to be set
 * @chid: the channel id to be dequeued from
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
				 enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(chid);
}
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
/**
 * qbman_swp_pull_direct() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, -EBUSY if the software portal is not ready
 * to do pull dequeue (a previous volatile dequeue is still outstanding).
 */
static
int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	/* Only one volatile dequeue may be outstanding at a time */
	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;
	dma_wmb(); /* command body must be visible before the verb */

	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;

	return 0;
}
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
/**
 * qbman_swp_pull_mem_back() - Issue the pull dequeue command
 * (memory-backed portal variant)
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, -EBUSY if the software portal is not ready
 * to do pull dequeue (a previous volatile dequeue is still outstanding).
 */
static
int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	/* Only one volatile dequeue may be outstanding at a time */
	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;

	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;
	dma_wmb(); /* full command must be visible before the doorbell */
	qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);

	return 0;
}
1158
1159#define QMAN_DQRR_PI_MASK 0xf
1160
1161
1162
1163
1164
1165
1166
1167
1168
/**
 * qbman_swp_dqrr_next_direct() - Get the next valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries.  Each DQRR entry is
 * returned only once, so repeated calls yield a sequence of entries without
 * requiring that they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
	u32 flags;
	struct dpaa2_dq *p;

	/* Work around the pre-4100 DQRR reset bug: until the first full trip
	 * around the ring, valid-bit polling cannot be trusted, so detect new
	 * entries via the cache-inhibited producer index instead.
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
			QMAN_DQRR_PI_MASK;

		/* there are new entries iff pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/*
		 * If next_idx is/was the last ring index and 'pi' is
		 * different, the workaround can be disabled: every ring
		 * entry has now been consumed at least once.
		 */
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
	}

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	verb = p->dq.verb;

	/*
	 * If the valid bit isn't of the expected polarity, nothing is there.
	 * (Under the reset-bug workaround above we already know an entry is
	 * available, so this check simply confirms it.)
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
		return NULL;
	}
	/*
	 * There's something there.  Move "next_idx" attention to the next
	 * ring entry before returning what we found.
	 */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* wrap around */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/*
	 * If this is the final response to a volatile dequeue command,
	 * indicate that the vdq is available.
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESULT_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
	    (flags & DPAA2_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.available);

	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

	return p;
}
1252
1253
1254
1255
1256
1257
1258
1259
1260
/**
 * qbman_swp_dqrr_next_mem_back() - Get the next valid DQRR entry
 * (memory-backed portal variant)
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries.  Each DQRR entry is
 * returned only once, so repeated calls yield a sequence of entries without
 * requiring that they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
	u32 flags;
	struct dpaa2_dq *p;

	/* Work around the pre-4100 DQRR reset bug: until the first full trip
	 * around the ring, valid-bit polling cannot be trusted, so detect new
	 * entries via the cache-inhibited producer index instead.
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
			QMAN_DQRR_PI_MASK;

		/* there are new entries iff pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/*
		 * If next_idx is/was the last ring index and 'pi' is
		 * different, the workaround can be disabled: every ring
		 * entry has now been consumed at least once.
		 */
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
	}

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
	verb = p->dq.verb;

	/*
	 * If the valid bit isn't of the expected polarity, nothing is there.
	 * (Under the reset-bug workaround above we already know an entry is
	 * available, so this check simply confirms it.)
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
		return NULL;
	}
	/*
	 * There's something there.  Move "next_idx" attention to the next
	 * ring entry before returning what we found.
	 */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* wrap around */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/*
	 * If this is the final response to a volatile dequeue command,
	 * indicate that the vdq is available.
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESULT_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
	    (flags & DPAA2_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.available);

	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

	return p;
}
1344
1345
1346
1347
1348
1349
1350
/**
 * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
 *                            qbman_swp_dqrr_next().
 * @s:  the software portal object
 * @dq: the DQRR entry to be consumed
 */
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
	qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
/**
 * qbman_result_has_new_result() - Check and get the dequeue response from the
 *                                 dq storage memory set in pull dequeue command
 * @s:  the software portal object
 * @dq: the dequeue result read from the memory
 *
 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
 * dequeue result.
 *
 * Only used for user-provided storage of dequeue results, not DQRR.  For
 * efficiency purposes, the driver will perform any required endianness
 * conversion to ensure that the user's dequeue result storage is in host-endian
 * format. As such, once the user has called qbman_result_has_new_result() and
 * been returned a valid dequeue result, they should not call it again on
 * the same memory location (except of course if another dequeue command has
 * been executed to produce a new result to that location).
 */
int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
	if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
		return 0;

	/*
	 * Set token to be 0 so we will detect change back to 1
	 * next time the looping is traversed. Const is cast away here
	 * as we want users to treat the dequeue responses as read only.
	 */
	((struct dpaa2_dq *)dq)->dq.tok = 0;

	/*
	 * VDQCR "no longer busy" hook - not quite the same as DQRR, because
	 * the fact "VDQCR" shows busy doesn't mean that we hold the result
	 * that makes it available. Eg. we may be looking at our 10th dequeue
	 * result, having released VDQCR after the 1st result and it is now
	 * busy due to some other command!
	 */
	if (s->vdq.storage == dq) {
		s->vdq.storage = NULL;
		atomic_inc(&s->vdq.available);
	}

	return 1;
}
1397
1398
1399
1400
1401
/**
 * qbman_release_desc_clear() - Clear the contents of a release descriptor to
 *                              default/starting state.
 * @d: the release descriptor to be cleared
 */
void qbman_release_desc_clear(struct qbman_release_desc *d)
{
	memset(d, 0, sizeof(*d));
	d->verb = 1 << 5; /* Release Command Valid */
}
1407
1408
1409
1410
/**
 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
 * @d:    the release descriptor
 * @bpid: the buffer pool id
 */
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
{
	d->bpid = cpu_to_le16(bpid);
}
1415
1416
1417
1418
1419
1420void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
1421{
1422 if (enable)
1423 d->verb |= 1 << 6;
1424 else
1425 d->verb &= ~(1 << 6);
1426}
1427
/* Fields of the RAR (release-acquire) register result */
#define RAR_IDX(rar) ((rar) & 0x7)
#define RAR_VB(rar) ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
/**
 * qbman_swp_release_direct() - Release buffers to a buffer pool
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     a pointer pointing to the buffer address to be released
 * @num_buffers: number of buffers to be released, must be in [1, 7]
 *
 * Return 0 for success, -EINVAL for an invalid buffer count or -EBUSY if the
 * release ring has no free slot.
 */
int qbman_swp_release_direct(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
	u32 rar;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* Start the release command */
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	/*
	 * Set the verb byte, have to substitute in the valid-bit and the
	 * number of buffers; the body must be visible first, hence the
	 * barrier before the verb write.
	 */
	dma_wmb();
	p->verb = d->verb | RAR_VB(rar) | num_buffers;

	return 0;
}
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
/**
 * qbman_swp_release_mem_back() - Release buffers to a buffer pool
 * (memory-backed portal variant)
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     a pointer pointing to the buffer address to be released
 * @num_buffers: number of buffers to be released, must be in [1, 7]
 *
 * Return 0 for success, -EINVAL for an invalid buffer count or -EBUSY if the
 * release ring has no free slot.
 */
int qbman_swp_release_mem_back(struct qbman_swp *s,
			       const struct qbman_release_desc *d,
			       const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
	u32 rar;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* Start the release command */
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	/* Write the full command, then make it visible and ring the
	 * per-slot RT doorbell.
	 */
	p->verb = d->verb | RAR_VB(rar) | num_buffers;
	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
			     RAR_IDX(rar) * 4, QMAN_RT_MODE);

	return 0;
}
1513
/* Management command to acquire (pop) buffers from a buffer pool */
struct qbman_acquire_desc {
	u8 verb;	/* command verb, stamped by the MC completion helper */
	u8 reserved;
	__le16 bpid;	/* buffer pool to acquire from */
	u8 num;		/* number of buffers requested, 1..7 */
	u8 reserved2[59];
};
1521
/* Response to a buffer acquire management command */
struct qbman_acquire_rslt {
	u8 verb;	/* echoed command verb */
	u8 rslt;	/* status, QBMAN_MC_RSLT_OK on success */
	__le16 reserved;
	u8 num;		/* number of buffers actually acquired */
	u8 reserved2[3];
	__le64 buf[7];	/* the acquired buffer addresses */
};
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
1542 unsigned int num_buffers)
1543{
1544 struct qbman_acquire_desc *p;
1545 struct qbman_acquire_rslt *r;
1546 int i;
1547
1548 if (!num_buffers || (num_buffers > 7))
1549 return -EINVAL;
1550
1551
1552 p = qbman_swp_mc_start(s);
1553
1554 if (!p)
1555 return -EBUSY;
1556
1557
1558 p->bpid = cpu_to_le16(bpid);
1559 p->num = num_buffers;
1560
1561
1562 r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
1563 if (unlikely(!r)) {
1564 pr_err("qbman: acquire from BPID %d failed, no response\n",
1565 bpid);
1566 return -EIO;
1567 }
1568
1569
1570 WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);
1571
1572
1573 if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
1574 pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
1575 bpid, r->rslt);
1576 return -EIO;
1577 }
1578
1579 WARN_ON(r->num > num_buffers);
1580
1581
1582 for (i = 0; i < r->num; i++)
1583 buffers[i] = le64_to_cpu(r->buf[i]);
1584
1585 return (int)r->num;
1586}
1587
/* Management command to alter the state of a frame queue (FQ) */
struct qbman_alt_fq_state_desc {
	u8 verb;	/* command verb, selects the specific ALT FQ action */
	u8 reserved[3];
	__le32 fqid;	/* target frame queue ID, 24 bits used */
	u8 reserved2[56];
};
1594
/* Response to an ALT FQ state management command */
struct qbman_alt_fq_state_rslt {
	u8 verb;	/* echoed command verb */
	u8 rslt;	/* status, QBMAN_MC_RSLT_OK on success */
	u8 reserved[62];
};
1600
/* Frame queue IDs are 24-bit; mask off the upper bits of the caller's value */
#define ALT_FQ_FQID_MASK 0x00FFFFFF
1602
1603int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
1604 u8 alt_fq_verb)
1605{
1606 struct qbman_alt_fq_state_desc *p;
1607 struct qbman_alt_fq_state_rslt *r;
1608
1609
1610 p = qbman_swp_mc_start(s);
1611 if (!p)
1612 return -EBUSY;
1613
1614 p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);
1615
1616
1617 r = qbman_swp_mc_complete(s, p, alt_fq_verb);
1618 if (unlikely(!r)) {
1619 pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
1620 alt_fq_verb);
1621 return -EIO;
1622 }
1623
1624
1625 WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);
1626
1627
1628 if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
1629 pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
1630 fqid, r->verb, r->rslt);
1631 return -EIO;
1632 }
1633
1634 return 0;
1635}
1636
/* Management command to configure a channel's CDAN (data availability
 * notification) settings
 */
struct qbman_cdan_ctrl_desc {
	u8 verb;	/* command verb, stamped by the MC completion helper */
	u8 reserved;
	__le16 ch;	/* target channel ID */
	u8 we;		/* write-enable mask: which attributes to update */
	u8 ctrl;	/* bit 0: CDAN enable */
	__le16 reserved2;
	__le64 cdan_ctx;	/* context value delivered with the notification */
	u8 reserved3[48];

};
1648
/* Response to a CDAN control management command */
struct qbman_cdan_ctrl_rslt {
	u8 verb;	/* echoed command verb */
	u8 rslt;	/* status, QBMAN_MC_RSLT_OK on success */
	__le16 ch;	/* echoed channel ID */
	u8 reserved[60];
};
1655
1656int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
1657 u8 we_mask, u8 cdan_en,
1658 u64 ctx)
1659{
1660 struct qbman_cdan_ctrl_desc *p = NULL;
1661 struct qbman_cdan_ctrl_rslt *r = NULL;
1662
1663
1664 p = qbman_swp_mc_start(s);
1665 if (!p)
1666 return -EBUSY;
1667
1668
1669 p->ch = cpu_to_le16(channelid);
1670 p->we = we_mask;
1671 if (cdan_en)
1672 p->ctrl = 1;
1673 else
1674 p->ctrl = 0;
1675 p->cdan_ctx = cpu_to_le64(ctx);
1676
1677
1678 r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
1679 if (unlikely(!r)) {
1680 pr_err("qbman: wqchan config failed, no response\n");
1681 return -EIO;
1682 }
1683
1684 WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);
1685
1686
1687 if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
1688 pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
1689 channelid, r->rslt);
1690 return -EIO;
1691 }
1692
1693 return 0;
1694}
1695
/* Verbs and masks for the query management commands below */
#define QBMAN_RESPONSE_VERB_MASK 0x7f	/* verb field, excluding valid bit */
#define QBMAN_FQ_QUERY_NP 0x45	/* query FQ non-programmable fields */
#define QBMAN_BP_QUERY 0x32	/* query buffer pool state */
1699
/* Management command to query a frame queue's non-programmable fields */
struct qbman_fq_query_desc {
	u8 verb;	/* command verb, stamped by the MC completion helper */
	u8 reserved[3];
	__le32 fqid;	/* frame queue ID to query, 24 bits used */
	u8 reserved2[56];
};
1706
1707int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
1708 struct qbman_fq_query_np_rslt *r)
1709{
1710 struct qbman_fq_query_desc *p;
1711 void *resp;
1712
1713 p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
1714 if (!p)
1715 return -EBUSY;
1716
1717
1718 p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
1719 resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
1720 if (!resp) {
1721 pr_err("qbman: Query FQID %d NP fields failed, no response\n",
1722 fqid);
1723 return -EIO;
1724 }
1725 *r = *(struct qbman_fq_query_np_rslt *)resp;
1726
1727 WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);
1728
1729
1730 if (r->rslt != QBMAN_MC_RSLT_OK) {
1731 pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
1732 p->fqid, r->rslt);
1733 return -EIO;
1734 }
1735
1736 return 0;
1737}
1738
1739u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
1740{
1741 return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
1742}
1743
/**
 * qbman_fq_state_byte_count() - Get the byte count from a FQ query result
 * @r: the FQ query result returned by qbman_fq_query_state()
 *
 * Return: the number of bytes on the queue.
 */
u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
{
	return le32_to_cpu(r->byte_cnt);
}
1748
/* Management command to query a buffer pool's state */
struct qbman_bp_query_desc {
	u8 verb;	/* command verb, stamped by the MC completion helper */
	u8 reserved;
	__le16 bpid;	/* buffer pool ID to query */
	u8 reserved2[60];
};
1755
1756int qbman_bp_query(struct qbman_swp *s, u16 bpid,
1757 struct qbman_bp_query_rslt *r)
1758{
1759 struct qbman_bp_query_desc *p;
1760 void *resp;
1761
1762 p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
1763 if (!p)
1764 return -EBUSY;
1765
1766 p->bpid = cpu_to_le16(bpid);
1767 resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
1768 if (!resp) {
1769 pr_err("qbman: Query BPID %d fields failed, no response\n",
1770 bpid);
1771 return -EIO;
1772 }
1773 *r = *(struct qbman_bp_query_rslt *)resp;
1774
1775 WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);
1776
1777
1778 if (r->rslt != QBMAN_MC_RSLT_OK) {
1779 pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
1780 bpid, r->rslt);
1781 return -EIO;
1782 }
1783
1784 return 0;
1785}
1786
/**
 * qbman_bp_info_num_free_bufs() - Get the number of free buffers in the pool
 * @a: the buffer pool query result filled in by qbman_bp_query()
 *
 * Return: the current fill level of the buffer pool.
 */
u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
{
	return le32_to_cpu(a->fill);
}
1791