#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/fsl/dpaa2-global.h>

#include "qbman-portal.h"

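/* All QBMan command and result structures use this "valid bit" encoding */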
#define QB_VALID_BIT ((u32)0x80)

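/* QBMan portal management command codes */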
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

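/* CINH register offsets */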
#define QBMAN_CINH_SWP_EQCR_PI     0x800
#define QBMAN_CINH_SWP_EQCR_CI     0x840
#define QBMAN_CINH_SWP_EQAR        0x8c0
#define QBMAN_CINH_SWP_CR_RT       0x900
#define QBMAN_CINH_SWP_VDQCR_RT    0x940
#define QBMAN_CINH_SWP_EQCR_AM_RT  0x980
#define QBMAN_CINH_SWP_RCR_AM_RT   0x9c0
#define QBMAN_CINH_SWP_DQPI        0xa00
#define QBMAN_CINH_SWP_DCAP        0xac0
#define QBMAN_CINH_SWP_SDQCR       0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
#define QBMAN_CINH_SWP_RCR_PI      0xc00
#define QBMAN_CINH_SWP_RAR         0xcc0
#define QBMAN_CINH_SWP_ISR         0xe00
#define QBMAN_CINH_SWP_IER         0xe40
#define QBMAN_CINH_SWP_ISDR        0xe80
#define QBMAN_CINH_SWP_IIR         0xec0

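/* CENA register offsets */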
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR      0x600
#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR   0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840
#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840

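/* CENA register offsets in memory-backed mode */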
#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR_MEM(n)  (0x1400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR_MEM      0x1600
#define QBMAN_CENA_SWP_RR_MEM      0x1680
#define QBMAN_CENA_SWP_VDQCR_MEM   0x1780

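/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */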
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)

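/* Define token used to determine if response written to memory is valid */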
#define QMAN_DQ_TOKEN_VALID 1

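/* SDQCR attribute codes */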
#define QB_SDQCR_FC_SHIFT   29
#define QB_SDQCR_FC_MASK    0x1
#define QB_SDQCR_DCT_SHIFT  24
#define QB_SDQCR_DCT_MASK   0x3
#define QB_SDQCR_TOK_SHIFT  16
#define QB_SDQCR_TOK_MASK   0xff
#define QB_SDQCR_SRC_SHIFT  0
#define QB_SDQCR_SRC_MASK   0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN    0xbb

#define QBMAN_EQCR_DCA_IDXMASK  0x0f
#define QBMAN_ENQUEUE_FLAG_DCA  (1ULL << 31)

#define EQ_DESC_SIZE_WITHOUT_FD 29
#define EQ_DESC_SIZE_FD_START   32

enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};

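/* Forward declarations of the portal-mode-specific implementations */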
static int qbman_swp_enqueue_direct(struct qbman_swp *s,
				    const struct qbman_eq_desc *d,
				    const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     uint32_t *flags,
					     int num_frames);
static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					       const struct qbman_eq_desc *d,
					       const struct dpaa2_fd *fd,
					       uint32_t *flags,
					       int num_frames);
static int
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct dpaa2_fd *fd,
				       int num_frames);
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     int num_frames);
static int qbman_swp_pull_direct(struct qbman_swp *s,
				 struct qbman_pull_desc *d);
static int qbman_swp_pull_mem_back(struct qbman_swp *s,
				   struct qbman_pull_desc *d);

const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);

static int qbman_swp_release_direct(struct qbman_swp *s,
				    const struct qbman_release_desc *d,
				    const u64 *buffers,
				    unsigned int num_buffers);
static int qbman_swp_release_mem_back(struct qbman_swp *s,
				      const struct qbman_release_desc *d,
				      const u64 *buffers,
				      unsigned int num_buffers);

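/*
 * Function pointers for the portal APIs. They default to the "direct"
 * variants and are re-pointed to the memory-backed ("mem_back") variants
 * by qbman_swp_init() on QMan 5.0+ portals.
 */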
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd)
	= qbman_swp_enqueue_direct;

int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames)
	= qbman_swp_enqueue_multiple_direct;

int
(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct dpaa2_fd *fd,
				       int num_frames)
	= qbman_swp_enqueue_multiple_desc_direct;

int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
	= qbman_swp_pull_direct;

const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
	= qbman_swp_dqrr_next_direct;

int (*qbman_swp_release_ptr)(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers,
			     unsigned int num_buffers)
	= qbman_swp_release_direct;

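/* Portal Access */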
static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
{
	return readl_relaxed(p->addr_cinh + offset);
}

static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
					u32 value)
{
	writel_relaxed(value, p->addr_cinh + offset);
}

static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
{
	return p->addr_cena + offset;
}

#define QBMAN_CINH_SWP_CFG   0xd00

#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT     16
#define SWP_CFG_CPBS_SHIFT    15
#define SWP_CFG_WN_SHIFT      14
#define SWP_CFG_RPM_SHIFT     12
#define SWP_CFG_DCM_SHIFT     10
#define SWP_CFG_EPM_SHIFT     8
#define SWP_CFG_VPM_SHIFT     7
#define SWP_CFG_CPM_SHIFT     6
#define SWP_CFG_SD_SHIFT      5
#define SWP_CFG_SP_SHIFT      4
#define SWP_CFG_SE_SHIFT      3
#define SWP_CFG_DP_SHIFT      2
#define SWP_CFG_DE_SHIFT      1
#define SWP_CFG_EP_SHIFT      0

static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
				    u8 epm, int sd, int sp, int se,
				    int dp, int de, int ep)
{
	return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
		est << SWP_CFG_EST_SHIFT |
		wn << SWP_CFG_WN_SHIFT |
		rpm << SWP_CFG_RPM_SHIFT |
		dcm << SWP_CFG_DCM_SHIFT |
		epm << SWP_CFG_EPM_SHIFT |
		sd << SWP_CFG_SD_SHIFT |
		sp << SWP_CFG_SP_SHIFT |
		se << SWP_CFG_SE_SHIFT |
		dp << SWP_CFG_DP_SHIFT |
		de << SWP_CFG_DE_SHIFT |
		ep << SWP_CFG_EP_SHIFT);
}

#define QMAN_RT_MODE	   0x00000100

static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
{
	/* 'first' is included, 'last' is excluded */
	if (first <= last)
		return last - first;
	else
		return (2 * ringsize) - (first - last);
}

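/**
 * qbman_swp_init() - Create a functional object representing the given
 *                    QBMan portal descriptor.
 * @d: the given qbman swp descriptor
 *
 * Return qbman_swp portal for success, NULL if the object cannot
 * be created.
 */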
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
	struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
	u32 reg;
	u32 mask_size;
	u32 eqcr_pi;

	if (!p)
		return NULL;

	spin_lock_init(&p->access_spinlock);

	p->desc = d;
	p->mc.valid_bit = QB_VALID_BIT;
	p->sdq = 0;
	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
	p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
		p->mr.valid_bit = QB_VALID_BIT;

	atomic_set(&p->vdq.available, 1);
	p->vdq.valid_bit = QB_VALID_BIT;
	p->dqrr.next_idx = 0;
	p->dqrr.valid_bit = QB_VALID_BIT;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
		p->dqrr.dqrr_size = 4;
		p->dqrr.reset_bug = 1;
	} else {
		p->dqrr.dqrr_size = 8;
		p->dqrr.reset_bug = 0;
	}

	p->addr_cena = d->cena_bar;
	p->addr_cinh = d->cinh_bar;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
			1, /* Writes Non-cacheable */
			0, /* EQCR_CI stashing threshold */
			3, /* RPM: RCR in array mode */
			2, /* DCM: Discrete consumption ack */
			2, /* EPM: EQCR in ring mode */
			1, /* mem stashing drop enable */
			1, /* mem stashing priority enable */
			1, /* mem stashing enable */
			1, /* dequeue stashing priority enable */
			0, /* dequeue stashing enable */
			0); /* EQCR_CI stashing priority enable */
	} else {
		memset(p->addr_cena, 0, 64 * 1024);
		reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
			1, /* Writes Non-cacheable */
			1, /* EQCR_CI stashing threshold */
			3, /* RPM: RCR in array mode */
			2, /* DCM: Discrete consumption ack */
			0, /* EPM: EQCR in ring mode */
			1, /* mem stashing drop enable */
			1, /* mem stashing priority enable */
			1, /* mem stashing enable */
			1, /* dequeue stashing priority enable */
			0, /* dequeue stashing enable */
			0); /* EQCR_CI stashing priority enable */
		reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
		       1 << SWP_CFG_VPM_SHIFT |  /* VDQCR read triggered mode */
		       1 << SWP_CFG_CPM_SHIFT;   /* CR read triggered mode */
	}

	qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
	reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
	if (!reg) {
		pr_err("qbman: the portal is not enabled!\n");
		kfree(p);
		return NULL;
	}

	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
		qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
		qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
	}

	/*
	 * SDQCR needs to be initialized to 0 when no channels are
	 * being dequeued from or else the QMan HW will indicate an
	 * error.  The values that were calculated above will be
	 * applied when dequeues from a specific channel are enabled.
	 */
	qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);

	p->eqcr.pi_ring_size = 8;
	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
		p->eqcr.pi_ring_size = 32;
		qbman_swp_enqueue_ptr =
			qbman_swp_enqueue_mem_back;
		qbman_swp_enqueue_multiple_ptr =
			qbman_swp_enqueue_multiple_mem_back;
		qbman_swp_enqueue_multiple_desc_ptr =
			qbman_swp_enqueue_multiple_desc_mem_back;
		qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
		qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
		qbman_swp_release_ptr = qbman_swp_release_mem_back;
	}

	for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
		p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
	eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
	p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
	p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
	p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
			& p->eqcr.pi_ci_mask;
	p->eqcr.available = p->eqcr.pi_ring_size;

	return p;
}

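/**
 * qbman_swp_finish() - Destroy the functional object representing the given
 *                      QBMan portal descriptor.
 * @p: the qbman_swp object to be destroyed
 */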
void qbman_swp_finish(struct qbman_swp *p)
{
	kfree(p);
}

/**
 * qbman_swp_interrupt_read_status() - Get the interrupt status
 * @p: the given software portal
 */
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
}

/**
 * qbman_swp_interrupt_clear_status() - Clear the interrupt status
 * @p:    the given software portal
 * @mask: the mask of bits to clear in SWP_ISR
 */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
}

/**
 * qbman_swp_interrupt_get_trigger() - Get the interrupt trigger mask
 * @p: the given software portal
 */
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IER);
}

/**
 * qbman_swp_interrupt_set_trigger() - Set the interrupt trigger mask
 * @p:    the given software portal
 * @mask: the mask of bits to enable in SWP_IER
 */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
}

/**
 * qbman_swp_interrupt_get_inhibit() - Tell if interrupts are inhibited or not
 * @p: the given software portal object
 */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
}

/**
 * qbman_swp_interrupt_set_inhibit() - Inhibit or uninhibit portal interrupts
 * @p:       the given software portal object
 * @inhibit: whether to inhibit the IRQs
 */
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}

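/*
 * Management command interface: start a command with qbman_swp_mc_start(),
 * fill in its payload, commit it with qbman_swp_mc_submit(), then poll
 * qbman_swp_mc_result() for the response.
 */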
void *qbman_swp_mc_start(struct qbman_swp *p)
{
	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
	else
		return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
}

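/*
 * Commit a command to the portal. The caller has already filled in the
 * command's body; only the verb byte remains to be written, with the
 * valid bit merged in so the hardware sees the command as complete.
 */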
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
{
	u8 *v = cmd;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		dma_wmb();
		*v = cmd_verb | p->mc.valid_bit;
	} else {
		*v = cmd_verb | p->mc.valid_bit;
		dma_wmb();
		qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
	}
}

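/*
 * Wait for a management command response: returns NULL if the response
 * is not yet available, otherwise a pointer to the result.
 */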
void *qbman_swp_mc_result(struct qbman_swp *p)
{
	u32 *ret, verb;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
		/*
		 * Remove the valid-bit - command completed if the rest
		 * is non-zero
		 */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mc.valid_bit ^= QB_VALID_BIT;
	} else {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
		/* Command completed if the valid bit is toggled */
		if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
			return NULL;
		/* Command completed if the rest is non-zero */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mr.valid_bit ^= QB_VALID_BIT;
	}

	return ret;
}

#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
enum qb_enqueue_commands {
	enqueue_empty = 0,
	enqueue_response_always = 1,
	enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7

/**
 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 *                         default/starting state.
 * @d: the given enqueue descriptor
 */
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
	memset(d, 0, sizeof(*d));
}

/**
 * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
 * @d:               the enqueue descriptor
 * @respond_success: 1 = enqueue with response after being immediately
 *                   successful; 0 = enqueue with rejections returned on a FQ
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
	d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
	if (respond_success)
		d->verb |= enqueue_response_always;
	else
		d->verb |= enqueue_rejects_to_fq;
}

/**
 * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
 * @d:    the enqueue descriptor
 * @fqid: the id of the frame queue to be enqueued
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
{
	d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
	d->tgtid = cpu_to_le32(fqid);
}

/**
 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
 * @d:       the enqueue descriptor
 * @qdid:    the id of the queuing destination to be enqueued
 * @qd_bin:  the queuing destination bin
 * @qd_prio: the queuing destination priority
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
			  u32 qd_bin, u32 qd_prio)
{
	d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
	d->tgtid = cpu_to_le32(qdid);
	d->qdbin = cpu_to_le16(qd_bin);
	d->qpri = qd_prio;
}

#define EQAR_IDX(eqar)     ((eqar) & 0x7)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

#define QB_RT_BIT ((u32)0x100)

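/**
 * qbman_swp_enqueue_direct() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */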
static
int qbman_swp_enqueue_direct(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd)
{
	u32 flags = 0;
	int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);

	if (ret >= 0)
		ret = 0;
	else
		ret = -EBUSY;
	return ret;
}

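/**
 * qbman_swp_enqueue_mem_back() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */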
static
int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
			       const struct qbman_eq_desc *d,
			       const struct dpaa2_fd *fd)
{
	u32 flags = 0;
	int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);

	if (ret >= 0)
		ret = 0;
	else
		ret = -EBUSY;
	return ret;
}

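/**
 * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s:          the software portal used for enqueue
 * @d:          the enqueue descriptor
 * @fd:         table pointer of frame descriptor table to be enqueued
 * @flags:      table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */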
static
int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)d;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	spin_lock(&s->access_spinlock);
	half_mask = (s->eqcr.pi_ci_mask >> 1);
	full_mask = s->eqcr.pi_ci_mask;

	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.ci &= full_mask;

		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			spin_unlock(&s->access_spinlock);
			return 0;
		}
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++)
		eqcr_pi++;
	s->eqcr.pi = eqcr_pi & full_mask;
	spin_unlock(&s->access_spinlock);

	return num_enqueued;
}

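/**
 * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s:          the software portal used for enqueue
 * @d:          the enqueue descriptor
 * @fd:         table pointer of frame descriptor table to be enqueued
 * @flags:      table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */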
static
int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					const struct qbman_eq_desc *d,
					const struct dpaa2_fd *fd,
					uint32_t *flags,
					int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	unsigned long irq_flags;

	spin_lock(&s->access_spinlock);
	local_irq_save(irq_flags);

	half_mask = (s->eqcr.pi_ci_mask >> 1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
		s->eqcr.ci = *p & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			local_irq_restore(irq_flags);
			spin_unlock(&s->access_spinlock);
			return 0;
		}
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
			     (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
	local_irq_restore(irq_flags);
	spin_unlock(&s->access_spinlock);

	return num_enqueued;
}

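/**
 * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command
 * using multiple enqueue descriptors
 * @s:          the software portal used for enqueue
 * @d:          table of minimal enqueue descriptors
 * @fd:         table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */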
static
int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct dpaa2_fd *fd,
					   int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask >> 1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++)
		eqcr_pi++;
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}

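/**
 * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command
 * using multiple enqueue descriptors
 * @s:          the software portal used for enqueue
 * @d:          table of minimal enqueue descriptors
 * @fd:         table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */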
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask >> 1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
		s->eqcr.ci = *p & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
			     (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);

	return num_enqueued;
}

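/**
 * qbman_swp_push_get() - Get the push dequeue setup
 * @s:           the software portal object
 * @channel_idx: the channel index to query
 * @enabled:     returned boolean of whether push dequeue is enabled
 */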
void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
{
	u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

	WARN_ON(channel_idx > 15);
	*enabled = src | (1 << channel_idx);
}

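/**
 * qbman_swp_push_set() - Enable or disable push dequeue
 * @s:           the software portal object
 * @channel_idx: the channel index (0 to 15)
 * @enable:      enable or disable push dequeue
 */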
void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
{
	u16 dqsrc;

	WARN_ON(channel_idx > 15);
	if (enable)
		s->sdq |= 1 << channel_idx;
	else
		s->sdq &= ~(1 << channel_idx);

	/*
	 * Read make the complete src map. If no channels are enabled
	 * the SDQCR must be 0 or else QMan will assert errors
	 */
	dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
	if (dqsrc != 0)
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
	else
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
}

#define QB_VDQCR_VERB_DCT_SHIFT 0
#define QB_VDQCR_VERB_DT_SHIFT  2
#define QB_VDQCR_VERB_RLS_SHIFT 4
#define QB_VDQCR_VERB_WAE_SHIFT 5

enum qb_pull_dt_e {
	qb_pull_dt_channel,
	qb_pull_dt_workqueue,
	qb_pull_dt_framequeue
};

/**
 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 *                           default/starting state
 * @d: the pull dequeue descriptor to be cleared
 */
void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
	memset(d, 0, sizeof(*d));
}

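/**
 * qbman_pull_desc_set_storage()- Set the pull dequeue storage
 * @d:            the pull dequeue descriptor to be set
 * @storage:      the pointer of the memory to store the dequeue result
 * @storage_phys: the physical address of the storage memory
 * @stash:        to indicate whether write allocate is enabled
 *
 * If not called, or if called with 'storage' as NULL, the result pull dequeues
 * will produce results to DQRR. If 'storage' is non-NULL, then results are
 * produced to the given memory location (using the DMA address which
 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
 * those writes to main-memory express a cache-warming attribute.
 */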
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct dpaa2_dq *storage,
				 dma_addr_t storage_phys,
				 int stash)
{
	/* save the virtual address */
	d->rsp_addr_virt = (u64)(uintptr_t)storage;

	if (!storage) {
		d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
		return;
	}
	d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
	if (stash)
		d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
	else
		d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

	d->rsp_addr = cpu_to_le64(storage_phys);
}

/**
 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
 * @d:         the pull dequeue descriptor to be set
 * @numframes: number of frames to be set, must be between 1 and 16, inclusive
 */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
{
	d->numf = numframes - 1;
}

/**
 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
 * @d:    the pull dequeue descriptor to be set
 * @fqid: the frame queue index of the given FQ
 *
 * Exactly one of the following descriptor "actions" should be set. (Calling
 * any one of these will replace the effect of any prior call to one of these.)
 * - pull dequeue from the given frame queue (FQ)
 * - pull dequeue from any FQ in the given work queue (WQ)
 * - pull dequeue from any FQ in any WQ in the given channel
 */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
{
	d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(fqid);
}

/**
 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
 * @d:    the pull dequeue descriptor to be set
 * @wqid: composed of channel id and wqid within the channel
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
			    enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(wqid);
}

/**
 * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
 *                                 dequeues
 * @d:    the pull dequeue descriptor to be set
 * @chid: the channel id to be dequeued
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
				 enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(chid);
}

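/**
 * qbman_swp_pull_direct() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */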
static
int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;
	dma_wmb();
	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;

	return 0;
}

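/**
 * qbman_swp_pull_mem_back() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */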
static
int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;

	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;
	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);

	return 0;
}

#define QMAN_DQRR_PI_MASK 0xf

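/**
 * qbman_swp_dqrr_next_direct() - Get an valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */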
const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
	u32 flags;
	struct dpaa2_dq *p;

	/*
	 * Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/*
		 * We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
			QMAN_DQRR_PI_MASK;

		/* There are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/*
		 * If next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
	}

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	verb = p->dq.verb;

	/*
	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip this
	 * check, because we've already determined that a new entry is available
	 * and the cacheline holds valid data, so the valid-bit behaviour is
	 * repaired and should tell us what we already knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
		return NULL;
	}
	/*
	 * There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/*
	 * If this is the final response to a volatile dequeue command
	 * indicate that the vdq is available
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESULT_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
	    (flags & DPAA2_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.available);

	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

	return p;
}

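/**
 * qbman_swp_dqrr_next_mem_back() - Get an valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */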
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
	u32 flags;
	struct dpaa2_dq *p;

	/*
	 * Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/*
		 * We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
			QMAN_DQRR_PI_MASK;

		/* There are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/*
		 * If next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
	}

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
	verb = p->dq.verb;

	/* If the valid-bit isn't of the expected polarity, nothing there */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
		return NULL;
	}
	/*
	 * There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/*
	 * If this is the final response to a volatile dequeue command
	 * indicate that the vdq is available
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESULT_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
	    (flags & DPAA2_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.available);

	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

	return p;
}

/**
 * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
 *                            qbman_swp_dqrr_next().
 * @s:  the software portal object
 * @dq: the DQRR entry to be consumed
 */
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
	qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}

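/**
 * qbman_result_has_new_result() - Check and get the dequeue response from the
 *                                 dq storage memory set in pull dequeue command
 * @s:  the software portal object
 * @dq: the dequeue result read from the memory
 *
 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
 * dequeue result.
 */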
int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
	if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
		return 0;

	/*
	 * Set token to be 0 so we will detect change back to 1
	 * next time the looping is traversed. Const is cast away here
	 * as we want users to treat the dequeue responses as read only.
	 */
	((struct dpaa2_dq *)dq)->dq.tok = 0;

	/*
	 * Determine whether VDQCR is available based on whether the
	 * current result is sitting in the first storage location of
	 * the busy command.
	 */
	if (s->vdq.storage == dq) {
		s->vdq.storage = NULL;
		atomic_inc(&s->vdq.available);
	}

	return 1;
}

/**
 * qbman_release_desc_clear() - Clear the contents of a descriptor to
 *                              default/starting state.
 * @d: the release descriptor to be cleared
 */
void qbman_release_desc_clear(struct qbman_release_desc *d)
{
	memset(d, 0, sizeof(*d));
	d->verb = 1 << 5; /* Release Command Valid */
}

/**
 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
 * @d:    the release descriptor to be set
 * @bpid: the bpid value to be set
 */
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
{
	d->bpid = cpu_to_le16(bpid);
}

/**
 * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
 *                                 interrupt source should be asserted after
 *                                 the release command is completed
 * @d:      the release descriptor to be set
 * @enable: enable (1) or disable (0) value
 */
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
	if (enable)
		d->verb |= 1 << 6;
	else
		d->verb &= ~(1 << 6);
}

#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

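/**
 * qbman_swp_release_direct() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     a pointer pointing to the buffer address to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */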
int qbman_swp_release_direct(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
	u32 rar;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* Start the release command */
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	/*
	 * Set the verb byte, have to substitute in the valid-bit
	 * and the number of buffers.
	 */
	dma_wmb();
	p->verb = d->verb | RAR_VB(rar) | num_buffers;

	return 0;
}

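/**
 * qbman_swp_release_mem_back() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     a pointer pointing to the buffer address to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */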
int qbman_swp_release_mem_back(struct qbman_swp *s,
			       const struct qbman_release_desc *d,
			       const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
	u32 rar;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* Start the release command */
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	p->verb = d->verb | RAR_VB(rar) | num_buffers;
	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
			     RAR_IDX(rar) * 4, QMAN_RT_MODE);

	return 0;
}

struct qbman_acquire_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	u8 num;
	u8 reserved2[59];
};

struct qbman_acquire_rslt {
	u8 verb;
	u8 rslt;
	__le16 reserved;
	u8 num;
	u8 reserved2[3];
	__le64 buf[7];
};

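/**
 * qbman_swp_acquire() - Issue a buffer acquire command
 * @s:           the software portal object
 * @bpid:        the buffer pool index
 * @buffers:     a pointer pointing to the acquired buffer addresses
 * @num_buffers: number of buffers to be acquired, must be less than 8
 *
 * Return 0 for success, or negative error code if the acquire command
 * fails.
 */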
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
		      unsigned int num_buffers)
{
	struct qbman_acquire_desc *p;
	struct qbman_acquire_rslt *r;
	int i;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);

	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->bpid = cpu_to_le16(bpid);
	p->num = num_buffers;

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
	if (unlikely(!r)) {
		pr_err("qbman: acquire from BPID %d failed, no response\n",
		       bpid);
		return -EIO;
	}

	/* Decode the outcome */
	WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	WARN_ON(r->num > num_buffers);

	/* Copy the acquired buffers to the caller's array */
	for (i = 0; i < r->num; i++)
		buffers[i] = le64_to_cpu(r->buf[i]);

	return (int)r->num;
}

struct qbman_alt_fq_state_desc {
	u8 verb;
	u8 reserved[3];
	__le32 fqid;
	u8 reserved2[56];
};

struct qbman_alt_fq_state_rslt {
	u8 verb;
	u8 rslt;
	u8 reserved[62];
};

#define ALT_FQ_FQID_MASK 0x00FFFFFF

int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
			   u8 alt_fq_verb)
{
	struct qbman_alt_fq_state_desc *p;
	struct qbman_alt_fq_state_rslt *r;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, alt_fq_verb);
	if (unlikely(!r)) {
		pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
		       alt_fq_verb);
		return -EIO;
	}

	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
		       fqid, r->verb, r->rslt);
		return -EIO;
	}

	return 0;
}

struct qbman_cdan_ctrl_desc {
	u8 verb;
	u8 reserved;
	__le16 ch;
	u8 we;
	u8 ctrl;
	__le16 reserved2;
	__le64 cdan_ctx;
	u8 reserved3[48];
};

struct qbman_cdan_ctrl_rslt {
	u8 verb;
	u8 rslt;
	__le16 ch;
	u8 reserved[60];
};

int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
		       u8 we_mask, u8 cdan_en,
		       u64 ctx)
{
	struct qbman_cdan_ctrl_desc *p = NULL;
	struct qbman_cdan_ctrl_rslt *r = NULL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->ch = cpu_to_le16(channelid);
	p->we = we_mask;
	if (cdan_en)
		p->ctrl = 1;
	else
		p->ctrl = 0;
	p->cdan_ctx = cpu_to_le64(ctx);

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
	if (unlikely(!r)) {
		pr_err("qbman: wqchan config failed, no response\n");
		return -EIO;
	}

	WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
		       channelid, r->rslt);
		return -EIO;
	}

	return 0;
}

#define QBMAN_RESPONSE_VERB_MASK 0x7f
#define QBMAN_FQ_QUERY_NP        0x45
#define QBMAN_BP_QUERY           0x32

struct qbman_fq_query_desc {
	u8 verb;
	u8 reserved[3];
	__le32 fqid;
	u8 reserved2[56];
};

int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
			 struct qbman_fq_query_np_rslt *r)
{
	struct qbman_fq_query_desc *p;
	void *resp;

	p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* FQID is a 24 bit value */
	p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
	resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
	if (!resp) {
		pr_err("qbman: Query FQID %d NP fields failed, no response\n",
		       fqid);
		return -EIO;
	}
	*r = *(struct qbman_fq_query_np_rslt *)resp;
	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);

	/* Determine success or failure */
	if (r->rslt != QBMAN_MC_RSLT_OK) {
		pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
		       p->fqid, r->rslt);
		return -EIO;
	}

	return 0;
}

/**
 * qbman_fq_state_frame_count() - Get the frame count in the FQ from the
 *                                query result
 * @r: the FQ query result
 */
u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
{
	return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
}

/**
 * qbman_fq_state_byte_count() - Get the byte count in the FQ from the
 *                               query result
 * @r: the FQ query result
 */
u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
{
	return le32_to_cpu(r->byte_cnt);
}

struct qbman_bp_query_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	u8 reserved2[60];
};

int qbman_bp_query(struct qbman_swp *s, u16 bpid,
		   struct qbman_bp_query_rslt *r)
{
	struct qbman_bp_query_desc *p;
	void *resp;

	p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	p->bpid = cpu_to_le16(bpid);
	resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
	if (!resp) {
		pr_err("qbman: Query BPID %d fields failed, no response\n",
		       bpid);
		return -EIO;
	}
	*r = *(struct qbman_bp_query_rslt *)resp;
	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);

	/* Determine success or failure */
	if (r->rslt != QBMAN_MC_RSLT_OK) {
		pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	return 0;
}

/**
 * qbman_bp_info_num_free_bufs() - Get the number of free buffers in the
 *                                 buffer pool from the query result
 * @a: the buffer pool query result
 */
u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
{
	return le32_to_cpu(a->fill);
}