1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
#include "qman_priv.h"

/* Portal tunables */
#define DQRR_MAXFILL 15		/* max h/w fill of the 16-entry DQRR ring */
#define EQCR_ITHRESH 4		/* if EQCR congests, interrupt threshold */
#define IRQNAME "QMan portal %d"
#define MAX_IRQNAME 16		/* big enough for "QMan portal %d" */
#define QMAN_POLL_LIMIT 32	/* max DQRR entries handled per poll/IRQ pass */
#define QMAN_PIRQ_DQRR_ITHRESH 12	/* default DQRR interrupt threshold */
#define QMAN_PIRQ_MR_ITHRESH 4		/* default MR interrupt threshold */
#define QMAN_PIRQ_IPERIOD 100		/* default interrupt-timeout period */
41
42
43
44#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
45
46#define QM_REG_EQCR_PI_CINH 0x3000
47#define QM_REG_EQCR_CI_CINH 0x3040
48#define QM_REG_EQCR_ITR 0x3080
49#define QM_REG_DQRR_PI_CINH 0x3100
50#define QM_REG_DQRR_CI_CINH 0x3140
51#define QM_REG_DQRR_ITR 0x3180
52#define QM_REG_DQRR_DCAP 0x31C0
53#define QM_REG_DQRR_SDQCR 0x3200
54#define QM_REG_DQRR_VDQCR 0x3240
55#define QM_REG_DQRR_PDQCR 0x3280
56#define QM_REG_MR_PI_CINH 0x3300
57#define QM_REG_MR_CI_CINH 0x3340
58#define QM_REG_MR_ITR 0x3380
59#define QM_REG_CFG 0x3500
60#define QM_REG_ISR 0x3600
61#define QM_REG_IER 0x3640
62#define QM_REG_ISDR 0x3680
63#define QM_REG_IIR 0x36C0
64#define QM_REG_ITPR 0x3740
65
66
67#define QM_CL_EQCR 0x0000
68#define QM_CL_DQRR 0x1000
69#define QM_CL_MR 0x2000
70#define QM_CL_EQCR_PI_CENA 0x3000
71#define QM_CL_EQCR_CI_CENA 0x3040
72#define QM_CL_DQRR_PI_CENA 0x3100
73#define QM_CL_DQRR_CI_CENA 0x3140
74#define QM_CL_MR_PI_CENA 0x3300
75#define QM_CL_MR_CI_CENA 0x3340
76#define QM_CL_CR 0x3800
77#define QM_CL_RR0 0x3900
78#define QM_CL_RR1 0x3940
79
80#else
81
82#define QM_REG_EQCR_PI_CINH 0x0000
83#define QM_REG_EQCR_CI_CINH 0x0004
84#define QM_REG_EQCR_ITR 0x0008
85#define QM_REG_DQRR_PI_CINH 0x0040
86#define QM_REG_DQRR_CI_CINH 0x0044
87#define QM_REG_DQRR_ITR 0x0048
88#define QM_REG_DQRR_DCAP 0x0050
89#define QM_REG_DQRR_SDQCR 0x0054
90#define QM_REG_DQRR_VDQCR 0x0058
91#define QM_REG_DQRR_PDQCR 0x005c
92#define QM_REG_MR_PI_CINH 0x0080
93#define QM_REG_MR_CI_CINH 0x0084
94#define QM_REG_MR_ITR 0x0088
95#define QM_REG_CFG 0x0100
96#define QM_REG_ISR 0x0e00
97#define QM_REG_IER 0x0e04
98#define QM_REG_ISDR 0x0e08
99#define QM_REG_IIR 0x0e0c
100#define QM_REG_ITPR 0x0e14
101
102
103#define QM_CL_EQCR 0x0000
104#define QM_CL_DQRR 0x1000
105#define QM_CL_MR 0x2000
106#define QM_CL_EQCR_PI_CENA 0x3000
107#define QM_CL_EQCR_CI_CENA 0x3100
108#define QM_CL_DQRR_PI_CENA 0x3200
109#define QM_CL_DQRR_CI_CENA 0x3300
110#define QM_CL_MR_PI_CENA 0x3400
111#define QM_CL_MR_CI_CENA 0x3500
112#define QM_CL_CR 0x3800
113#define QM_CL_RR0 0x3900
114#define QM_CL_RR1 0x3940
115#endif
116
117
118
119
120
121
122
123
124
125
126#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
/*
 * Portal modes.
 *   Enum types;
 *     pmode == production mode
 *     cmode == consumption mode,
 *     dmode == h/w dequeue mode.
 *   Enum values use 3 letter codes. First letter matches the portal mode,
 *   remaining two letters indicate;
 *     ci == cache-inhibited portal register
 *     ce == cache-enabled portal register
 *     vb == in-band valid-bit (cache-enabled)
 *     dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
 */
enum qm_eqcr_pmode {
	qm_eqcr_pci = 0,	/* PI index, cache-inhibited */
	qm_eqcr_pce = 1,	/* PI index, cache-enabled */
	qm_eqcr_pvb = 2		/* valid-bit */
};
enum qm_dqrr_dmode {
	qm_dqrr_dpush = 0,	/* SDQCR + VDQCR */
	qm_dqrr_dpull = 1	/* PDQCR */
};
enum qm_dqrr_pmode {
	qm_dqrr_pci,		/* reads DQRR_PI_CINH */
	qm_dqrr_pce,		/* reads DQRR_PI_CENA */
	qm_dqrr_pvb		/* reads valid-bit */
};
enum qm_dqrr_cmode {
	qm_dqrr_cci = 0,	/* CI index, cache-inhibited */
	qm_dqrr_cce = 1,	/* CI index, cache-enabled */
	qm_dqrr_cdc = 2		/* Discrete Consumption Acknowledgment */
};
enum qm_mr_pmode {
	qm_mr_pci,		/* reads MR_PI_CINH */
	qm_mr_pce,		/* reads MR_PI_CENA */
	qm_mr_pvb		/* reads valid-bit */
};
enum qm_mr_cmode {
	qm_mr_cci = 0,		/* CI index, cache-inhibited */
	qm_mr_cce = 1		/* CI index, cache-enabled */
};
170
171
172
/* --- Portal ring sizes (entries, all powers of two) --- */
#define QM_EQCR_SIZE 8
#define QM_DQRR_SIZE 16
#define QM_MR_SIZE 8

/* "Enqueue Command" ring entry */
struct qm_eqcr_entry {
	u8 _ncw_verb; /* writes to this are non-coherent */
	u8 dca;
	__be16 seqnum;
	u8 __reserved[4];
	__be32 fqid;	/* 24-bit */
	__be32 tag;
	struct qm_fd fd;
	u8 __reserved3[32];
} __packed;
#define QM_EQCR_VERB_VBIT 0x80
#define QM_EQCR_VERB_CMD_MASK 0x61	/* but only one value; */
#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
#define QM_EQCR_SEQNUM_NESN 0x8000	/* Advance NESN */
#define QM_EQCR_SEQNUM_NLIS 0x4000	/* More fragments to come */
#define QM_EQCR_SEQNUM_SEQMASK 0x3fff	/* sequence number goes here */

/* s/w view of the EQCR: ring/cursor point into the cache-enabled region */
struct qm_eqcr {
	struct qm_eqcr_entry *ring, *cursor;
	u8 ci, available, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy;
	enum qm_eqcr_pmode pmode;
#endif
};

/* s/w view of the DQRR (dequeue response ring) */
struct qm_dqrr {
	const struct qm_dqrr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_dqrr_dmode dmode;
	enum qm_dqrr_pmode pmode;
	enum qm_dqrr_cmode cmode;
#endif
};

/* s/w view of the MR (message ring) */
struct qm_mr {
	union qm_mr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_mr_pmode pmode;
	enum qm_mr_cmode cmode;
#endif
};
222
223
224
/* MC (Management Command) command layouts */
/* "FQ" command layout */
struct qm_mcc_fq {
	u8 _ncw_verb;
	u8 __reserved1[3];
	__be32 fqid;	/* 24-bit */
	u8 __reserved2[56];
} __packed;

/* "CGR" command layout */
struct qm_mcc_cgr {
	u8 _ncw_verb;
	u8 __reserved1[30];
	u8 cgid;
	u8 __reserved2[32];
};

#define QM_MCC_VERB_VBIT 0x80
#define QM_MCC_VERB_MASK 0x7f	/* where the verb contains; */
#define QM_MCC_VERB_INITFQ_PARKED 0x40
#define QM_MCC_VERB_INITFQ_SCHED 0x41
#define QM_MCC_VERB_QUERYFQ 0x44
#define QM_MCC_VERB_QUERYFQ_NP 0x45	/* "non-programmable" fields */
#define QM_MCC_VERB_QUERYWQ 0x46
#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
#define QM_MCC_VERB_ALTER_SCHED 0x48	/* Schedule FQ */
#define QM_MCC_VERB_ALTER_FE 0x49	/* Force Eligible FQ */
#define QM_MCC_VERB_ALTER_RETIRE 0x4a	/* Retire FQ */
#define QM_MCC_VERB_ALTER_OOS 0x4b	/* Take FQ out of service */
#define QM_MCC_VERB_ALTER_FQXON 0x4d	/* FQ XON */
#define QM_MCC_VERB_ALTER_FQXOFF 0x4e	/* FQ XOFF */
#define QM_MCC_VERB_INITCGR 0x50
#define QM_MCC_VERB_MODIFYCGR 0x51
#define QM_MCC_VERB_CGRTESTWRITE 0x52
#define QM_MCC_VERB_QUERYCGR 0x58
#define QM_MCC_VERB_QUERYCONGESTION 0x59
union qm_mc_command {
	struct {
		u8 _ncw_verb; /* writes to this are non-coherent */
		u8 __reserved[63];
	};
	struct qm_mcc_initfq initfq;
	struct qm_mcc_initcgr initcgr;
	struct qm_mcc_fq fq;
	struct qm_mcc_cgr cgr;
};
269
270
271
/* MC (Management Command) result layouts */
/* "Query FQ" */
struct qm_mcr_queryfq {
	u8 verb;
	u8 result;
	u8 __reserved1[8];
	struct qm_fqd fqd;	/* the FQD fields are here */
	u8 __reserved2[30];
} __packed;

/* "Alter FQ State Commands" */
struct qm_mcr_alterfq {
	u8 verb;
	u8 result;
	u8 fqs;		/* Frame Queue Status */
	u8 __reserved1[61];
};
#define QM_MCR_VERB_RRID 0x80
#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL 0x00
#define QM_MCR_RESULT_OK 0xf0
#define QM_MCR_RESULT_ERR_FQID 0xf1
#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3	/* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
#define QM_MCR_RESULT_PENDING 0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
#define QM_MCR_FQS_ORLPRESENT 0x02	/* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY 0x01	/* FQ has enqueued frames */
#define QM_MCR_TIMEOUT 10000		/* us */
union qm_mc_result {
	struct {
		u8 verb;
		u8 result;
		u8 __reserved1[62];
	};
	struct qm_mcr_queryfq queryfq;
	struct qm_mcr_alterfq alterfq;
	struct qm_mcr_querycgr querycgr;
	struct qm_mcr_querycongestion querycongestion;
	struct qm_mcr_querywq querywq;
	struct qm_mcr_queryfq_np queryfq_np;
};
323
/* Management-command interface state; cr/rr point into cache-enabled area */
struct qm_mc {
	union qm_mc_command *cr;
	union qm_mc_result *rr;
	u8 rridx, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum {
		/* Can be _mc_start()ed */
		qman_mc_idle,
		/* Can be _mc_commit()ed or _mc_abort()ed */
		qman_mc_user,
		/* Can only be _mc_retry()ed */
		qman_mc_hw
	} state;
#endif
};

struct qm_addr {
	void *ce;		/* cache-enabled */
	__be32 *ce_be;		/* same value as above but for direct access */
	void __iomem *ci;	/* cache-inhibited */
};

struct qm_portal {
	/*
	 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
	 * is setup-only, so isn't a cause for a concern. In other words, don't
	 * rearrange this structure on a whim, there be dragons ...
	 */
	struct qm_addr addr;
	struct qm_eqcr eqcr;
	struct qm_dqrr dqrr;
	struct qm_mr mr;
	struct qm_mc mc;
} ____cacheline_aligned;
359
360
/* Cache-inhibited register access (big-endian portal registers). */
static inline u32 qm_in(struct qm_portal *p, u32 offset)
{
	return ioread32be(p->addr.ci + offset);
}

static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{
	iowrite32be(val, p->addr.ci + offset);
}

/* Cache Enabled Portal Access */
static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
{
	dpaa_invalidate(p->addr.ce + offset);
}

static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
{
	dpaa_touch_ro(p->addr.ce + offset);
}

/* Big-endian read of a word from the cache-enabled region. */
static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{
	return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
}
386
387
388
389#define EQCR_SHIFT ilog2(sizeof(struct qm_eqcr_entry))
390#define EQCR_CARRY (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
391
392
393static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
394{
395 uintptr_t addr = (uintptr_t)p;
396
397 addr &= ~EQCR_CARRY;
398
399 return (struct qm_eqcr_entry *)addr;
400}
401
402
403static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
404{
405 return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
406}
407
408
409static inline void eqcr_inc(struct qm_eqcr *eqcr)
410{
411
412 struct qm_eqcr_entry *partial = eqcr->cursor + 1;
413
414 eqcr->cursor = eqcr_carryclear(partial);
415 if (partial != eqcr->cursor)
416 eqcr->vbit ^= QM_EQCR_VERB_VBIT;
417}
418
/*
 * Initialise the s/w EQCR state from the h/w producer/consumer indices and
 * program the portal CFG register with the stashing/production settings.
 * Returns 0 (cannot fail).
 */
static inline int qm_eqcr_init(struct qm_portal *portal,
			       enum qm_eqcr_pmode pmode,
			       unsigned int eq_stash_thresh,
			       int eq_stash_prio)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u32 cfg;
	u8 pi;

	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
	eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	eqcr->cursor = eqcr->ring + pi;
	/* PI's wrap bit (bit QM_EQCR_SIZE) gives the current valid-bit phase */
	eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
		     QM_EQCR_VERB_VBIT : 0;
	eqcr->available = QM_EQCR_SIZE - 1 -
			  dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
	eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
	eqcr->pmode = pmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
	      (eq_stash_thresh << 28) |		/* stashing threshold */
	      (eq_stash_prio << 26) |		/* stashing priority */
	      ((pmode & 0x3) << 24);		/* production mode */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}
449
450static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
451{
452 return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
453}
454
/*
 * Teardown-time consistency checks: the s/w view of the EQCR should match
 * h/w, with no uncommitted entries and no unseen completions remaining.
 */
static inline void qm_eqcr_finish(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);

	DPAA_ASSERT(!eqcr->busy);
	if (pi != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("losing uncommitted EQCR entries\n");
	if (ci != eqcr->ci)
		pr_crit("missing existing EQCR completions\n");
	if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("EQCR destroyed unquiesced\n");
}
469
/*
 * Begin an enqueue: returns a zeroed EQCR entry to fill in, or NULL if the
 * ring is full per the cached 'available' count (no h/w read is done).
 */
static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
								 *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available)
		return NULL;

#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}

/*
 * As above, but when the cached count says "full", re-read the h/w consumer
 * index (cache-enabled) to pick up completed entries before giving up.
 */
static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
								*portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available) {
		old_ci = eqcr->ci;
		eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
			   (QM_EQCR_SIZE - 1);
		diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
		eqcr->available += diff;
		if (!diff)
			return NULL;
	}
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}
508
/* Debug-build sanity checks run just before committing an EQCR entry. */
static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{
	DPAA_ASSERT(eqcr->busy);
	DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
	DPAA_ASSERT(eqcr->available >= 1);
}

/*
 * Commit the entry prepared by qm_eqcr_start_*(): the verb byte (carrying
 * the current valid-bit phase) is written last, after a barrier, so h/w
 * only ever consumes a fully-written entry.
 */
static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	struct qm_eqcr_entry *eqcursor;

	eqcr_commit_checks(eqcr);
	DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
	/* order the entry's payload before the verb that publishes it */
	dma_wmb();
	eqcursor = eqcr->cursor;
	eqcursor->_ncw_verb = myverb | eqcr->vbit;
	dpaa_flush(eqcursor);
	eqcr_inc(eqcr);
	eqcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
#endif
}
533
/* Speculatively pull the h/w EQCR consumer index into cache. */
static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
{
	qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
}

/*
 * Refresh the cached consumer index from h/w and credit 'available' with
 * the number of newly-completed entries; returns that count.
 */
static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci = eqcr->ci;

	eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
	eqcr->available += diff;
	return diff;
}

/* Set the EQCR interrupt threshold (mirrored in s/w state). */
static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	eqcr->ithresh = ithresh;
	qm_out(portal, QM_REG_EQCR_ITR, ithresh);
}

/* Number of free EQCR slots (per the cached count). */
static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	return eqcr->available;
}

/* Number of in-flight EQCR entries (per the cached count). */
static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	return QM_EQCR_SIZE - 1 - eqcr->available;
}
572
573
574
575#define DQRR_SHIFT ilog2(sizeof(struct qm_dqrr_entry))
576#define DQRR_CARRY (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)
577
578static const struct qm_dqrr_entry *dqrr_carryclear(
579 const struct qm_dqrr_entry *p)
580{
581 uintptr_t addr = (uintptr_t)p;
582
583 addr &= ~DQRR_CARRY;
584
585 return (const struct qm_dqrr_entry *)addr;
586}
587
588static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
589{
590 return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
591}
592
593static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
594{
595 return dqrr_carryclear(e + 1);
596}
597
/* CFG[23:20] limits how full h/w is allowed to make the DQRR. */
static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
{
	qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
	       ((mf & (QM_DQRR_SIZE - 1)) << 20));
}

/*
 * Initialise the s/w DQRR state from h/w, invalidate the ring's cachelines
 * and program the dequeue/consumption modes into CFG. Returns 0.
 */
static inline int qm_dqrr_init(struct qm_portal *portal,
			       const struct qm_portal_config *config,
			       enum qm_dqrr_dmode dmode,
			       enum qm_dqrr_pmode pmode,
			       enum qm_dqrr_cmode cmode, u8 max_fill)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	u32 cfg;

	/* Make sure the DQRR will be idle when we enable */
	qm_out(portal, QM_REG_DQRR_SDQCR, 0);
	qm_out(portal, QM_REG_DQRR_VDQCR, 0);
	qm_out(portal, QM_REG_DQRR_PDQCR, 0);
	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
	dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->cursor = dqrr->ring + dqrr->ci;
	dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
	/* the PI register's wrap bit gives the current valid-bit phase */
	dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
		     QM_DQRR_VERB_VBIT : 0;
	dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	dqrr->dmode = dmode;
	dqrr->pmode = pmode;
	dqrr->cmode = cmode;
#endif
	/* invalidate every ring entry before h/w starts producing into it */
	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
		dpaa_invalidate(qm_cl(dqrr->ring, cfg));
	cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
	      ((max_fill & (QM_DQRR_SIZE - 1)) << 20) |	/* DQRR max fill */
	      ((dmode & 1) << 18) |			/* dequeue mode */
	      ((cmode & 3) << 16) |			/* consumption mode */
	      0xa0 |
	      (0 ? 0x40 : 0) |
	      (0 ? 0x10 : 0);
	qm_out(portal, QM_REG_CFG, cfg);
	qm_dqrr_set_maxfill(portal, max_fill);
	return 0;
}
644
static inline void qm_dqrr_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_dqrr *dqrr = &portal->dqrr;

	/* in CDC mode the CI index is h/w-managed, so only check otherwise */
	if (dqrr->cmode != qm_dqrr_cdc &&
	    dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
		pr_crit("Ignoring completed DQRR entries\n");
#endif
}

/* Peek at the oldest unconsumed dequeue entry; NULL if the ring is empty. */
static inline const struct qm_dqrr_entry *qm_dqrr_current(
						struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;

	if (!dqrr->fill)
		return NULL;
	return dqrr->cursor;
}

/* Advance past the current entry; returns the number still outstanding. */
static inline u8 qm_dqrr_next(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->fill);
	dqrr->cursor = dqrr_inc(dqrr->cursor);
	return --dqrr->fill;
}
674
/*
 * Poll for a newly h/w-produced DQRR entry: if the entry at 'pi' carries
 * the expected valid-bit phase it has been produced, so account it in
 * 'fill' and advance the producer index (toggling the phase on wrap).
 */
static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);

	DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
#ifndef CONFIG_FSL_PAMU
	/*
	 * If PAMU is not available we need to invalidate the cache.
	 * When PAMU is available the cache is updated by stash
	 */
	dpaa_invalidate_touch_ro(res);
#endif
	if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
		dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
		if (!dqrr->pi)
			dqrr->vbit ^= QM_DQRR_VERB_VBIT;
		dqrr->fill++;
	}
}

/* DCA-acknowledge a single consumed entry, optionally parking the FQ. */
static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
					    const struct qm_dqrr_entry *dq,
					    int park)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
	int idx = dqrr_ptr2idx(dq);

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	DPAA_ASSERT((dqrr->ring + idx) == dq);
	DPAA_ASSERT(idx < QM_DQRR_SIZE);
	qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) |	/* DCAP single mode */
	       ((park ? 1 : 0) << 6) |			/* park request */
	       idx);					/* consume index */
}

/* DCA-acknowledge a set of entries given as a bitmask of ring indices. */
static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) |	/* DCAP bitmask mode */
	       (bitmask << 16));
}

/* Program the static dequeue command register. */
static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
{
	qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
}

/* Program the volatile dequeue command register. */
static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
{
	qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
}

/* Set the DQRR interrupt threshold. */
static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	qm_out(portal, QM_REG_DQRR_ITR, ithresh);
}
734
735
736
737#define MR_SHIFT ilog2(sizeof(union qm_mr_entry))
738#define MR_CARRY (uintptr_t)(QM_MR_SIZE << MR_SHIFT)
739
740static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
741{
742 uintptr_t addr = (uintptr_t)p;
743
744 addr &= ~MR_CARRY;
745
746 return (union qm_mr_entry *)addr;
747}
748
749static inline int mr_ptr2idx(const union qm_mr_entry *e)
750{
751 return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
752}
753
754static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
755{
756 return mr_carryclear(e + 1);
757}
758
/*
 * Initialise the s/w MR (message ring) state from h/w and program the
 * consumption mode into CFG. Returns 0 (cannot fail).
 */
static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
			     enum qm_mr_cmode cmode)
{
	struct qm_mr *mr = &portal->mr;
	u32 cfg;

	mr->ring = portal->addr.ce + QM_CL_MR;
	mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
	mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
	mr->cursor = mr->ring + mr->ci;
	mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
	/* the PI register's wrap bit gives the current valid-bit phase */
	mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
		   ? QM_MR_VERB_VBIT : 0;
	mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mr->pmode = pmode;
	mr->cmode = cmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
	      ((cmode & 1) << 8);	/* MR consumption mode */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}

/* Teardown-time check: the cursor should have caught up with CI. */
static inline void qm_mr_finish(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	if (mr->ci != mr_ptr2idx(mr->cursor))
		pr_crit("Ignoring completed MR entries\n");
}

/* Peek at the oldest unconsumed message; NULL if the MR is empty. */
static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	if (!mr->fill)
		return NULL;
	return mr->cursor;
}

/* Advance past the current message; returns the number still outstanding. */
static inline int qm_mr_next(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->fill);
	mr->cursor = mr_inc(mr->cursor);
	return --mr->fill;
}

/*
 * Poll for a newly-produced message: the entry at 'pi' is ours once its
 * verb carries the expected valid-bit phase. Always re-invalidates the
 * next entry to be polled.
 */
static inline void qm_mr_pvb_update(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;
	union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);

	DPAA_ASSERT(mr->pmode == qm_mr_pvb);

	if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
		if (!mr->pi)
			mr->vbit ^= QM_MR_VERB_VBIT;
		mr->fill++;
		res = mr_inc(res);
	}
	dpaa_invalidate_touch_ro(res);
}

/* Tell h/w we have consumed 'num' more messages (cache-inhibited CI mode). */
static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

/* Consume everything up to (but not including) the current cursor. */
static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = mr_ptr2idx(mr->cursor);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

/* Set the MR interrupt threshold. */
static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	qm_out(portal, QM_REG_MR_ITR, ithresh);
}
848
849
850
/* --- Management command API --- */

/*
 * Initialise the MC interface: derive the response-register index and the
 * valid-bit phase from the verb of the last command written. Returns 0.
 */
static inline int qm_mc_init(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;

	mc->cr = portal->addr.ce + QM_CL_CR;
	mc->rr = portal->addr.ce + QM_CL_RR0;
	/* start on the response slot matching the last command's vbit phase */
	mc->rridx = (mc->cr->_ncw_verb & QM_MCC_VERB_VBIT)
		    ? 0 : 1;
	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return 0;
}

/* Teardown-time check: no MC command should be left in flight. */
static inline void qm_mc_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
	if (mc->state != qman_mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}

/* Begin composing a command; must be followed by qm_mc_commit(). */
static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_user;
#endif
	dpaa_zero(mc->cr);
	return mc->cr;
}

/*
 * Issue the composed command: the verb (with the current valid-bit) is
 * written last, after a barrier, so h/w sees a complete command.
 */
static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_user);
	dma_wmb();
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_hw;
#endif
}

/*
 * Poll for the response to the last committed command; returns NULL while
 * h/w has not yet produced it.
 */
static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case people are wondering...
	 */
	if (!rr->verb) {
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= QM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return rr;
}

/* Busy-wait (up to QM_MCR_TIMEOUT us) for the MC result; 0 on timeout. */
static inline int qm_mc_result_timeout(struct qm_portal *portal,
				       union qm_mc_result **mcr)
{
	int timeout = QM_MCR_TIMEOUT;

	do {
		*mcr = qm_mc_result(portal);
		if (*mcr)
			break;
		udelay(1);
	} while (--timeout);

	return timeout;
}
941
942static inline void fq_set(struct qman_fq *fq, u32 mask)
943{
944 fq->flags |= mask;
945}
946
947static inline void fq_clear(struct qman_fq *fq, u32 mask)
948{
949 fq->flags &= ~mask;
950}
951
952static inline int fq_isset(struct qman_fq *fq, u32 mask)
953{
954 return fq->flags & mask;
955}
956
957static inline int fq_isclear(struct qman_fq *fq, u32 mask)
958{
959 return !(fq->flags & mask);
960}
961
/* Higher-level per-CPU portal state wrapping the low-level struct qm_portal */
struct qman_portal {
	struct qm_portal p;
	/* PORTAL_BITS_*** - dynamic, strictly "internal" */
	unsigned long bits;
	/* interrupt sources processed by portal_isr(), no loss/gain allowed */
	unsigned long irq_sources;
	u32 use_eqcr_ci_stashing;
	/* only 1 volatile dequeue at a time */
	struct qman_fq *vdqcr_owned;
	u32 sdqcr;
	/* probing time config params for cpu-affine portals */
	const struct qm_portal_config *config;
	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
	struct qman_cgrs *cgrs;
	/* linked-list of CSCN handlers. */
	struct list_head cgr_cbs;
	/* list lock */
	spinlock_t cgr_lock;
	struct work_struct congestion_work;
	struct work_struct mr_work;
	char irqname[MAX_IRQNAME];
};
984
/* CPUs that own an affine portal, their channel IDs, and the portals */
static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static u16 affine_channels[NR_CPUS];
static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
struct qman_portal *affine_portals[NR_CPUS];

/* Pin to the current CPU and return its portal; pair with put_affine_portal */
static inline struct qman_portal *get_affine_portal(void)
{
	return &get_cpu_var(qman_affine_portal);
}

static inline void put_affine_portal(void)
{
	put_cpu_var(qman_affine_portal);
}

static struct workqueue_struct *qm_portal_wq;

/* Create the workqueue (max_active = 1) used for deferred portal work. */
int qman_wq_alloc(void)
{
	qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
	if (!qm_portal_wq)
		return -ENOMEM;
	return 0;
}
1010
1011
1012
1013
1014
1015static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
1016
1017static struct qman_fq **fq_table;
1018static u32 num_fqids;
1019
1020int qman_alloc_fq_table(u32 _num_fqids)
1021{
1022 num_fqids = _num_fqids;
1023
1024 fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *));
1025 if (!fq_table)
1026 return -ENOMEM;
1027
1028 pr_debug("Allocated fq lookup table at %p, entry count %u\n",
1029 fq_table, num_fqids * 2);
1030 return 0;
1031}
1032
/* Look up an FQ object by lookup-table index; NULL if none installed. */
static struct qman_fq *idx_to_fq(u32 idx)
{
	struct qman_fq *fq;

#ifdef CONFIG_FSL_DPAA_CHECKING
	if (WARN_ON(idx >= num_fqids * 2))
		return NULL;
#endif
	fq = fq_table[idx];
	DPAA_ASSERT(!fq || idx == fq->idx);

	return fq;
}

/*
 * Only returns full-service fq objects, not enqueue-only
 * references (QMAN_FQ_FLAG_NO_MODIFY).
 */
static struct qman_fq *fqid_to_fq(u32 fqid)
{
	return idx_to_fq(fqid * 2);
}

/* On 64-bit the 32-bit h/w tag holds a table index rather than a pointer. */
static struct qman_fq *tag_to_fq(u32 tag)
{
#if BITS_PER_LONG == 64
	return idx_to_fq(tag);
#else
	return (struct qman_fq *)tag;
#endif
}

static u32 fq_to_tag(struct qman_fq *fq)
{
#if BITS_PER_LONG == 64
	return fq->idx;
#else
	return (u32)fq;
#endif
}
1073
1074static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
1075static inline unsigned int __poll_portal_fast(struct qman_portal *p,
1076 unsigned int poll_limit);
1077static void qm_congestion_task(struct work_struct *work);
1078static void qm_mr_process_task(struct work_struct *work);
1079
/*
 * Portal interrupt handler: DQRR (fast-path) dequeues are handled inline,
 * every other enabled source is passed to the slow-path handler. Returns
 * IRQ_NONE when none of our enabled sources is asserted.
 */
static irqreturn_t portal_isr(int irq, void *ptr)
{
	struct qman_portal *p = ptr;
	/* always clear the DQAVAIL bits, plus whatever we service below */
	u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
	u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;

	if (unlikely(!is))
		return IRQ_NONE;

	/* DQRR-handling if it's interrupt-driven */
	if (is & QM_PIRQ_DQRI)
		__poll_portal_fast(p, QMAN_POLL_LIMIT);
	/* Handling of anything else that's interrupt-driven */
	clear |= __poll_portal_slow(p, is);
	qm_out(&p->p, QM_REG_ISR, clear);
	return IRQ_HANDLED;
}
1098
/*
 * Drain any FQRNI (retirement notification) messages left in the MR by a
 * previous user of the portal. Returns 0 once the MR is empty, -1 if a
 * non-FQRNI message is found (the MR is then genuinely dirty).
 */
static int drain_mr_fqrni(struct qm_portal *p)
{
	const union qm_mr_entry *msg;
loop:
	msg = qm_mr_current(p);
	if (!msg) {
		/*
		 * if MR was full and h/w had other FQRNI entries to produce, we
		 * need to allow it time to produce those entries once the
		 * existing entries are consumed. A worst-case situation
		 * (fully-loaded system) means h/w sequencers may have to do 3-4
		 * other things before servicing the portal's MR pump, each of
		 * which (if slow) may take ~50 qman cycles (which is ~200
		 * processor cycles). So rounding up and then multiplying this
		 * worst-case estimate by a factor of 10, just to be
		 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
		 * one entry at a time, so h/w has an opportunity to produce new
		 * entries well before the ring has been fully consumed, so
		 * we're being *really* paranoid here.
		 */
		msleep(1);
		msg = qm_mr_current(p);
		if (!msg)
			return 0;
	}
	if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
		/* We aren't draining anything but FQRNIs */
		pr_err("Found verb 0x%x in MR\n", msg->verb);
		return -1;
	}
	qm_mr_next(p);
	qm_mr_cci_consume(p, 1);
	goto loop;
}
1133
/*
 * Bring up one s/w portal: initialise the EQCR/DQRR/MR/MC sub-APIs, hook
 * up the IRQ and verify the rings are clean before enabling interrupts.
 * On failure, tears down whatever was set up (goto-based unwind) and
 * returns -EIO; returns 0 on success.
 */
static int qman_create_portal(struct qman_portal *portal,
			      const struct qm_portal_config *c,
			      const struct qman_cgrs *cgrs)
{
	struct qm_portal *p;
	int ret;
	u32 isdr;

	p = &portal->p;

#ifdef CONFIG_FSL_PAMU
	/* PAMU is required for stashing */
	portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
#else
	portal->use_eqcr_ci_stashing = 0;
#endif
	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference...
	 */
	p->addr.ce = c->addr_virt_ce;
	p->addr.ce_be = c->addr_virt_ce;
	p->addr.ci = c->addr_virt_ci;
	/*
	 * If CI-stashing is used, the current defaults use a threshold of 3,
	 * and stash with high-than-DQRR priority.
	 */
	if (qm_eqcr_init(p, qm_eqcr_pvb,
			portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
		dev_err(c->dev, "EQCR initialisation failed\n");
		goto fail_eqcr;
	}
	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
			qm_dqrr_cdc, DQRR_MAXFILL)) {
		dev_err(c->dev, "DQRR initialisation failed\n");
		goto fail_dqrr;
	}
	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
		dev_err(c->dev, "MR initialisation failed\n");
		goto fail_mr;
	}
	if (qm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/* static interrupt-gating controls */
	qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
	qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
	qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
	portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
	if (!portal->cgrs)
		goto fail_cgrs;
	/* initial snapshot is no-depletion */
	qman_cgrs_init(&portal->cgrs[1]);
	if (cgrs)
		portal->cgrs[0] = *cgrs;
	else
		/* if the given mask is NULL, assume all CGRs can be seen */
		qman_cgrs_fill(&portal->cgrs[0]);
	INIT_LIST_HEAD(&portal->cgr_cbs);
	spin_lock_init(&portal->cgr_lock);
	INIT_WORK(&portal->congestion_work, qm_congestion_task);
	INIT_WORK(&portal->mr_work, qm_mr_process_task);
	portal->bits = 0;
	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
	isdr = 0xffffffff;
	qm_out(p, QM_REG_ISDR, isdr);
	portal->irq_sources = 0;
	qm_out(p, QM_REG_IER, 0);
	qm_out(p, QM_REG_ISR, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}
	if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
	    irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
		dev_err(c->dev, "irq_set_affinity() failed\n");
		goto fail_affinity;
	}

	/* Need EQCR to be empty before continuing */
	isdr &= ~QM_PIRQ_EQCI;
	qm_out(p, QM_REG_ISDR, isdr);
	ret = qm_eqcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "EQCR unclean\n");
		goto fail_eqcr_empty;
	}
	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
	qm_out(p, QM_REG_ISDR, isdr);
	if (qm_dqrr_current(p)) {
		dev_err(c->dev, "DQRR unclean\n");
		qm_dqrr_cdc_consume_n(p, 0xffff);
	}
	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
		/* special handling, drain just in case it's a few FQRNIs */
		const union qm_mr_entry *e = qm_mr_current(p);

		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
			e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
		goto fail_dqrr_mr_empty;
	}
	/* Success */
	portal->config = c;
	qm_out(p, QM_REG_ISDR, 0);
	qm_out(p, QM_REG_IIR, 0);
	/* Write a sane SDQCR */
	qm_dqrr_sdqcr_set(p, portal->sdqcr);
	return 0;

fail_dqrr_mr_empty:
fail_eqcr_empty:
fail_affinity:
	free_irq(c->irq, portal);
fail_irq:
	kfree(portal->cgrs);
fail_cgrs:
	qm_mc_finish(p);
fail_mc:
	qm_mr_finish(p);
fail_mr:
	qm_dqrr_finish(p);
fail_dqrr:
	qm_eqcr_finish(p);
fail_eqcr:
	return -EIO;
}
1265
/* Create the affine portal for CPU c->cpu and publish it; NULL on failure. */
struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
					      const struct qman_cgrs *cgrs)
{
	struct qman_portal *portal;
	int err;

	portal = &per_cpu(qman_affine_portal, c->cpu);
	err = qman_create_portal(portal, c, cgrs);
	if (err)
		return NULL;

	spin_lock(&affine_mask_lock);
	cpumask_set_cpu(c->cpu, &affine_mask);
	affine_channels[c->cpu] = c->channel;
	affine_portals[c->cpu] = portal;
	spin_unlock(&affine_mask_lock);

	return portal;
}

static void qman_destroy_portal(struct qman_portal *qm)
{
	const struct qm_portal_config *pcfg;

	/* Stop dequeues on the portal */
	qm_dqrr_sdqcr_set(&qm->p, 0);

	/*
	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
	 * something related to QM_PIRQ_EQCI, this may need fixing.
	 * Also, due to the prefetching model used for CI updates in the enqueue
	 * path, this update will only invalidate the CI cacheline *after*
	 * working on it, so we need to call this twice to ensure a full update
	 * irrespective of where the enqueue processing was at when the teardown
	 * began.
	 */
	qm_eqcr_cce_update(&qm->p);
	qm_eqcr_cce_update(&qm->p);
	pcfg = qm->config;

	free_irq(pcfg->irq, qm);

	kfree(qm->cgrs);
	qm_mc_finish(&qm->p);
	qm_mr_finish(&qm->p);
	qm_dqrr_finish(&qm->p);
	qm_eqcr_finish(&qm->p);

	qm->config = NULL;
}

/* Tear down the current CPU's affine portal and return its config. */
const struct qm_portal_config *qman_destroy_affine_portal(void)
{
	struct qman_portal *qm = get_affine_portal();
	const struct qm_portal_config *pcfg;
	int cpu;

	pcfg = qm->config;
	cpu = pcfg->cpu;

	qman_destroy_portal(qm);

	spin_lock(&affine_mask_lock);
	cpumask_clear_cpu(cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);
	put_affine_portal();
	return pcfg;
}
1334
1335
/*
 * Apply the FQ state transition implied by a message-ring entry.
 * @verb is msg->verb already masked with QM_MR_VERB_TYPE_MASK.
 */
static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
				   const union qm_mr_entry *msg, u8 verb)
{
	switch (verb) {
	case QM_MR_VERB_FQRL:
		/* ORL has drained: clear the ORL-present flag */
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
		fq_clear(fq, QMAN_FQ_STATE_ORL);
		break;
	case QM_MR_VERB_FQRN:
		/* Retirement notification: completes a pending state change */
		DPAA_ASSERT(fq->state == qman_fq_state_parked ||
			    fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
		/* Latch the not-empty / ORL-present status reported by h/w */
		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		fq->state = qman_fq_state_retired;
		break;
	case QM_MR_VERB_FQPN:
		/* Park notification: scheduled -> parked */
		DPAA_ASSERT(fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
		fq->state = qman_fq_state_parked;
	}
}
1361
1362static void qm_congestion_task(struct work_struct *work)
1363{
1364 struct qman_portal *p = container_of(work, struct qman_portal,
1365 congestion_work);
1366 struct qman_cgrs rr, c;
1367 union qm_mc_result *mcr;
1368 struct qman_cgr *cgr;
1369
1370 spin_lock(&p->cgr_lock);
1371 qm_mc_start(&p->p);
1372 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
1373 if (!qm_mc_result_timeout(&p->p, &mcr)) {
1374 spin_unlock(&p->cgr_lock);
1375 dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
1376 qman_p_irqsource_add(p, QM_PIRQ_CSCI);
1377 return;
1378 }
1379
1380 qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
1381 &p->cgrs[0]);
1382
1383 qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
1384
1385 qman_cgrs_cp(&p->cgrs[1], &rr);
1386
1387 list_for_each_entry(cgr, &p->cgr_cbs, node)
1388 if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
1389 cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
1390 spin_unlock(&p->cgr_lock);
1391 qman_p_irqsource_add(p, QM_PIRQ_CSCI);
1392}
1393
/*
 * Workqueue handler for message-ring interrupts (MRI): drain and dispatch
 * every pending MR entry, then re-enable the MRI interrupt source (removed
 * in __poll_portal_slow()). Runs with preemption disabled so the portal
 * stays bound to this CPU while the ring is consumed.
 */
static void qm_mr_process_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     mr_work);
	const union qm_mr_entry *msg;
	struct qman_fq *fq;
	u8 verb, num = 0;

	preempt_disable();

	while (1) {
		qm_mr_pvb_update(&p->p);
		msg = qm_mr_current(&p->p);
		if (!msg)
			break;

		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
		/* The 0x20 bit distinguishes h/w notifications from s/w ERNs */
		if (verb & 0x20) {
			switch (verb) {
			case QM_MR_VERB_FQRNI:
				/* FQRNIs are deliberately ignored */
				break;
			case QM_MR_VERB_FQRN:
			case QM_MR_VERB_FQRL:
				/* Retirement/ORL-drain: look up FQ by FQID */
				fq = fqid_to_fq(qm_fqid_get(&msg->fq));
				if (WARN_ON(!fq))
					break;
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_FQPN:
				/* Parked: FQ recovered from context_b tag */
				fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_DC_ERN:
				/* DCP ERNs are not handled: frames are lost */
				pr_crit_once("Leaking DCP ERNs!\n");
				break;
			default:
				pr_crit("Invalid MR verb 0x%02x\n", verb);
			}
		} else {
			/* It's a software ERN; dispatch to the FQ's handler */
			fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
			fq->cb.ern(p, fq, msg);
		}
		num++;
		qm_mr_next(&p->p);
	}

	qm_mr_cci_consume(&p->p, num);
	qman_p_irqsource_add(p, QM_PIRQ_MRI);
	preempt_enable();
}
1454
/*
 * Handle the "slow path" interrupt sources. CSCI and MRI processing is
 * deferred to workqueue context (with the source masked until the work
 * re-enables it); EQCR-interrupt handling is done inline. Returns @is
 * unchanged so the caller can report which sources were seen.
 */
static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
{
	if (is & QM_PIRQ_CSCI) {
		/* Mask CSCI until qm_congestion_task() re-adds it */
		qman_p_irqsource_remove(p, QM_PIRQ_CSCI);
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->congestion_work);
	}

	if (is & QM_PIRQ_EQRI) {
		/* EQCR ring has space again: stop the threshold interrupt
		 * and wake anyone waiting to enqueue. */
		qm_eqcr_cce_update(&p->p);
		qm_eqcr_set_ithresh(&p->p, 0);
		wake_up(&affine_queue);
	}

	if (is & QM_PIRQ_MRI) {
		/* Mask MRI until qm_mr_process_task() re-adds it */
		qman_p_irqsource_remove(p, QM_PIRQ_MRI);
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->mr_work);
	}

	return is;
}
1477
1478
1479
1480
1481
/*
 * Release VDQCR ownership once the volatile dequeue command has expired,
 * and wake any thread blocked in qman_volatile_dequeue() on affine_queue.
 */
static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
{
	p->vdqcr_owned = NULL;
	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
	wake_up(&affine_queue);
}
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
/*
 * The fast-path poll: consume up to @poll_limit DQRR entries, dispatching
 * each to its FQ's dqrr callback. Returns the number of entries processed.
 * Entries marked UNSCHEDULED belong to the portal's current volatile
 * dequeue (VDQCR) owner; all others carry the owning FQ in context_b.
 */
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit)
{
	const struct qm_dqrr_entry *dq;
	struct qman_fq *fq;
	enum qman_cb_dqrr_result res;
	unsigned int limit = 0;

	do {
		qm_dqrr_pvb_update(&p->p);
		dq = qm_dqrr_current(&p->p);
		if (!dq)
			break;

		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
			/*
			 * Volatile dequeue: the only FQ this can belong to is
			 * the portal's current VDQCR owner.
			 */
			fq = p->vdqcr_owned;
			/*
			 * An empty-FQ indication means the not-empty flag can
			 * be cleared before the callback runs.
			 */
			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
				fq_clear(fq, QMAN_FQ_STATE_NE);
			/*
			 * The callback may ask us to stop; in that case the
			 * entry is neither consumed nor advanced past, so a
			 * later poll resumes at the same entry.
			 */
			res = fq->cb.dqrr(p, fq, dq);
			if (res == qman_cb_dqrr_stop)
				break;
			/* VDQCR expired: release ownership and wake waiters */
			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
				clear_vdqcr(p, fq);
		} else {
			/* Scheduled dequeue: FQ recovered from context_b tag */
			fq = tag_to_fq(be32_to_cpu(dq->context_b));

			res = fq->cb.dqrr(p, fq, dq);
			/*
			 * As above: stopping leaves the entry unconsumed so
			 * processing resumes here next time.
			 */
			if (res == qman_cb_dqrr_stop)
				break;
		}

		/*
		 * Parking is only legal for held-active FQs (the h/w would
		 * otherwise reject/ignore it).
		 */
		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
			    (res != qman_cb_dqrr_park));
		/* "defer" means the callback will consume the entry itself */
		if (res != qman_cb_dqrr_defer)
			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
						 res == qman_cb_dqrr_park);

		qm_dqrr_next(&p->p);

		/*
		 * consume_stop consumes the current entry but halts further
		 * processing for this poll.
		 */
	} while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);

	return limit;
}
1592
1593void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
1594{
1595 unsigned long irqflags;
1596
1597 local_irq_save(irqflags);
1598 p->irq_sources |= bits & QM_PIRQ_VISIBLE;
1599 qm_out(&p->p, QM_REG_IER, p->irq_sources);
1600 local_irq_restore(irqflags);
1601}
1602EXPORT_SYMBOL(qman_p_irqsource_add);
1603
/*
 * Disable interrupt sources on portal @p. After trimming the IER we read it
 * back and ack (via ISR) every status bit that is no longer enabled, so that
 * an already-latched status for a just-disabled source cannot fire later.
 */
void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
{
	unsigned long irqflags;
	u32 ier;

	local_irq_save(irqflags);
	bits &= QM_PIRQ_VISIBLE;
	p->irq_sources &= ~bits;
	qm_out(&p->p, QM_REG_IER, p->irq_sources);
	ier = qm_in(&p->p, QM_REG_IER);
	/*
	 * Write-to-clear every ISR bit NOT covered by the (freshly re-read)
	 * enable mask, discarding any pending status for disabled sources.
	 */
	qm_out(&p->p, QM_REG_ISR, ~ier);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_irqsource_remove);
1632
/* Return the mask of CPUs that have an affine QMan portal. */
const cpumask_t *qman_affine_cpus(void)
{
	return &affine_mask;
}
EXPORT_SYMBOL(qman_affine_cpus);
1638
/*
 * Return the portal channel for @cpu; a negative @cpu means "the current
 * CPU". Warns if the CPU has no affine portal registered.
 */
u16 qman_affine_channel(int cpu)
{
	if (cpu < 0) {
		struct qman_portal *portal = get_affine_portal();

		cpu = portal->config->cpu;
		put_affine_portal();
	}
	WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
	return affine_channels[cpu];
}
EXPORT_SYMBOL(qman_affine_channel);
1651
/* Return the affine portal for @cpu (NULL if none has been created). */
struct qman_portal *qman_get_affine_portal(int cpu)
{
	return affine_portals[cpu];
}
EXPORT_SYMBOL(qman_get_affine_portal);
1657
/*
 * Poll portal @p's DQRR, processing at most @limit entries.
 * Returns the number of entries processed.
 */
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
	return __poll_portal_fast(p, limit);
}
EXPORT_SYMBOL(qman_p_poll_dqrr);
1663
/*
 * Add pool channels to portal @p's static dequeue command. @pools is
 * masked against the pools this portal is configured for.
 */
void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	pools &= p->config->pools;
	p->sdqcr |= pools;
	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_static_dequeue_add);
1675
1676
1677
1678static const char *mcr_result_str(u8 result)
1679{
1680 switch (result) {
1681 case QM_MCR_RESULT_NULL:
1682 return "QM_MCR_RESULT_NULL";
1683 case QM_MCR_RESULT_OK:
1684 return "QM_MCR_RESULT_OK";
1685 case QM_MCR_RESULT_ERR_FQID:
1686 return "QM_MCR_RESULT_ERR_FQID";
1687 case QM_MCR_RESULT_ERR_FQSTATE:
1688 return "QM_MCR_RESULT_ERR_FQSTATE";
1689 case QM_MCR_RESULT_ERR_NOTEMPTY:
1690 return "QM_MCR_RESULT_ERR_NOTEMPTY";
1691 case QM_MCR_RESULT_PENDING:
1692 return "QM_MCR_RESULT_PENDING";
1693 case QM_MCR_RESULT_ERR_BADCOMMAND:
1694 return "QM_MCR_RESULT_ERR_BADCOMMAND";
1695 }
1696 return "<unknown MCR result>";
1697}
1698
1699int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
1700{
1701 if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
1702 int ret = qman_alloc_fqid(&fqid);
1703
1704 if (ret)
1705 return ret;
1706 }
1707 fq->fqid = fqid;
1708 fq->flags = flags;
1709 fq->state = qman_fq_state_oos;
1710 fq->cgr_groupid = 0;
1711
1712
1713 if (fqid == 0 || fqid >= num_fqids) {
1714 WARN(1, "bad fqid %d\n", fqid);
1715 return -EINVAL;
1716 }
1717
1718 fq->idx = fqid * 2;
1719 if (flags & QMAN_FQ_FLAG_NO_MODIFY)
1720 fq->idx++;
1721
1722 WARN_ON(fq_table[fq->idx]);
1723 fq_table[fq->idx] = fq;
1724
1725 return 0;
1726}
1727EXPORT_SYMBOL(qman_create_fq);
1728
/*
 * Unregister @fq. Only legal for parked or out-of-service FQs; a
 * dynamically allocated FQID is returned to the allocator. Calling this
 * on an FQ in any other state is a programming error.
 */
void qman_destroy_fq(struct qman_fq *fq)
{
	/*
	 * We don't need to lock the FQ as it is a pre-condition that the FQ be
	 * quiesced. Instead, run some checks.
	 */
	switch (fq->state) {
	case qman_fq_state_parked:
	case qman_fq_state_oos:
		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
			qman_release_fqid(fq->fqid);

		DPAA_ASSERT(fq_table[fq->idx]);
		fq_table[fq->idx] = NULL;
		return;
	default:
		break;
	}
	DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
}
EXPORT_SYMBOL(qman_destroy_fq);
1750
/* Return the FQID bound to @fq by qman_create_fq(). */
u32 qman_fq_fqid(struct qman_fq *fq)
{
	return fq->fqid;
}
EXPORT_SYMBOL(qman_fq_fqid);
1756
/*
 * Issue an INITFQ_[PARKED|SCHED] management command for @fq, optionally
 * merging caller-supplied @opts. Only legal from the OOS or parked states.
 * Returns 0 on success, -EINVAL/-EBUSY on state/argument errors,
 * -ETIMEDOUT on MC timeout, -EIO on h/w rejection or DMA-mapping failure.
 */
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	u8 res, myverb;
	int ret = 0;

	myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
		? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;

	if (fq->state != qman_fq_state_oos &&
	    fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
		/* OAC and TDTHRESH cannot be set at the same time */
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
			return -EINVAL;
	}
	/* Re-check state under the portal (guards against races) */
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    (fq->state != qman_fq_state_oos &&
	     fq->state != qman_fq_state_parked)) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initfq = *opts;
	qm_fqid_set(&mcc->fq, fq->fqid);
	mcc->initfq.count = 0;
	/*
	 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is used
	 * as our demux tag; otherwise the caller-provided value stands.
	 */
	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
		dma_addr_t phys_fq;

		mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
		mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
		/*
		 * If the caller wasn't setting CONTEXTA, clear the stashing
		 * settings; otherwise stash the FQ object itself.
		 */
		if (!(be16_to_cpu(mcc->initfq.we_mask) &
		      QM_INITFQ_WE_CONTEXTA)) {
			mcc->initfq.we_mask |=
				cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
			memset(&mcc->initfq.fqd.context_a, 0,
			       sizeof(mcc->initfq.fqd.context_a));
		} else {
			struct qman_portal *p = qman_dma_portal;

			phys_fq = dma_map_single(p->config->dev, fq,
						 sizeof(*fq), DMA_TO_DEVICE);
			if (dma_mapping_error(p->config->dev, phys_fq)) {
				dev_err(p->config->dev, "dma_mapping failed\n");
				ret = -EIO;
				goto out;
			}

			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
		}
	}
	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
		int wq = 0;

		/* Default to work queue 4 unless the caller chose one */
		if (!(be16_to_cpu(mcc->initfq.we_mask) &
		      QM_INITFQ_WE_DESTWQ)) {
			mcc->initfq.we_mask |=
				cpu_to_be16(QM_INITFQ_WE_DESTWQ);
			wq = 4;
		}
		qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
	}
	qm_mc_commit(&p->p, myverb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "MCR timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	/* Mirror accepted options into the software FQ state */
	if (opts) {
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
			if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
			else
				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
		}
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
			fq->cgr_groupid = opts->fqd.cgid;
	}
	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
		qman_fq_state_sched : qman_fq_state_parked;

out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_init_fq);
1869
/*
 * Move a parked FQ to the scheduled state via ALTER_SCHED.
 * Returns 0 on success, -EINVAL/-EBUSY for bad state, -ETIMEDOUT on MC
 * timeout, -EIO if the h/w rejects the command.
 */
int qman_schedule_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	/* Re-check state under the portal */
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state != qman_fq_state_parked) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "ALTER_SCHED timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_sched;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_schedule_fq);
1910
/*
 * Retire @fq via ALTER_RETIRE. Returns 0 if retirement completed
 * immediately (a synthesized FQRNI is delivered to fq->cb.fqs so callers
 * see a uniform notification), 1 if retirement is pending (an FQRN will
 * arrive on the MR later), or a negative errno.
 */
int qman_retire_fq(struct qman_fq *fq, u32 *flags)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret;
	u8 res;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_sched)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	/* Re-check state under the portal */
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state == qman_fq_state_retired ||
	    fq->state == qman_fq_state_oos) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
	res = mcr->result;
	/*
	 * "Immediate" retirement (OK) means no FQRN will follow, so we
	 * fabricate an FQRNI-style message below; PENDING means the real
	 * FQRN arrives later and the CHANGING flag tracks that window.
	 */
	if (res == QM_MCR_RESULT_OK) {
		ret = 0;
		/* Latch the empty/ORL status reported with the result */
		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		if (flags)
			*flags = fq->flags;
		fq->state = qman_fq_state_retired;
		if (fq->cb.fqs) {
			/*
			 * Synthesize an FQRNI so the caller's fqs callback
			 * sees the same shape of notification whether
			 * retirement was immediate or deferred.
			 */
			union qm_mr_entry msg;

			msg.verb = QM_MR_VERB_FQRNI;
			msg.fq.fqs = mcr->alterfq.fqs;
			qm_fqid_set(&msg.fq, fq->fqid);
			msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
			fq->cb.fqs(p, fq, &msg);
		}
	} else if (res == QM_MCR_RESULT_PENDING) {
		ret = 1;
		fq_set(fq, QMAN_FQ_STATE_CHANGING);
	} else {
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_retire_fq);
1992
/*
 * Take a retired FQ out of service via ALTER_OOS.
 * Returns 0 on success, -EINVAL/-EBUSY for bad state, -ETIMEDOUT on MC
 * timeout, -EIO if the h/w rejects the command.
 */
int qman_oos_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_retired)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	/* Re-check state under the portal */
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
	    fq->state != qman_fq_state_retired) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_oos;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_oos_fq);
2030
2031int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
2032{
2033 union qm_mc_command *mcc;
2034 union qm_mc_result *mcr;
2035 struct qman_portal *p = get_affine_portal();
2036 int ret = 0;
2037
2038 mcc = qm_mc_start(&p->p);
2039 qm_fqid_set(&mcc->fq, fq->fqid);
2040 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
2041 if (!qm_mc_result_timeout(&p->p, &mcr)) {
2042 ret = -ETIMEDOUT;
2043 goto out;
2044 }
2045
2046 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
2047 if (mcr->result == QM_MCR_RESULT_OK)
2048 *fqd = mcr->queryfq.fqd;
2049 else
2050 ret = -EIO;
2051out:
2052 put_affine_portal();
2053 return ret;
2054}
2055
2056int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
2057{
2058 union qm_mc_command *mcc;
2059 union qm_mc_result *mcr;
2060 struct qman_portal *p = get_affine_portal();
2061 int ret = 0;
2062
2063 mcc = qm_mc_start(&p->p);
2064 qm_fqid_set(&mcc->fq, fq->fqid);
2065 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
2066 if (!qm_mc_result_timeout(&p->p, &mcr)) {
2067 ret = -ETIMEDOUT;
2068 goto out;
2069 }
2070
2071 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
2072 if (mcr->result == QM_MCR_RESULT_OK)
2073 *np = mcr->queryfq_np;
2074 else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
2075 ret = -ERANGE;
2076 else
2077 ret = -EIO;
2078out:
2079 put_affine_portal();
2080 return ret;
2081}
2082EXPORT_SYMBOL(qman_query_fq_np);
2083
/*
 * Query the state of congestion group @cgr into @cgrd.
 * Returns 0 on success, -ETIMEDOUT on MC timeout, -EIO on h/w error.
 */
static int qman_query_cgr(struct qman_cgr *cgr,
			  struct qm_mcr_querycgr *cgrd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	mcc->cgr.cgid = cgr->cgrid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
	if (mcr->result == QM_MCR_RESULT_OK)
		*cgrd = mcr->querycgr;
	else {
		dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
			mcr_result_str(mcr->result));
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}
2111
2112int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
2113{
2114 struct qm_mcr_querycgr query_cgr;
2115 int err;
2116
2117 err = qman_query_cgr(cgr, &query_cgr);
2118 if (err)
2119 return err;
2120
2121 *result = !!query_cgr.cgr.cs;
2122 return 0;
2123}
2124EXPORT_SYMBOL(qman_query_cgr_congested);
2125
2126
/*
 * Try to claim portal @p's single VDQCR slot for @fq and issue the
 * volatile dequeue command. Returns 0 on success, -EBUSY if the portal
 * already has a VDQCR owner or @fq already has one outstanding. Runs with
 * local IRQs off so the owner/flag pair is updated atomically w.r.t. the
 * portal's interrupt handler.
 */
static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
{
	unsigned long irqflags;
	int ret = -EBUSY;

	local_irq_save(irqflags);
	if (p->vdqcr_owned)
		goto out;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		goto out;

	fq_set(fq, QMAN_FQ_STATE_VDQCR);
	p->vdqcr_owned = fq;
	qm_dqrr_vdqcr_set(&p->p, vdqcr);
	ret = 0;
out:
	local_irq_restore(irqflags);
	return ret;
}
2146
2147static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
2148{
2149 int ret;
2150
2151 *p = get_affine_portal();
2152 ret = set_p_vdqcr(*p, fq, vdqcr);
2153 put_affine_portal();
2154 return ret;
2155}
2156
2157static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
2158 u32 vdqcr, u32 flags)
2159{
2160 int ret = 0;
2161
2162 if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
2163 ret = wait_event_interruptible(affine_queue,
2164 !set_vdqcr(p, fq, vdqcr));
2165 else
2166 wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
2167 return ret;
2168}
2169
/*
 * Issue a volatile dequeue command for @fq. The FQID field of @vdqcr must
 * be zero on entry (it is filled in here). With QMAN_VOLATILE_FLAG_WAIT the
 * call blocks until the portal's VDQCR slot is free; with
 * QMAN_VOLATILE_FLAG_FINISH it additionally blocks until the command has
 * fully expired (all resulting DQRR entries processed).
 */
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
{
	struct qman_portal *p;
	int ret;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_retired)
		return -EINVAL;
	/* caller must leave the FQID field clear for us to fill in */
	if (vdqcr & QM_VDQCR_FQID_MASK)
		return -EINVAL;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		return -EBUSY;
	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
	if (flags & QMAN_VOLATILE_FLAG_WAIT)
		ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
	else
		ret = set_vdqcr(&p, fq, vdqcr);
	if (ret)
		return ret;

	/* VDQCR is set (owned by us): optionally wait for its completion */
	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
			/*
			 * NB: don't propagate any error - the caller wouldn't
			 * know whether the VDQCR was issued or not. A signal
			 * could arrive after returning anyway, so the caller
			 * can check signal_pending() if that's an issue.
			 */
			wait_event_interruptible(affine_queue,
				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
		else
			wait_event(affine_queue,
				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
	}
	return 0;
}
EXPORT_SYMBOL(qman_volatile_dequeue);
2207
2208static void update_eqcr_ci(struct qman_portal *p, u8 avail)
2209{
2210 if (avail)
2211 qm_eqcr_cce_prefetch(&p->p);
2212 else
2213 qm_eqcr_cce_update(&p->p);
2214}
2215
/*
 * Enqueue frame descriptor @fd to @fq via the calling CPU's affine portal.
 *
 * NOTE(review): when the EQCR is full (!eq) the frame is silently dropped
 * and 0 is still returned — confirm whether callers expect an error here.
 */
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
{
	struct qman_portal *p;
	struct qm_eqcr_entry *eq;
	unsigned long irqflags;
	u8 avail;

	p = get_affine_portal();
	local_irq_save(irqflags);

	if (p->use_eqcr_ci_stashing) {
		/*
		 * The stashing case is easy, only update if we need to in
		 * order to try and liberate ring entries.
		 */
		eq = qm_eqcr_start_stash(&p->p);
	} else {
		/*
		 * The non-stashing case is harder, need to prefetch ahead of
		 * time.
		 */
		avail = qm_eqcr_get_avail(&p->p);
		if (avail < 2)
			update_eqcr_ci(p, avail);
		eq = qm_eqcr_start_no_stash(&p->p);
	}

	if (unlikely(!eq))
		goto out;

	qm_fqid_set(eq, fq->fqid);
	eq->tag = cpu_to_be32(fq_to_tag(fq));
	eq->fd = *fd;

	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
out:
	local_irq_restore(irqflags);
	put_affine_portal();
	return 0;
}
EXPORT_SYMBOL(qman_enqueue);
2257
/*
 * Issue a MODIFYCGR (or INITCGR, when QMAN_CGR_FLAG_USE_INIT is set)
 * management command for @cgr with optional parameters @opts.
 * Returns 0 on success, -ETIMEDOUT on MC timeout, -EIO on h/w error.
 */
static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
			 struct qm_mcc_initcgr *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 verb = QM_MCC_VERB_MODIFYCGR;
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initcgr = *opts;
	mcc->initcgr.cgid = cgr->cgrid;
	if (flags & QMAN_CGR_FLAG_USE_INIT)
		verb = QM_MCC_VERB_INITCGR;
	qm_mc_commit(&p->p, verb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
	if (mcr->result != QM_MCR_RESULT_OK)
		ret = -EIO;

out:
	put_affine_portal();
	return ret;
}
2287
2288#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
2289
2290
2291static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
2292{
2293 if (qman_ip_rev >= QMAN_REV30)
2294 cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
2295 QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
2296 else
2297 cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
2298}
2299
2300static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
2301{
2302 if (qman_ip_rev >= QMAN_REV30)
2303 cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
2304 else
2305 cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
2306}
2307
2308static u8 qman_cgr_cpus[CGR_NUM];
2309
2310void qman_init_cgr_all(void)
2311{
2312 struct qman_cgr cgr;
2313 int err_cnt = 0;
2314
2315 for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
2316 if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
2317 err_cnt++;
2318 }
2319
2320 if (err_cnt)
2321 pr_err("Warning: %d error%s while initialising CGR h/w\n",
2322 err_cnt, (err_cnt > 1) ? "s" : "");
2323}
2324
2325int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
2326 struct qm_mcc_initcgr *opts)
2327{
2328 struct qm_mcr_querycgr cgr_state;
2329 int ret;
2330 struct qman_portal *p;
2331
2332
2333
2334
2335
2336
2337
2338 if (cgr->cgrid >= CGR_NUM)
2339 return -EINVAL;
2340
2341 preempt_disable();
2342 p = get_affine_portal();
2343 qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
2344 preempt_enable();
2345
2346 cgr->chan = p->config->channel;
2347 spin_lock(&p->cgr_lock);
2348
2349 if (opts) {
2350 struct qm_mcc_initcgr local_opts = *opts;
2351
2352 ret = qman_query_cgr(cgr, &cgr_state);
2353 if (ret)
2354 goto out;
2355
2356 qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
2357 be32_to_cpu(cgr_state.cgr.cscn_targ));
2358 local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);
2359
2360
2361 if (flags & QMAN_CGR_FLAG_USE_INIT)
2362 ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
2363 &local_opts);
2364 else
2365 ret = qm_modify_cgr(cgr, 0, &local_opts);
2366 if (ret)
2367 goto out;
2368 }
2369
2370 list_add(&cgr->node, &p->cgr_cbs);
2371
2372
2373 ret = qman_query_cgr(cgr, &cgr_state);
2374 if (ret) {
2375
2376 dev_err(p->config->dev, "CGR HW state partially modified\n");
2377 ret = 0;
2378 goto out;
2379 }
2380 if (cgr->cb && cgr_state.cgr.cscn_en &&
2381 qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
2382 cgr->cb(p, cgr, 1);
2383out:
2384 spin_unlock(&p->cgr_lock);
2385 put_affine_portal();
2386 return ret;
2387}
2388EXPORT_SYMBOL(qman_create_cgr);
2389
/*
 * Unregister @cgr. Must run on the CPU/portal that created it (enforced by
 * the channel check). If no other object for the same CGRID remains with a
 * callback, CSCN delivery to this portal is switched off; on failure the
 * node is restored to the list so state stays consistent.
 */
int qman_delete_cgr(struct qman_cgr *cgr)
{
	unsigned long irqflags;
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret = 0;
	struct qman_cgr *i;
	struct qman_portal *p = get_affine_portal();

	if (cgr->chan != p->config->channel) {
		/* attempt to delete from a portal other than the creator's */
		dev_err(p->config->dev, "CGR not owned by current portal");
		dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
			cgr->chan, p->config->channel);

		ret = -EINVAL;
		goto put_portal;
	}
	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	spin_lock_irqsave(&p->cgr_lock, irqflags);
	list_del(&cgr->node);
	/*
	 * If there are other CGR objects for this CGRID still wanting
	 * callbacks, leave CSCN_TARG pointing at this portal.
	 */
	list_for_each_entry(i, &p->cgr_cbs, node)
		if (i->cgrid == cgr->cgrid && i->cb)
			goto release_lock;
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* add back to the list to keep s/w state consistent */
		list_add(&cgr->node, &p->cgr_cbs);
		goto release_lock;
	}

	local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
	qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
			       be32_to_cpu(cgr_state.cgr.cscn_targ));

	ret = qm_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		/* add back to the list to keep s/w state consistent */
		list_add(&cgr->node, &p->cgr_cbs);
release_lock:
	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
put_portal:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_delete_cgr);
2440
/*
 * NOTE(review): appears unused in this file now that qman_delete_cgr_safe()
 * uses smp_call_function_single(); candidate for removal — confirm no other
 * translation unit references it.
 */
struct cgr_comp {
	struct qman_cgr *cgr;
	struct completion completion;
};
2445
/* smp_call_function_single() trampoline: @arg is the CGR to delete. */
static void qman_delete_cgr_smp_call(void *arg)
{
	struct qman_cgr *cgr = arg;

	qman_delete_cgr(cgr);
}
2450
/*
 * Like qman_delete_cgr(), but callable from any CPU: if the CGR was created
 * on a different CPU, the deletion is bounced there via an IPI (the remote
 * callback runs in hard-IRQ context).
 */
void qman_delete_cgr_safe(struct qman_cgr *cgr)
{
	preempt_disable();
	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
		smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
					 qman_delete_cgr_smp_call, cgr, true);
		preempt_enable();
		return;
	}

	qman_delete_cgr(cgr);
	preempt_enable();
}
EXPORT_SYMBOL(qman_delete_cgr_safe);
2465
2466
2467
/*
 * Drain every pending MR entry, consuming each one; returns 1 if any entry
 * of verb type @v was seen, 0 otherwise. Used during FQ shutdown recovery.
 */
static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
{
	const union qm_mr_entry *msg;
	int found = 0;

	qm_mr_pvb_update(p);
	msg = qm_mr_current(p);
	while (msg) {
		if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
			found = 1;
		qm_mr_next(p);
		qm_mr_cci_consume_to_current(p);
		qm_mr_pvb_update(p);
		msg = qm_mr_current(p);
	}
	return found;
}
2485
/*
 * Drain every pending DQRR entry, consuming each one; returns 1 if any
 * entry for @fqid carried status bit(s) @s. If @wait, busy-wait until at
 * least one entry appears before draining. Used during FQ shutdown.
 */
static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
				      bool wait)
{
	const struct qm_dqrr_entry *dqrr;
	int found = 0;

	do {
		qm_dqrr_pvb_update(p);
		dqrr = qm_dqrr_current(p);
		if (!dqrr)
			cpu_relax();
	} while (wait && !dqrr);

	while (dqrr) {
		if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
			found = 1;
		qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
		qm_dqrr_pvb_update(p);
		qm_dqrr_next(p);
		dqrr = qm_dqrr_current(p);
	}
	return found;
}
2509
2510#define qm_mr_drain(p, V) \
2511 _qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)
2512
2513#define qm_dqrr_drain(p, f, S) \
2514 _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)
2515
2516#define qm_dqrr_drain_wait(p, f, S) \
2517 _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)
2518
2519#define qm_dqrr_drain_nomatch(p) \
2520 _qm_dqrr_consume_and_match(p, 0, 0, false)
2521
/*
 * Force frame queue @fqid into the out-of-service state regardless of its
 * current state, draining any frames and ORL entries as required. Used for
 * recovery of FQs not cleanly quiesced by their owner. Returns 0 on
 * success or a negative errno.
 */
static int qman_shutdown_fq(u32 fqid)
{
	struct qman_portal *p;
	struct device *dev;
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	int orl_empty, drain = 0, ret = 0;
	u32 channel, wq, res;
	u8 state;

	p = get_affine_portal();
	dev = p->config->dev;
	/* Determine the state of the FQID */
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ_NP timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
	/* Already OOS: nothing to do */
	if (state == QM_MCR_NP_STATE_OOS)
		goto out;

	/* Query which channel/work-queue the FQ is using */
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	/* Need to store these since the MCR gets reused below */
	channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
	wq = qm_fqd_get_wq(&mcr->queryfq.fqd);

	switch (state) {
	case QM_MCR_NP_STATE_TEN_SCHED:
	case QM_MCR_NP_STATE_TRU_SCHED:
	case QM_MCR_NP_STATE_ACTIVE:
	case QM_MCR_NP_STATE_PARKED:
		orl_empty = 0;
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			dev_err(dev, "QUERYFQ_NP timeout\n");
			ret = -ETIMEDOUT;
			goto out;
		}
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_RETIRE);
		/* Make a copy of the result as the MCR is reused below */
		res = mcr->result;

		if (res == QM_MCR_RESULT_PENDING) {
			/*
			 * Need to wait for the FQRN in the message ring, which
			 * will only occur once the FQ has been drained. For
			 * the FQ to drain, this portal must dequeue from the
			 * channel the FQ is scheduled on.
			 */
			int found_fqrn = 0;
			u16 dequeue_wq = 0;

			/* Flag that we must drain the FQ ourselves */
			drain = 1;

			if (channel >= qm_channel_pool1 &&
			    channel < qm_channel_pool1 + 15) {
				/* Pool channel: convert to SDQCR encoding */
				dequeue_wq = (channel -
					      qm_channel_pool1 + 1)<<4 | wq;
			} else if (channel < qm_channel_pool1) {
				/* Dedicated channel */
				dequeue_wq = wq;
			} else {
				dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
					fqid, channel);
				ret = -EBUSY;
				goto out;
			}
			/* Point the SDQCR at the FQ's channel */
			if (channel < qm_channel_pool1)
				qm_dqrr_sdqcr_set(&p->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_DEDICATED);
			else
				qm_dqrr_sdqcr_set(&p->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_POOL_CONV
						  (channel));
			do {
				/* Keep draining DQRR while checking the MR */
				qm_dqrr_drain_nomatch(&p->p);
				/* Process message ring too, looking for FQRN */
				found_fqrn = qm_mr_drain(&p->p, FQRN);
				cpu_relax();
			} while (!found_fqrn);

		}
		if (res != QM_MCR_RESULT_OK &&
		    res != QM_MCR_RESULT_PENDING) {
			dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
				fqid, res);
			ret = -EIO;
			goto out;
		}
		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
			/*
			 * ORL had no entries, no need to wait until the
			 * ERNs come in.
			 */
			orl_empty = 1;
		}
		/*
		 * Retirement succeeded; drain remaining frames with
		 * volatile dequeue commands if needed.
		 */
		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
			do {
				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);

				qm_dqrr_vdqcr_set(&p->p, vdqcr);
				/*
				 * Wait for dequeues and process them, until
				 * the FQ_EMPTY status is seen.
				 */
			} while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
		}
		/* Stop the channel dequeues set up above */
		qm_dqrr_sdqcr_set(&p->p, 0);

		while (!orl_empty) {
			/* Wait for the ORL to be completely drained (FQRL) */
			orl_empty = qm_mr_drain(&p->p, FQRL);
			cpu_relax();
		}
		/* Finally take the (now retired, drained) FQ out of service */
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_RETIRED:
		/* Already retired: just send the OOS command */
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result) {
			dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_OOS:
		/* Done */
		break;

	default:
		ret = -EIO;
	}

out:
	put_affine_portal();
	return ret;
}
2715
2716const struct qm_portal_config *qman_get_qm_portal_config(
2717 struct qman_portal *portal)
2718{
2719 return portal->config;
2720}
2721EXPORT_SYMBOL(qman_get_qm_portal_config);
2722
/*
 * ID allocators shared with the rest of the QMan driver (declared in
 * qman_priv.h; presumably seeded by the CCSR/setup code — the seeding is
 * not visible in this file).  IDs are stored offset by DPAA_GENALLOC_OFF
 * because genalloc reserves 0 as its failure value.
 */
struct gen_pool *qm_fqalloc;	/* frame-queue IDs (FQIDs) */
struct gen_pool *qm_qpalloc;	/* pool-channel IDs */
struct gen_pool *qm_cgralloc;	/* congestion-group record IDs (CGRIDs) */
2726
2727static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
2728{
2729 unsigned long addr;
2730
2731 addr = gen_pool_alloc(p, cnt);
2732 if (!addr)
2733 return -ENOMEM;
2734
2735 *result = addr & ~DPAA_GENALLOC_OFF;
2736
2737 return 0;
2738}
2739
/*
 * Allocate a contiguous run of @count frame-queue IDs; on success the
 * base FQID is written to @result.  Returns 0 or -ENOMEM.
 */
int qman_alloc_fqid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_fqalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_fqid_range);
2745
/*
 * Allocate a contiguous run of @count pool-channel IDs; on success the
 * base channel ID is written to @result.  Returns 0 or -ENOMEM.
 */
int qman_alloc_pool_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_qpalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_pool_range);
2751
/*
 * Allocate a contiguous run of @count congestion-group IDs; on success
 * the base CGRID is written to @result.  Returns 0 or -ENOMEM.
 */
int qman_alloc_cgrid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_cgralloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_cgrid_range);
2757
2758int qman_release_fqid(u32 fqid)
2759{
2760 int ret = qman_shutdown_fq(fqid);
2761
2762 if (ret) {
2763 pr_debug("FQID %d leaked\n", fqid);
2764 return ret;
2765 }
2766
2767 gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
2768 return 0;
2769}
2770EXPORT_SYMBOL(qman_release_fqid);
2771
2772static int qpool_cleanup(u32 qp)
2773{
2774
2775
2776
2777
2778
2779
2780 struct qman_fq fq = {
2781 .fqid = QM_FQID_RANGE_START
2782 };
2783 int err;
2784
2785 do {
2786 struct qm_mcr_queryfq_np np;
2787
2788 err = qman_query_fq_np(&fq, &np);
2789 if (err == -ERANGE)
2790
2791 return 0;
2792 else if (WARN_ON(err))
2793 return err;
2794
2795 if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
2796 struct qm_fqd fqd;
2797
2798 err = qman_query_fq(&fq, &fqd);
2799 if (WARN_ON(err))
2800 return err;
2801 if (qm_fqd_get_chan(&fqd) == qp) {
2802
2803 err = qman_shutdown_fq(fq.fqid);
2804 if (err)
2805
2806
2807
2808
2809 return err;
2810 }
2811 }
2812
2813 fq.fqid++;
2814 } while (1);
2815}
2816
2817int qman_release_pool(u32 qp)
2818{
2819 int ret;
2820
2821 ret = qpool_cleanup(qp);
2822 if (ret) {
2823 pr_debug("CHID %d leaked\n", qp);
2824 return ret;
2825 }
2826
2827 gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
2828 return 0;
2829}
2830EXPORT_SYMBOL(qman_release_pool);
2831
2832static int cgr_cleanup(u32 cgrid)
2833{
2834
2835
2836
2837
2838 struct qman_fq fq = {
2839 .fqid = QM_FQID_RANGE_START
2840 };
2841 int err;
2842
2843 do {
2844 struct qm_mcr_queryfq_np np;
2845
2846 err = qman_query_fq_np(&fq, &np);
2847 if (err == -ERANGE)
2848
2849 return 0;
2850 else if (WARN_ON(err))
2851 return err;
2852
2853 if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
2854 struct qm_fqd fqd;
2855
2856 err = qman_query_fq(&fq, &fqd);
2857 if (WARN_ON(err))
2858 return err;
2859 if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
2860 fqd.cgid == cgrid) {
2861 pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
2862 cgrid, fq.fqid);
2863 return -EIO;
2864 }
2865 }
2866
2867 fq.fqid++;
2868 } while (1);
2869}
2870
2871int qman_release_cgrid(u32 cgrid)
2872{
2873 int ret;
2874
2875 ret = cgr_cleanup(cgrid);
2876 if (ret) {
2877 pr_debug("CGRID %d leaked\n", cgrid);
2878 return ret;
2879 }
2880
2881 gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
2882 return 0;
2883}
2884EXPORT_SYMBOL(qman_release_cgrid);
2885