1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31#include "qman_priv.h"
32
33#define DQRR_MAXFILL 15
34#define EQCR_ITHRESH 4
35#define IRQNAME "QMan portal %d"
36#define MAX_IRQNAME 16
37#define QMAN_POLL_LIMIT 32
38#define QMAN_PIRQ_DQRR_ITHRESH 12
39#define QMAN_DQRR_IT_MAX 15
40#define QMAN_ITP_MAX 0xFFF
41#define QMAN_PIRQ_MR_ITHRESH 4
42#define QMAN_PIRQ_IPERIOD 100
43
44
45
46#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
47
48#define QM_REG_EQCR_PI_CINH 0x3000
49#define QM_REG_EQCR_CI_CINH 0x3040
50#define QM_REG_EQCR_ITR 0x3080
51#define QM_REG_DQRR_PI_CINH 0x3100
52#define QM_REG_DQRR_CI_CINH 0x3140
53#define QM_REG_DQRR_ITR 0x3180
54#define QM_REG_DQRR_DCAP 0x31C0
55#define QM_REG_DQRR_SDQCR 0x3200
56#define QM_REG_DQRR_VDQCR 0x3240
57#define QM_REG_DQRR_PDQCR 0x3280
58#define QM_REG_MR_PI_CINH 0x3300
59#define QM_REG_MR_CI_CINH 0x3340
60#define QM_REG_MR_ITR 0x3380
61#define QM_REG_CFG 0x3500
62#define QM_REG_ISR 0x3600
63#define QM_REG_IER 0x3640
64#define QM_REG_ISDR 0x3680
65#define QM_REG_IIR 0x36C0
66#define QM_REG_ITPR 0x3740
67
68
69#define QM_CL_EQCR 0x0000
70#define QM_CL_DQRR 0x1000
71#define QM_CL_MR 0x2000
72#define QM_CL_EQCR_PI_CENA 0x3000
73#define QM_CL_EQCR_CI_CENA 0x3040
74#define QM_CL_DQRR_PI_CENA 0x3100
75#define QM_CL_DQRR_CI_CENA 0x3140
76#define QM_CL_MR_PI_CENA 0x3300
77#define QM_CL_MR_CI_CENA 0x3340
78#define QM_CL_CR 0x3800
79#define QM_CL_RR0 0x3900
80#define QM_CL_RR1 0x3940
81
82#else
83
84#define QM_REG_EQCR_PI_CINH 0x0000
85#define QM_REG_EQCR_CI_CINH 0x0004
86#define QM_REG_EQCR_ITR 0x0008
87#define QM_REG_DQRR_PI_CINH 0x0040
88#define QM_REG_DQRR_CI_CINH 0x0044
89#define QM_REG_DQRR_ITR 0x0048
90#define QM_REG_DQRR_DCAP 0x0050
91#define QM_REG_DQRR_SDQCR 0x0054
92#define QM_REG_DQRR_VDQCR 0x0058
93#define QM_REG_DQRR_PDQCR 0x005c
94#define QM_REG_MR_PI_CINH 0x0080
95#define QM_REG_MR_CI_CINH 0x0084
96#define QM_REG_MR_ITR 0x0088
97#define QM_REG_CFG 0x0100
98#define QM_REG_ISR 0x0e00
99#define QM_REG_IER 0x0e04
100#define QM_REG_ISDR 0x0e08
101#define QM_REG_IIR 0x0e0c
102#define QM_REG_ITPR 0x0e14
103
104
105#define QM_CL_EQCR 0x0000
106#define QM_CL_DQRR 0x1000
107#define QM_CL_MR 0x2000
108#define QM_CL_EQCR_PI_CENA 0x3000
109#define QM_CL_EQCR_CI_CENA 0x3100
110#define QM_CL_DQRR_PI_CENA 0x3200
111#define QM_CL_DQRR_CI_CENA 0x3300
112#define QM_CL_MR_PI_CENA 0x3400
113#define QM_CL_MR_CI_CENA 0x3500
114#define QM_CL_CR 0x3800
115#define QM_CL_RR0 0x3900
116#define QM_CL_RR1 0x3940
117#endif
118
119
120
121
122
123
124
125
126
127
128#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
/*
 * Ring-mode encodings written into the portal CFG register. The numeric
 * values of the "= n" members are hardware encodings and must not change.
 */
enum qm_eqcr_pmode {		/* matches QCSP_CFG::EPM */
	qm_eqcr_pci = 0,	/* PI index, cache-inhibited */
	qm_eqcr_pce = 1,	/* PI index, cache-enabled */
	qm_eqcr_pvb = 2		/* valid-bit */
};
enum qm_dqrr_dmode {		/* matches QCSP_CFG::DP */
	qm_dqrr_dpush = 0,	/* SDQCR  + VDQCR */
	qm_dqrr_dpull = 1	/* PDQCR */
};
enum qm_dqrr_pmode {		/* s/w-only */
	qm_dqrr_pci,		/* reads DQRR_PI_CINH */
	qm_dqrr_pce,		/* reads DQRR_PI_CENA */
	qm_dqrr_pvb		/* reads valid-bit */
};
enum qm_dqrr_cmode {		/* matches QCSP_CFG::DCM */
	qm_dqrr_cci = 0,	/* CI index, cache-inhibited */
	qm_dqrr_cce = 1,	/* CI index, cache-enabled */
	qm_dqrr_cdc = 2		/* Discrete Consumption Acknowledgment */
};
enum qm_mr_pmode {		/* s/w-only */
	qm_mr_pci,		/* reads MR_PI_CINH */
	qm_mr_pce,		/* reads MR_PI_CENA */
	qm_mr_pvb		/* reads valid-bit */
};
enum qm_mr_cmode {		/* matches QCSP_CFG::MM */
	qm_mr_cci = 0,		/* CI index, cache-inhibited */
	qm_mr_cce = 1		/* CI index, cache-enabled */
};
172
173
174
175#define QM_EQCR_SIZE 8
176#define QM_DQRR_SIZE 16
177#define QM_MR_SIZE 8
178
179
/* EQCR (EnQueue Command Ring) entry, laid out exactly as hardware reads it */
struct qm_eqcr_entry {
	u8 _ncw_verb; /* writes to this are non-coherent */
	u8 dca;
	__be16 seqnum;
	u8 __reserved[4];
	__be32 fqid;	/* 24-bit */
	__be32 tag;
	struct qm_fd fd;
	u8 __reserved3[32];
} __packed __aligned(8);
#define QM_EQCR_VERB_VBIT 0x80
#define QM_EQCR_VERB_CMD_MASK 0x61	/* but only one value; */
#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
#define QM_EQCR_SEQNUM_NESN 0x8000	/* Advance NESN */
#define QM_EQCR_SEQNUM_NLIS 0x4000	/* More fragments to come */
#define QM_EQCR_SEQNUM_SEQMASK 0x3fff	/* sequence number goes here */

/* Software shadow state for the EQCR ring */
struct qm_eqcr {
	struct qm_eqcr_entry *ring, *cursor;
	u8 ci, available, ithresh, vbit;	/* shadow of h/w indices */
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy;		/* set between start() and commit() */
	enum qm_eqcr_pmode pmode;
#endif
};

/* Software shadow state for the DQRR (DeQueue Response Ring) */
struct qm_dqrr {
	const struct qm_dqrr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_dqrr_dmode dmode;
	enum qm_dqrr_pmode pmode;
	enum qm_dqrr_cmode cmode;
#endif
};

/* Software shadow state for the MR (Message Ring) */
struct qm_mr {
	union qm_mr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_mr_pmode pmode;
	enum qm_mr_cmode cmode;
#endif
};
224
225
226
/* MC (Management Command) command, FQ-addressed form */
struct qm_mcc_fq {
	u8 _ncw_verb;
	u8 __reserved1[3];
	__be32 fqid;	/* 24-bit */
	u8 __reserved2[56];
} __packed;

/* MC command, CGR-addressed form */
struct qm_mcc_cgr {
	u8 _ncw_verb;
	u8 __reserved1[30];
	u8 cgid;
	u8 __reserved2[32];
};

#define QM_MCC_VERB_VBIT 0x80
#define QM_MCC_VERB_MASK 0x7f	/* where the verb contains; */
#define QM_MCC_VERB_INITFQ_PARKED 0x40
#define QM_MCC_VERB_INITFQ_SCHED 0x41
#define QM_MCC_VERB_QUERYFQ 0x44
#define QM_MCC_VERB_QUERYFQ_NP 0x45	/* "non-programmable" fields */
#define QM_MCC_VERB_QUERYWQ 0x46
#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
#define QM_MCC_VERB_ALTER_SCHED 0x48	/* Schedule FQ */
#define QM_MCC_VERB_ALTER_FE 0x49	/* Force Eligible FQ */
#define QM_MCC_VERB_ALTER_RETIRE 0x4a	/* Retire FQ */
#define QM_MCC_VERB_ALTER_OOS 0x4b	/* Take FQ out of service */
#define QM_MCC_VERB_ALTER_FQXON 0x4d	/* FQ XON */
#define QM_MCC_VERB_ALTER_FQXOFF 0x4e	/* FQ XOFF */
#define QM_MCC_VERB_INITCGR 0x50
#define QM_MCC_VERB_MODIFYCGR 0x51
#define QM_MCC_VERB_CGRTESTWRITE 0x52
#define QM_MCC_VERB_QUERYCGR 0x58
#define QM_MCC_VERB_QUERYCONGESTION 0x59
/* 64-byte MC command slot; the first byte (verb) hands it to hardware */
union qm_mc_command {
	struct {
		u8 _ncw_verb; /* writes to this are non-coherent */
		u8 __reserved[63];
	};
	struct qm_mcc_initfq initfq;
	struct qm_mcc_initcgr initcgr;
	struct qm_mcc_fq fq;
	struct qm_mcc_cgr cgr;
};
271
272
273
/* MC (Management Command) result for QUERYFQ */
struct qm_mcr_queryfq {
	u8 verb;
	u8 result;
	u8 __reserved1[8];
	struct qm_fqd fqd;	/* the FQD fields are here */
	u8 __reserved2[30];
} __packed;

/* MC result for the ALTER_* family of commands */
struct qm_mcr_alterfq {
	u8 verb;
	u8 result;
	u8 fqs;			/* Frame Queue Status */
	u8 __reserved1[61];
};
#define QM_MCR_VERB_RRID 0x80
#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL 0x00
#define QM_MCR_RESULT_OK 0xf0
#define QM_MCR_RESULT_ERR_FQID 0xf1
#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3	/* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
#define QM_MCR_RESULT_PENDING 0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
#define QM_MCR_FQS_ORLPRESENT 0x02	/* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY 0x01	/* FQ has enqueued frames */
#define QM_MCR_TIMEOUT 10000		/* us */
/* 64-byte MC result slot; verb != 0 signals hardware completion */
union qm_mc_result {
	struct {
		u8 verb;
		u8 result;
		u8 __reserved1[62];
	};
	struct qm_mcr_queryfq queryfq;
	struct qm_mcr_alterfq alterfq;
	struct qm_mcr_querycgr querycgr;
	struct qm_mcr_querycongestion querycongestion;
	struct qm_mcr_querywq querywq;
	struct qm_mcr_queryfq_np queryfq_np;
};

/* Software state for the MC interface (one command + two result slots) */
struct qm_mc {
	union qm_mc_command *cr;
	union qm_mc_result *rr;
	u8 rridx, vbit;		/* which result slot / expected valid bit */
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum {
		/* Can be _mc_start()ed */
		qman_mc_idle,
		/* Can be _mc_commit()ed or _mc_abort()ed */
		qman_mc_user,
		/* Can only be _mc_retry()ed */
		qman_mc_hw
	} state;
#endif
};

/* Cache-enabled and cache-inhibited views of the portal */
struct qm_addr {
	void *ce;		/* cache-enabled */
	__be32 *ce_be;		/* same value as above but for direct access */
	void __iomem *ci;	/* cache-inhibited */
};
347
struct qm_portal {
	/*
	 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
	 * is setup-only, so isn't a cause for a concern. In other words, don't
	 * rearrange this structure on a whim, there be dragons ...
	 */
	struct qm_addr addr;
	struct qm_eqcr eqcr;
	struct qm_dqrr dqrr;
	struct qm_mr mr;
	struct qm_mc mc;
} ____cacheline_aligned;
361
362
/* Cache-inhibited register access. */
static inline u32 qm_in(struct qm_portal *p, u32 offset)
{
	return ioread32be(p->addr.ci + offset);
}

static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{
	iowrite32be(val, p->addr.ci + offset);
}

/* Cache Enabled Portal Access */
static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
{
	dpaa_invalidate(p->addr.ce + offset);
}

static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
{
	dpaa_touch_ro(p->addr.ce + offset);
}

/* Big-endian read of a 32-bit word from the cache-enabled region */
static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{
	return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
}
388
389
390
391#define EQCR_SHIFT ilog2(sizeof(struct qm_eqcr_entry))
392#define EQCR_CARRY (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
393
394
395static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
396{
397 uintptr_t addr = (uintptr_t)p;
398
399 addr &= ~EQCR_CARRY;
400
401 return (struct qm_eqcr_entry *)addr;
402}
403
404
405static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
406{
407 return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
408}
409
410
/* Increment the 'cursor' ring pointer, taking the ring wrap into account */
static inline void eqcr_inc(struct qm_eqcr *eqcr)
{
	/* increment to the next EQCR pointer and handle overflow and 'vbit' */
	struct qm_eqcr_entry *partial = eqcr->cursor + 1;

	eqcr->cursor = eqcr_carryclear(partial);
	if (partial != eqcr->cursor)
		eqcr->vbit ^= QM_EQCR_VERB_VBIT;
}

/*
 * Snapshot the hardware EQCR state into the software shadow and program
 * the stashing/produce-mode bits of the portal CFG register.
 */
static inline int qm_eqcr_init(struct qm_portal *portal,
			       enum qm_eqcr_pmode pmode,
			       unsigned int eq_stash_thresh,
			       int eq_stash_prio)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u32 cfg;
	u8 pi;

	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
	eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	eqcr->cursor = eqcr->ring + pi;
	/* the PI register carries the vbit just above the index bits */
	eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
		     QM_EQCR_VERB_VBIT : 0;
	eqcr->available = QM_EQCR_SIZE - 1 -
			  dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
	eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
	eqcr->pmode = pmode;
#endif
	/* top byte of CFG: stash threshold, stash priority, produce mode */
	cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
	      (eq_stash_thresh << 28) |
	      (eq_stash_prio << 26) |
	      ((pmode & 0x3) << 24);
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}
451
/* Sanity-check the shadow state against hardware before portal teardown */
static inline void qm_eqcr_finish(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);

	DPAA_ASSERT(!eqcr->busy);
	if (pi != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("losing uncommitted EQCR entries\n");
	if (ci != eqcr->ci)
		pr_crit("missing existing EQCR completions\n");
	if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("EQCR destroyed unquiesced\n");
}

/*
 * Begin an enqueue without refreshing CI from hardware; returns the entry
 * to fill in, or NULL if the shadow says the ring is full.
 */
static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
						 *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available)
		return NULL;

#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}

/*
 * As above, but when the shadow shows no room, refresh CI from the
 * (stashed) cache-enabled register first to pick up h/w consumption.
 */
static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
						 *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available) {
		old_ci = eqcr->ci;
		eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
			   (QM_EQCR_SIZE - 1);
		diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
		eqcr->available += diff;
		if (!diff)
			return NULL;
	}
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}

/* Shared commit-time assertions (debug builds only via DPAA_ASSERT) */
static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{
	DPAA_ASSERT(eqcr->busy);
	DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
	DPAA_ASSERT(eqcr->available >= 1);
}

/*
 * Commit the started entry using the valid-bit protocol: the verb write
 * (after dma_wmb()) is what publishes the entry to hardware.
 */
static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	struct qm_eqcr_entry *eqcursor;

	eqcr_commit_checks(eqcr);
	DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
	/* order the entry body before the verb that hands it to h/w */
	dma_wmb();
	eqcursor = eqcr->cursor;
	eqcursor->_ncw_verb = myverb | eqcr->vbit;
	dpaa_flush(eqcursor);
	eqcr_inc(eqcr);
	eqcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
#endif
}
530
/* Prefetch the cache-enabled CI register ahead of an update */
static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
{
	qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
}

/* Refresh CI from hardware; returns how many entries h/w consumed */
static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci = eqcr->ci;

	eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
	eqcr->available += diff;
	return diff;
}

/* Program the EQCR interrupt threshold (shadowed in eqcr->ithresh) */
static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	eqcr->ithresh = ithresh;
	qm_out(portal, QM_REG_EQCR_ITR, ithresh);
}

static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	return eqcr->available;
}

/* Number of in-flight entries; ring capacity is QM_EQCR_SIZE - 1 */
static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	return QM_EQCR_SIZE - 1 - eqcr->available;
}
569
570
571
572#define DQRR_SHIFT ilog2(sizeof(struct qm_dqrr_entry))
573#define DQRR_CARRY (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)
574
/* As with EQCR, strip the ring-wrap carry bit from a DQRR cursor */
static const struct qm_dqrr_entry *dqrr_carryclear(
					const struct qm_dqrr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~DQRR_CARRY;

	return (const struct qm_dqrr_entry *)addr;
}

static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
{
	return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
}

static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
{
	return dqrr_carryclear(e + 1);
}

/* Program the DQRR max-fill field (bits 23:20 of the CFG register) */
static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
{
	qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
	       ((mf & (QM_DQRR_SIZE - 1)) << 20));
}
600
/*
 * Quiesce any outstanding dequeue commands, snapshot the hardware DQRR
 * state into the shadow, and program dequeue/consume modes in CFG.
 */
static inline int qm_dqrr_init(struct qm_portal *portal,
			       const struct qm_portal_config *config,
			       enum qm_dqrr_dmode dmode,
			       enum qm_dqrr_pmode pmode,
			       enum qm_dqrr_cmode cmode, u8 max_fill)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	u32 cfg;

	/* Make sure the DQRR will be idle when we enable */
	qm_out(portal, QM_REG_DQRR_SDQCR, 0);
	qm_out(portal, QM_REG_DQRR_VDQCR, 0);
	qm_out(portal, QM_REG_DQRR_PDQCR, 0);
	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
	dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->cursor = dqrr->ring + dqrr->ci;
	dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
	dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
			QM_DQRR_VERB_VBIT : 0;
	dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	dqrr->dmode = dmode;
	dqrr->pmode = pmode;
	dqrr->cmode = cmode;
#endif
	/* Invalidate every ring entry before use */
	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
		dpaa_invalidate(qm_cl(dqrr->ring, cfg));
	cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
	      ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
	      ((dmode & 1) << 18) |			/* DP */
	      ((cmode & 3) << 16) |			/* DCM */
	      0xa0 |					/* RE+SE */
	      (0 ? 0x40 : 0) |				/* Ignore RP */
	      (0 ? 0x10 : 0);				/* Ignore SP */
	qm_out(portal, QM_REG_CFG, cfg);
	qm_dqrr_set_maxfill(portal, max_fill);
	return 0;
}

/* Warn (debug builds) if non-CDC DQRR entries were left unconsumed */
static inline void qm_dqrr_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_dqrr *dqrr = &portal->dqrr;

	if (dqrr->cmode != qm_dqrr_cdc &&
	    dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
		pr_crit("Ignoring completed DQRR entries\n");
#endif
}
652
653static inline const struct qm_dqrr_entry *qm_dqrr_current(
654 struct qm_portal *portal)
655{
656 struct qm_dqrr *dqrr = &portal->dqrr;
657
658 if (!dqrr->fill)
659 return NULL;
660 return dqrr->cursor;
661}
662
/* Advance past the current entry; returns the remaining fill count */
static inline u8 qm_dqrr_next(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->fill);
	dqrr->cursor = dqrr_inc(dqrr->cursor);
	return --dqrr->fill;
}

/*
 * Poll the next producer slot's valid bit; if hardware has produced a new
 * entry, advance the shadow PI (toggling vbit on wrap) and bump 'fill'.
 */
static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);

	DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
#ifndef CONFIG_FSL_PAMU
	/*
	 * If PAMU is not available we need to invalidate the cache.
	 * When PAMU is available the cache is updated by stash
	 */
	dpaa_invalidate_touch_ro(res);
#endif
	if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
		dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
		if (!dqrr->pi)
			dqrr->vbit ^= QM_DQRR_VERB_VBIT;
		dqrr->fill++;
	}
}
692
/*
 * Discrete Consumption Acknowledgment: acknowledge a single entry by
 * index via the DCAP register, optionally parking the FQ.
 */
static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
					    const struct qm_dqrr_entry *dq,
					    int park)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
	int idx = dqrr_ptr2idx(dq);

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	DPAA_ASSERT((dqrr->ring + idx) == dq);
	DPAA_ASSERT(idx < QM_DQRR_SIZE);
	qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) |	/* DQRR_DCAP::S */
	       ((park ? 1 : 0) << 6) |			/* DQRR_DCAP::PK */
	       idx);					/* DQRR_DCAP::DCAP_CI */
}

/* Acknowledge several entries at once via the DCAP bitmask form */
static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) |	/* DQRR_DCAP::S */
	       (bitmask << 16));			/* DQRR_DCAP::DCAP_CI */
}
716
/* Program the Static Dequeue Command Register */
static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
{
	qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
}

/* Program the Volatile Dequeue Command Register */
static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
{
	qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
}

/* Set the DQRR interrupt threshold; rejects values above the h/w maximum */
static inline int qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{

	if (ithresh > QMAN_DQRR_IT_MAX)
		return -EINVAL;

	qm_out(portal, QM_REG_DQRR_ITR, ithresh);

	return 0;
}
737
738
739
740#define MR_SHIFT ilog2(sizeof(union qm_mr_entry))
741#define MR_CARRY (uintptr_t)(QM_MR_SIZE << MR_SHIFT)
742
/* As with the other rings, strip the ring-wrap carry bit from a cursor */
static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~MR_CARRY;

	return (union qm_mr_entry *)addr;
}

static inline int mr_ptr2idx(const union qm_mr_entry *e)
{
	return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
}

static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
{
	return mr_carryclear(e + 1);
}
761
/* Snapshot the hardware MR state and program the consume mode in CFG */
static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
			     enum qm_mr_cmode cmode)
{
	struct qm_mr *mr = &portal->mr;
	u32 cfg;

	mr->ring = portal->addr.ce + QM_CL_MR;
	mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
	mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
	mr->cursor = mr->ring + mr->ci;
	mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
	mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
		? QM_MR_VERB_VBIT : 0;
	mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mr->pmode = pmode;
	mr->cmode = cmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
	      ((cmode & 1) << 8);	/* QCSP_CFG:MM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}

/* Warn if MR entries were left unconsumed at teardown */
static inline void qm_mr_finish(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	if (mr->ci != mr_ptr2idx(mr->cursor))
		pr_crit("Ignoring completed MR entries\n");
}
793
794static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
795{
796 struct qm_mr *mr = &portal->mr;
797
798 if (!mr->fill)
799 return NULL;
800 return mr->cursor;
801}
802
/* Advance past the current MR entry; returns the remaining fill count */
static inline int qm_mr_next(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->fill);
	mr->cursor = mr_inc(mr->cursor);
	return --mr->fill;
}

/*
 * Poll the next producer slot's valid bit and, if hardware produced a
 * new message, advance the shadow PI (toggling vbit on wrap). The slot
 * that will be produced next is invalidated/prefetched for the next poll.
 */
static inline void qm_mr_pvb_update(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;
	union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);

	DPAA_ASSERT(mr->pmode == qm_mr_pvb);

	if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
		if (!mr->pi)
			mr->vbit ^= QM_MR_VERB_VBIT;
		mr->fill++;
		res = mr_inc(res);
	}
	dpaa_invalidate_touch_ro(res);
}
828
/* Acknowledge 'num' MR entries by writing the new CI (cache-inhibited) */
static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

/* Acknowledge everything up to the software cursor */
static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = mr_ptr2idx(mr->cursor);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

/* Program the MR interrupt threshold */
static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	qm_out(portal, QM_REG_MR_ITR, ithresh);
}
851
852
853
/* --- Management command API --- */
static inline int qm_mc_init(struct qm_portal *portal)
{
	u8 rr0, rr1;
	struct qm_mc *mc = &portal->mc;

	mc->cr = portal->addr.ce + QM_CL_CR;
	mc->rr = portal->addr.ce + QM_CL_RR0;
	/*
	 * The expected valid bit in the next CR command is determined by
	 * looking at the two response slots: whichever is "more recently
	 * written" (non-zero verb, or both zero meaning a fresh portal)
	 * tells us which result slot and vbit to use next.
	 */
	rr0 = mc->rr->verb;
	rr1 = (mc->rr+1)->verb;
	if ((rr0 == 0 && rr1 == 0) || rr0 != 0)
		mc->rridx = 1;
	else
		mc->rridx = 0;
	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return 0;
}
880
/* Debug-only teardown check: an in-flight MC command would be lost here */
static inline void qm_mc_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
	if (mc->state != qman_mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}

/* Zero the command slot and hand it to the caller to fill in */
static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_user;
#endif
	dpaa_zero(mc->cr);
	return mc->cr;
}
903
/*
 * Publish the command to hardware: the verb write (after dma_wmb())
 * is what triggers execution; the expected result slot is prefetched.
 */
static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_user);
	dma_wmb();
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_hw;
#endif
}

/*
 * Poll once for the result of the committed command. Returns NULL while
 * hardware has not yet written the result's verb byte.
 */
static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
	if (!rr->verb) {
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= QM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return rr;
}
941
942static inline int qm_mc_result_timeout(struct qm_portal *portal,
943 union qm_mc_result **mcr)
944{
945 int timeout = QM_MCR_TIMEOUT;
946
947 do {
948 *mcr = qm_mc_result(portal);
949 if (*mcr)
950 break;
951 udelay(1);
952 } while (--timeout);
953
954 return timeout;
955}
956
/* Small helpers over the per-FQ flag word (QMAN_FQ_STATE_* bits) */
static inline void fq_set(struct qman_fq *fq, u32 mask)
{
	fq->flags |= mask;
}

static inline void fq_clear(struct qman_fq *fq, u32 mask)
{
	fq->flags &= ~mask;
}

/* returns the masked bits, i.e. non-zero (not necessarily 1) when set */
static inline int fq_isset(struct qman_fq *fq, u32 mask)
{
	return fq->flags & mask;
}

static inline int fq_isclear(struct qman_fq *fq, u32 mask)
{
	return !(fq->flags & mask);
}
976
/* Per-CPU software portal layered on top of the hardware portal */
struct qman_portal {
	struct qm_portal p;
	/* PORTAL_BITS_*** - dynamic, strictly internal */
	unsigned long bits;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	u32 use_eqcr_ci_stashing;
	/* only 1 volatile dequeue at a time */
	struct qman_fq *vdqcr_owned;
	u32 sdqcr;
	/* probing time config params for cpu-affine portals */
	const struct qm_portal_config *config;
	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
	struct qman_cgrs *cgrs;
	/* linked-list of CSCN handlers. */
	struct list_head cgr_cbs;
	/* list lock */
	spinlock_t cgr_lock;
	struct work_struct congestion_work;
	struct work_struct mr_work;
	char irqname[MAX_IRQNAME];
};

static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static u16 affine_channels[NR_CPUS];
static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
struct qman_portal *affine_portals[NR_CPUS];

/* Disables preemption; pair with put_affine_portal() */
static inline struct qman_portal *get_affine_portal(void)
{
	return &get_cpu_var(qman_affine_portal);
}

static inline void put_affine_portal(void)
{
	put_cpu_var(qman_affine_portal);
}
1015
1016
1017static inline struct qman_portal *get_portal_for_channel(u16 channel)
1018{
1019 int i;
1020
1021 for (i = 0; i < num_possible_cpus(); i++) {
1022 if (affine_portals[i] &&
1023 affine_portals[i]->config->channel == channel)
1024 return affine_portals[i];
1025 }
1026
1027 return NULL;
1028}
1029
1030static struct workqueue_struct *qm_portal_wq;
1031
/*
 * Set the portal's DQRR interrupt threshold. Returns -EINVAL for a NULL
 * portal or an out-of-range threshold; mirrors the value into the shadow.
 */
int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh)
{
	int res;

	if (!portal)
		return -EINVAL;

	res = qm_dqrr_set_ithresh(&portal->p, ithresh);
	if (res)
		return res;

	portal->p.dqrr.ithresh = ithresh;

	return 0;
}
EXPORT_SYMBOL(qman_dqrr_set_ithresh);
1048
1049void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh)
1050{
1051 if (portal && ithresh)
1052 *ithresh = qm_in(&portal->p, QM_REG_DQRR_ITR);
1053}
1054EXPORT_SYMBOL(qman_dqrr_get_ithresh);
1055
1056void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod)
1057{
1058 if (portal && iperiod)
1059 *iperiod = qm_in(&portal->p, QM_REG_ITPR);
1060}
1061EXPORT_SYMBOL(qman_portal_get_iperiod);
1062
/* Program the interrupt-timeout period; bounded by QMAN_ITP_MAX (0xFFF) */
int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod)
{
	if (!portal || iperiod > QMAN_ITP_MAX)
		return -EINVAL;

	qm_out(&portal->p, QM_REG_ITPR, iperiod);

	return 0;
}
EXPORT_SYMBOL(qman_portal_set_iperiod);

/* Create the (max_active=1) workqueue used for deferred portal work */
int qman_wq_alloc(void)
{
	qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
	if (!qm_portal_wq)
		return -ENOMEM;
	return 0;
}
1081
1082
1083void qman_enable_irqs(void)
1084{
1085 int i;
1086
1087 for (i = 0; i < num_possible_cpus(); i++) {
1088 if (affine_portals[i]) {
1089 qm_out(&affine_portals[i]->p, QM_REG_ISR, 0xffffffff);
1090 qm_out(&affine_portals[i]->p, QM_REG_IIR, 0);
1091 }
1092
1093 }
1094}
1095
1096
1097
1098
1099
static DECLARE_WAIT_QUEUE_HEAD(affine_queue);

/* Lookup table mapping FQ indices/tags back to qman_fq objects */
static struct qman_fq **fq_table;
static u32 num_fqids;

/* Allocate the lookup table; it holds num_fqids * 2 entries (see below) */
int qman_alloc_fq_table(u32 _num_fqids)
{
	num_fqids = _num_fqids;

	fq_table = vzalloc(array3_size(sizeof(struct qman_fq *),
				       num_fqids, 2));
	if (!fq_table)
		return -ENOMEM;

	pr_debug("Allocated fq lookup table at %p, entry count %u\n",
		 fq_table, num_fqids * 2);
	return 0;
}

/* Look an FQ up by table index, with (debug) bounds/consistency checks */
static struct qman_fq *idx_to_fq(u32 idx)
{
	struct qman_fq *fq;

#ifdef CONFIG_FSL_DPAA_CHECKING
	if (WARN_ON(idx >= num_fqids * 2))
		return NULL;
#endif
	fq = fq_table[idx];
	DPAA_ASSERT(!fq || idx == fq->idx);

	return fq;
}

/*
 * Only returns full-service fq objects, not enqueue-only
 * references (QMAN_FQ_FLAG_NO_MODIFY).
 */
static struct qman_fq *fqid_to_fq(u32 fqid)
{
	return idx_to_fq(fqid * 2);
}

/* On 64-bit the tag is a table index; on 32-bit it is the pointer itself */
static struct qman_fq *tag_to_fq(u32 tag)
{
#if BITS_PER_LONG == 64
	return idx_to_fq(tag);
#else
	return (struct qman_fq *)tag;
#endif
}

static u32 fq_to_tag(struct qman_fq *fq)
{
#if BITS_PER_LONG == 64
	return fq->idx;
#else
	return (u32)fq;
#endif
}
1159
1160static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
1161static inline unsigned int __poll_portal_fast(struct qman_portal *p,
1162 unsigned int poll_limit, bool sched_napi);
1163static void qm_congestion_task(struct work_struct *work);
1164static void qm_mr_process_task(struct work_struct *work);
1165
/* Portal interrupt handler: fast-path DQRR work, then everything else */
static irqreturn_t portal_isr(int irq, void *ptr)
{
	struct qman_portal *p = ptr;
	u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
	u32 clear = 0;

	if (unlikely(!is))
		return IRQ_NONE;

	/* DQRR-handling if it's interrupt-driven */
	if (is & QM_PIRQ_DQRI) {
		__poll_portal_fast(p, QMAN_POLL_LIMIT, true);
		clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
	}
	/* Handling of anything else that's interrupt-driven */
	clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
	qm_out(&p->p, QM_REG_ISR, clear);
	return IRQ_HANDLED;
}
1185
/*
 * Drain any FQRNI (FQ Retirement Notification, Ignored) messages left in
 * the MR by previous users of this portal. Returns 0 once the MR is empty,
 * -1 if a non-FQRNI message is found (the portal is then unusable as-is).
 */
static int drain_mr_fqrni(struct qm_portal *p)
{
	const union qm_mr_entry *msg;
loop:
	qm_mr_pvb_update(p);
	msg = qm_mr_current(p);
	if (!msg) {
		/*
		 * if MR was full and h/w had other FQRNI entries to produce, we
		 * need to allow it time to produce those entries once the
		 * existing entries are consumed. A worst-case situation
		 * (fully-loaded system) means h/w sequencers may have to do 3-4
		 * other things before servicing the portal's MR pump, each of
		 * which (if slow) may take ~50 qman cycles (which is ~200
		 * processor cycles). So rounding up and then multiplying this
		 * worst-case estimate by a factor of 10, just to be
		 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
		 * one entry at a time, so h/w has an opportunity to produce new
		 * entries well before the ring has been fully consumed, so
		 * we're being *really* paranoid here.
		 */
		mdelay(1);
		qm_mr_pvb_update(p);
		msg = qm_mr_current(p);
		if (!msg)
			return 0;
	}
	if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
		/* We aren't draining anything but FQRNIs */
		pr_err("Found verb 0x%x in MR\n", msg->verb);
		return -1;
	}
	qm_mr_next(p);
	qm_mr_cci_consume(p, 1);
	goto loop;
}
1222
/*
 * Bring up one software portal: initialise the EQCR/DQRR/MR/MC sub-blocks,
 * hook up the interrupt, then verify the rings are clean before enabling.
 * The ordering of register writes below is deliberate; do not reorder.
 */
static int qman_create_portal(struct qman_portal *portal,
			      const struct qm_portal_config *c,
			      const struct qman_cgrs *cgrs)
{
	struct qm_portal *p;
	int ret;
	u32 isdr;

	p = &portal->p;

#ifdef CONFIG_FSL_PAMU
	/* PAMU is required for stashing */
	portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
#else
	portal->use_eqcr_ci_stashing = 0;
#endif
	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference
	 */
	p->addr.ce = c->addr_virt_ce;
	p->addr.ce_be = c->addr_virt_ce;
	p->addr.ci = c->addr_virt_ci;
	/*
	 * If CI-stashing is used, the current defaults use a threshold of 3,
	 * and stash with high-than-DQRR priority.
	 */
	if (qm_eqcr_init(p, qm_eqcr_pvb,
			portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
		dev_err(c->dev, "EQCR initialisation failed\n");
		goto fail_eqcr;
	}
	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
			qm_dqrr_cdc, DQRR_MAXFILL)) {
		dev_err(c->dev, "DQRR initialisation failed\n");
		goto fail_dqrr;
	}
	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
		dev_err(c->dev, "MR initialisation failed\n");
		goto fail_mr;
	}
	if (qm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/* static interrupt-gating controls */
	qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
	qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
	qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
	portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL);
	if (!portal->cgrs)
		goto fail_cgrs;
	/* initial snapshot is no-depletion */
	qman_cgrs_init(&portal->cgrs[1]);
	if (cgrs)
		portal->cgrs[0] = *cgrs;
	else
		/* if the given mask is NULL, assume all CGRs can be seen */
		qman_cgrs_fill(&portal->cgrs[0]);
	INIT_LIST_HEAD(&portal->cgr_cbs);
	spin_lock_init(&portal->cgr_lock);
	INIT_WORK(&portal->congestion_work, qm_congestion_task);
	INIT_WORK(&portal->mr_work, qm_mr_process_task);
	portal->bits = 0;
	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
	isdr = 0xffffffff;
	qm_out(p, QM_REG_ISDR, isdr);
	portal->irq_sources = 0;
	qm_out(p, QM_REG_IER, 0);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	qm_out(p, QM_REG_IIR, 1);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}

	if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
		goto fail_affinity;

	/* Need EQCR to be empty before continuing */
	isdr &= ~QM_PIRQ_EQCI;
	qm_out(p, QM_REG_ISDR, isdr);
	ret = qm_eqcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "EQCR unclean\n");
		goto fail_eqcr_empty;
	}
	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
	qm_out(p, QM_REG_ISDR, isdr);
	if (qm_dqrr_current(p)) {
		dev_dbg(c->dev, "DQRR unclean\n");
		qm_dqrr_cdc_consume_n(p, 0xffff);
	}
	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
		/* special handling, drain just in case it's a few FQRNIs */
		const union qm_mr_entry *e = qm_mr_current(p);

		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
			e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
		goto fail_dqrr_mr_empty;
	}
	/* Success */
	portal->config = c;
	qm_out(p, QM_REG_ISR, 0xffffffff);
	qm_out(p, QM_REG_ISDR, 0);
	if (!qman_requires_cleanup())
		qm_out(p, QM_REG_IIR, 0);
	/* Write a sane SDQCR */
	qm_dqrr_sdqcr_set(p, portal->sdqcr);
	return 0;

fail_dqrr_mr_empty:
fail_eqcr_empty:
fail_affinity:
	free_irq(c->irq, portal);
fail_irq:
	kfree(portal->cgrs);
fail_cgrs:
	qm_mc_finish(p);
fail_mc:
	qm_mr_finish(p);
fail_mr:
	qm_dqrr_finish(p);
fail_dqrr:
	qm_eqcr_finish(p);
fail_eqcr:
	return -EIO;
}
1354
1355struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
1356 const struct qman_cgrs *cgrs)
1357{
1358 struct qman_portal *portal;
1359 int err;
1360
1361 portal = &per_cpu(qman_affine_portal, c->cpu);
1362 err = qman_create_portal(portal, c, cgrs);
1363 if (err)
1364 return NULL;
1365
1366 spin_lock(&affine_mask_lock);
1367 cpumask_set_cpu(c->cpu, &affine_mask);
1368 affine_channels[c->cpu] = c->channel;
1369 affine_portals[c->cpu] = portal;
1370 spin_unlock(&affine_mask_lock);
1371
1372 return portal;
1373}
1374
/*
 * Tear down a portal created by qman_create_portal(): quiesce dequeues and
 * the EQCR, release the IRQ, then finish each sub-block in reverse order of
 * initialisation.
 */
static void qman_destroy_portal(struct qman_portal *qm)
{
	const struct qm_portal_config *pcfg;

	/* Stop dequeues on the portal */
	qm_dqrr_sdqcr_set(&qm->p, 0);

	/*
	 * NB: the consumer-index update is called twice. NOTE(review): this
	 * mirrors the upstream rationale that, due to the prefetching model
	 * used for CI updates in the enqueue path, a single update may only
	 * invalidate the CI cacheline *after* working on it, so two calls are
	 * needed for a full update regardless of where enqueue processing
	 * stood when teardown began — confirm against the EQCR CCE
	 * implementation before changing.
	 */
	qm_eqcr_cce_update(&qm->p);
	qm_eqcr_cce_update(&qm->p);
	pcfg = qm->config;

	free_irq(pcfg->irq, qm);

	kfree(qm->cgrs);
	/* Finish sub-blocks in the reverse order they were initialised */
	qm_mc_finish(&qm->p);
	qm_mr_finish(&qm->p);
	qm_dqrr_finish(&qm->p);
	qm_eqcr_finish(&qm->p);

	/* Mark the portal as unconfigured */
	qm->config = NULL;
}
1405
1406const struct qm_portal_config *qman_destroy_affine_portal(void)
1407{
1408 struct qman_portal *qm = get_affine_portal();
1409 const struct qm_portal_config *pcfg;
1410 int cpu;
1411
1412 pcfg = qm->config;
1413 cpu = pcfg->cpu;
1414
1415 qman_destroy_portal(qm);
1416
1417 spin_lock(&affine_mask_lock);
1418 cpumask_clear_cpu(cpu, &affine_mask);
1419 spin_unlock(&affine_mask_lock);
1420 put_affine_portal();
1421 return pcfg;
1422}
1423
1424
/*
 * Apply the state transition implied by an FQ-related message-ring entry to
 * the driver-side struct qman_fq. 'verb' is the message verb already masked
 * with QM_MR_VERB_TYPE_MASK.
 */
static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
				   const union qm_mr_entry *msg, u8 verb)
{
	switch (verb) {
	case QM_MR_VERB_FQRL:
		/* ORL of a retired FQ has drained: clear the ORL flag */
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
		fq_clear(fq, QMAN_FQ_STATE_ORL);
		break;
	case QM_MR_VERB_FQRN:
		/* Retirement notification: record NE/ORL and go retired */
		DPAA_ASSERT(fq->state == qman_fq_state_parked ||
			    fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		fq->state = qman_fq_state_retired;
		break;
	case QM_MR_VERB_FQPN:
		/* FQ parked by hardware (e.g. HELDACTIVE park request) */
		DPAA_ASSERT(fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
		fq->state = qman_fq_state_parked;
	}
}
1450
1451static void qm_congestion_task(struct work_struct *work)
1452{
1453 struct qman_portal *p = container_of(work, struct qman_portal,
1454 congestion_work);
1455 struct qman_cgrs rr, c;
1456 union qm_mc_result *mcr;
1457 struct qman_cgr *cgr;
1458
1459 spin_lock(&p->cgr_lock);
1460 qm_mc_start(&p->p);
1461 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
1462 if (!qm_mc_result_timeout(&p->p, &mcr)) {
1463 spin_unlock(&p->cgr_lock);
1464 dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
1465 qman_p_irqsource_add(p, QM_PIRQ_CSCI);
1466 return;
1467 }
1468
1469 qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
1470 &p->cgrs[0]);
1471
1472 qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
1473
1474 qman_cgrs_cp(&p->cgrs[1], &rr);
1475
1476 list_for_each_entry(cgr, &p->cgr_cbs, node)
1477 if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
1478 cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
1479 spin_unlock(&p->cgr_lock);
1480 qman_p_irqsource_add(p, QM_PIRQ_CSCI);
1481}
1482
/*
 * Workqueue handler that drains the message ring (MR): software ERNs, FQ
 * state-change messages and DCP ERNs. Re-enables the MRI interrupt source
 * when the ring is empty.
 */
static void qm_mr_process_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     mr_work);
	const union qm_mr_entry *msg;
	struct qman_fq *fq;
	u8 verb, num = 0;

	preempt_disable();

	while (1) {
		qm_mr_pvb_update(&p->p);
		msg = qm_mr_current(&p->p);
		if (!msg)
			break;

		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
		/* 0x20 bit set => FQ/DCP message; clear => software ERN */
		if (verb & 0x20) {
			switch (verb) {
			case QM_MR_VERB_FQRNI:
				/* nada, we drop FQRNIs on the floor */
				break;
			case QM_MR_VERB_FQRN:
			case QM_MR_VERB_FQRL:
				/* Lookup in the retirement table */
				fq = fqid_to_fq(qm_fqid_get(&msg->fq));
				if (WARN_ON(!fq))
					break;
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_FQPN:
				/* Parked: context_b carries the demux tag */
				fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_DC_ERN:
				/* DCP ERN: no handler, frames are lost */
				pr_crit_once("Leaking DCP ERNs!\n");
				break;
			default:
				pr_crit("Invalid MR verb 0x%02x\n", verb);
			}
		} else {
			/* It's a software ERN */
			fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
			fq->cb.ern(p, fq, msg);
		}
		num++;
		qm_mr_next(&p->p);
	}

	/* tell hardware how many entries we consumed, then re-arm MRI */
	qm_mr_cci_consume(&p->p, num);
	qman_p_irqsource_add(p, QM_PIRQ_MRI);
	preempt_enable();
}
1543
/*
 * Slow-path interrupt processing: congestion and MR events are deferred to
 * the portal workqueue (with their source masked until the work re-enables
 * it); EQCR-ring interrupts update the consumer index and wake waiters.
 * Returns the status bits that were handled.
 */
static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
{
	if (is & QM_PIRQ_CSCI) {
		/* mask CSCI until qm_congestion_task() re-adds it */
		qman_p_irqsource_remove(p, QM_PIRQ_CSCI);
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->congestion_work);
	}

	if (is & QM_PIRQ_EQRI) {
		qm_eqcr_cce_update(&p->p);
		qm_eqcr_set_ithresh(&p->p, 0);
		wake_up(&affine_queue);
	}

	if (is & QM_PIRQ_MRI) {
		/* mask MRI until qm_mr_process_task() re-adds it */
		qman_p_irqsource_remove(p, QM_PIRQ_MRI);
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->mr_work);
	}

	return is;
}
1566
1567
1568
1569
1570
/*
 * Relinquish volatile-dequeue ownership of the portal: forget the owning FQ,
 * clear its VDQCR flag and wake anyone waiting on affine_queue for the
 * volatile dequeue command to complete.
 */
static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
{
	p->vdqcr_owned = NULL;
	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
	wake_up(&affine_queue);
}
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
/*
 * Fast-path DQRR processing: walk the dequeue response ring, dispatch each
 * entry to its FQ's dqrr callback, and consume/park/defer the entry as the
 * callback requests. Stops after 'poll_limit' entries, when the ring is
 * empty, or when a callback asks to stop. Returns the number of entries
 * processed.
 */
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					unsigned int poll_limit, bool sched_napi)
{
	const struct qm_dqrr_entry *dq;
	struct qman_fq *fq;
	enum qman_cb_dqrr_result res;
	unsigned int limit = 0;

	do {
		qm_dqrr_pvb_update(&p->p);
		dq = qm_dqrr_current(&p->p);
		if (!dq)
			break;

		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
			/*
			 * VDQCR: don't trust context_b as the FQ may have
			 * been configured for h/w consumption and we're
			 * draining it post-retirement; use the portal's
			 * recorded VDQCR owner instead.
			 */
			fq = p->vdqcr_owned;
			/*
			 * NE is only set when retiring, so it only needs
			 * clearing on the volatile-dequeue path — keeps the
			 * scheduled (SDQCR) path free of this check.
			 */
			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
				fq_clear(fq, QMAN_FQ_STATE_NE);
			/*
			 * Callback dispatch duplicated from the SDQCR branch
			 * because there is work to do before *and* after it
			 * here, and we want to avoid extra branches in the
			 * scheduled path.
			 */
			res = fq->cb.dqrr(p, fq, dq, sched_napi);
			if (res == qman_cb_dqrr_stop)
				break;
			/* VDQCR done? Release ownership and wake waiters */
			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
				clear_vdqcr(p, fq);
		} else {
			/* SDQCR: context_b is the demux tag for the FQ */
			fq = tag_to_fq(be32_to_cpu(dq->context_b));
			/* Let the callback do its stuff */
			res = fq->cb.dqrr(p, fq, dq, sched_napi);
			/*
			 * The callback can request that we exit without
			 * consuming this entry nor advancing.
			 */
			if (res == qman_cb_dqrr_stop)
				break;
		}

		/*
		 * Parking is only legal if HELDACTIVE was set. NB:
		 * FORCEELIGIBLE implies HELDACTIVE, so checking HELDACTIVE
		 * covers both.
		 */
		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
			    (res != qman_cb_dqrr_park));
		/* "defer" means "skip it, I'll consume it myself later" */
		if (res != qman_cb_dqrr_defer)
			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
						 res == qman_cb_dqrr_park);
		/* Move forward */
		qm_dqrr_next(&p->p);
		/*
		 * Loop back unless the budget is exhausted or the callback
		 * asked to stop after consuming this entry.
		 */
	} while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);

	return limit;
}
1681
1682void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
1683{
1684 unsigned long irqflags;
1685
1686 local_irq_save(irqflags);
1687 p->irq_sources |= bits & QM_PIRQ_VISIBLE;
1688 qm_out(&p->p, QM_REG_IER, p->irq_sources);
1689 local_irq_restore(irqflags);
1690}
1691EXPORT_SYMBOL(qman_p_irqsource_add);
1692
/*
 * Disable interrupt sources on the portal. Bits outside QM_PIRQ_VISIBLE are
 * ignored.
 */
void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
{
	unsigned long irqflags;
	u32 ier;

	/*
	 * The ISR only processes+clears status bits that are enabled in
	 * p->irq_sources, so trimming the mask can race with an in-flight
	 * event. After writing the trimmed mask to IER we re-read IER and
	 * write-1-to-clear any ISR bits that are no longer enabled, so a
	 * stale latched status cannot fire spuriously if the source is
	 * re-enabled later.
	 */
	local_irq_save(irqflags);
	bits &= QM_PIRQ_VISIBLE;
	p->irq_sources &= ~bits;
	qm_out(&p->p, QM_REG_IER, p->irq_sources);
	/* re-read to "settle" the write before acking stale status */
	ier = qm_in(&p->p, QM_REG_IER);
	/*
	 * Using "~ier" (rather than "bits" or "~p->irq_sources") clears
	 * data-dependencies; avoids compiler/PCS re-ordering concerns.
	 */
	qm_out(&p->p, QM_REG_ISR, ~ier);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_irqsource_remove);
1721
/* Return the mask of CPUs that have an initialised affine portal. */
const cpumask_t *qman_affine_cpus(void)
{
	return &affine_mask;
}
EXPORT_SYMBOL(qman_affine_cpus);
1727
1728u16 qman_affine_channel(int cpu)
1729{
1730 if (cpu < 0) {
1731 struct qman_portal *portal = get_affine_portal();
1732
1733 cpu = portal->config->cpu;
1734 put_affine_portal();
1735 }
1736 WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
1737 return affine_channels[cpu];
1738}
1739EXPORT_SYMBOL(qman_affine_channel);
1740
/* Return the affine portal registered for 'cpu' (NULL if none). */
struct qman_portal *qman_get_affine_portal(int cpu)
{
	return affine_portals[cpu];
}
EXPORT_SYMBOL(qman_get_affine_portal);
1746
1747int qman_start_using_portal(struct qman_portal *p, struct device *dev)
1748{
1749 return (!device_link_add(dev, p->config->dev,
1750 DL_FLAG_AUTOREMOVE_CONSUMER)) ? -EINVAL : 0;
1751}
1752EXPORT_SYMBOL(qman_start_using_portal);
1753
/*
 * Poll the portal's DQRR for up to 'limit' entries (non-NAPI caller).
 * Returns the number of entries processed.
 */
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
	return __poll_portal_fast(p, limit, false);
}
EXPORT_SYMBOL(qman_p_poll_dqrr);
1759
1760void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
1761{
1762 unsigned long irqflags;
1763
1764 local_irq_save(irqflags);
1765 pools &= p->config->pools;
1766 p->sdqcr |= pools;
1767 qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1768 local_irq_restore(irqflags);
1769}
1770EXPORT_SYMBOL(qman_p_static_dequeue_add);
1771
1772
1773
1774static const char *mcr_result_str(u8 result)
1775{
1776 switch (result) {
1777 case QM_MCR_RESULT_NULL:
1778 return "QM_MCR_RESULT_NULL";
1779 case QM_MCR_RESULT_OK:
1780 return "QM_MCR_RESULT_OK";
1781 case QM_MCR_RESULT_ERR_FQID:
1782 return "QM_MCR_RESULT_ERR_FQID";
1783 case QM_MCR_RESULT_ERR_FQSTATE:
1784 return "QM_MCR_RESULT_ERR_FQSTATE";
1785 case QM_MCR_RESULT_ERR_NOTEMPTY:
1786 return "QM_MCR_RESULT_ERR_NOTEMPTY";
1787 case QM_MCR_RESULT_PENDING:
1788 return "QM_MCR_RESULT_PENDING";
1789 case QM_MCR_RESULT_ERR_BADCOMMAND:
1790 return "QM_MCR_RESULT_ERR_BADCOMMAND";
1791 }
1792 return "<unknown MCR result>";
1793}
1794
1795int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
1796{
1797 if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
1798 int ret = qman_alloc_fqid(&fqid);
1799
1800 if (ret)
1801 return ret;
1802 }
1803 fq->fqid = fqid;
1804 fq->flags = flags;
1805 fq->state = qman_fq_state_oos;
1806 fq->cgr_groupid = 0;
1807
1808
1809 if (fqid == 0 || fqid >= num_fqids) {
1810 WARN(1, "bad fqid %d\n", fqid);
1811 return -EINVAL;
1812 }
1813
1814 fq->idx = fqid * 2;
1815 if (flags & QMAN_FQ_FLAG_NO_MODIFY)
1816 fq->idx++;
1817
1818 WARN_ON(fq_table[fq->idx]);
1819 fq_table[fq->idx] = fq;
1820
1821 return 0;
1822}
1823EXPORT_SYMBOL(qman_create_fq);
1824
/*
 * Unregister an FQ object. The FQ must be quiesced (parked or OOS); calling
 * this on an FQ in any other state is a caller bug and trips the assertion.
 */
void qman_destroy_fq(struct qman_fq *fq)
{
	/*
	 * We don't need to lock the FQ as it is a pre-condition that the FQ
	 * be quiesced. Instead, run some checks.
	 */
	switch (fq->state) {
	case qman_fq_state_parked:
	case qman_fq_state_oos:
		/* return a dynamically allocated FQID to the allocator */
		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
			qman_release_fqid(fq->fqid);

		DPAA_ASSERT(fq_table[fq->idx]);
		fq_table[fq->idx] = NULL;
		return;
	default:
		break;
	}
	DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
}
EXPORT_SYMBOL(qman_destroy_fq);
1846
/* Return the frame-queue ID associated with this FQ object. */
u32 qman_fq_fqid(struct qman_fq *fq)
{
	return fq->fqid;
}
EXPORT_SYMBOL(qman_fq_fqid);
1852
/*
 * Initialise an FQ in hardware via an INITFQ_PARKED or INITFQ_SCHED
 * management command (selected by QMAN_INITFQ_FLAG_SCHED). 'opts', if
 * non-NULL, supplies the FQD fields to write (per its we_mask). Returns 0
 * on success; -EINVAL/-EBUSY on bad state or options, -ETIMEDOUT/-EIO on
 * command failure.
 */
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	u8 res, myverb;
	int ret = 0;

	myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
		? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;

	if (fq->state != qman_fq_state_oos &&
	    fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
		/* OAC can't be set at the same time as TDTHRESH */
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
			return -EINVAL;
	}
	/* Issue an INITFQ_[PARKED|SCHED] management command */
	p = get_affine_portal();
	/* re-check state now that we hold the portal */
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    (fq->state != qman_fq_state_oos &&
	     fq->state != qman_fq_state_parked)) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initfq = *opts;
	qm_fqid_set(&mcc->fq, fq->fqid);
	mcc->initfq.count = 0;
	/*
	 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set
	 * as the demux tag so dequeues can be routed back to this object.
	 * Otherwise the caller-provided context_b is left alone.
	 */
	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
		dma_addr_t phys_fq;

		mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
		mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
		/*
		 * If the caller wasn't trying to set CONTEXTA, clear the
		 * stashing settings; otherwise point stashing at this FQ.
		 */
		if (!(be16_to_cpu(mcc->initfq.we_mask) &
		      QM_INITFQ_WE_CONTEXTA)) {
			mcc->initfq.we_mask |=
				cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
			memset(&mcc->initfq.fqd.context_a, 0,
			       sizeof(mcc->initfq.fqd.context_a));
		} else {
			struct qman_portal *p = qman_dma_portal;

			/* map the FQ itself as the stashing target */
			phys_fq = dma_map_single(p->config->dev, fq,
						 sizeof(*fq), DMA_TO_DEVICE);
			if (dma_mapping_error(p->config->dev, phys_fq)) {
				dev_err(p->config->dev, "dma_mapping failed\n");
				ret = -EIO;
				goto out;
			}

			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
		}
	}
	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
		int wq = 0;

		/* default to work queue 4 if the caller didn't pick one */
		if (!(be16_to_cpu(mcc->initfq.we_mask) &
		      QM_INITFQ_WE_DESTWQ)) {
			mcc->initfq.we_mask |=
				cpu_to_be16(QM_INITFQ_WE_DESTWQ);
			wq = 4;
		}
		qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
	}
	qm_mc_commit(&p->p, myverb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "MCR timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	/* mirror CGR-related settings into the software state */
	if (opts) {
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
			if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
			else
				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
		}
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
			fq->cgr_groupid = opts->fqd.cgid;
	}
	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
		qman_fq_state_sched : qman_fq_state_parked;

out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_init_fq);
1965
/*
 * Move a parked FQ to the scheduled state via an ALTER_SCHED management
 * command. Returns 0 on success; -EINVAL/-EBUSY on bad state,
 * -ETIMEDOUT/-EIO on command failure.
 */
int qman_schedule_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	/* Issue a ALTERFQ_SCHED management command */
	p = get_affine_portal();
	/* re-check state now that we hold the portal */
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state != qman_fq_state_parked) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "ALTER_SCHED timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_sched;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_schedule_fq);
2006
/*
 * Retire a parked or scheduled FQ via an ALTER_RETIRE management command.
 * Returns 0 if retirement completed immediately, 1 if it is pending (an
 * FQRN message will follow), negative errno on failure. If 'flags' is
 * non-NULL it receives the FQ flags on immediate success.
 */
int qman_retire_fq(struct qman_fq *fq, u32 *flags)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret;
	u8 res;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_sched)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	/* re-check state now that we hold the portal */
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state == qman_fq_state_retired ||
	    fq->state == qman_fq_state_oos) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
	res = mcr->result;
	/*
	 * "Elegant" would be to treat OK/PENDING the same way: set CHANGING
	 * and defer the flags until FQRNI or FQRN (respectively) show up.
	 * But "friendly" is to process OK immediately, and not set CHANGING,
	 * otherwise the caller doesn't necessarily have a fully "retired" FQ
	 * on return even when retirement was immediate. This does mean some
	 * duplication between here and fq_state_change().
	 */
	if (res == QM_MCR_RESULT_OK) {
		ret = 0;
		/* Process 'fq' right away, we'll ignore FQRNI */
		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		if (flags)
			*flags = fq->flags;
		fq->state = qman_fq_state_retired;
		if (fq->cb.fqs) {
			/*
			 * Another issue with supporting "immediate"
			 * retirement is that FQRNIs are dropped (by the time
			 * they're seen the FQ may already be OOS'd and
			 * freed). If the upper layer wants a callback either
			 * way, fake an MR entry that looks like an FQRNI.
			 */
			union qm_mr_entry msg;

			msg.verb = QM_MR_VERB_FQRNI;
			msg.fq.fqs = mcr->alterfq.fqs;
			qm_fqid_set(&msg.fq, fq->fqid);
			msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
			fq->cb.fqs(p, fq, &msg);
		}
	} else if (res == QM_MCR_RESULT_PENDING) {
		/* retirement in progress; FQRN will complete it */
		ret = 1;
		fq_set(fq, QMAN_FQ_STATE_CHANGING);
	} else {
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_retire_fq);
2088
/*
 * Take a retired FQ out of service via an ALTER_OOS management command.
 * Returns 0 on success; -EINVAL/-EBUSY on bad state, -ETIMEDOUT/-EIO on
 * command failure.
 */
int qman_oos_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_retired)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	/* re-check state now that we hold the portal */
	if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
	    fq->state != qman_fq_state_retired) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_oos;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_oos_fq);
2126
/*
 * Query the FQ descriptor from hardware into *fqd via a QUERYFQ management
 * command. Returns 0 on success, -ETIMEDOUT or -EIO on failure.
 */
int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	if (mcr->result == QM_MCR_RESULT_OK)
		*fqd = mcr->queryfq.fqd;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}
2151
/*
 * Query the FQ's "non-programmable" state fields into *np via a QUERYFQ_NP
 * management command. Returns 0 on success, -ERANGE for an unknown FQID,
 * -ETIMEDOUT or -EIO otherwise.
 */
int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	if (mcr->result == QM_MCR_RESULT_OK)
		*np = mcr->queryfq_np;
	else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
		ret = -ERANGE;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_query_fq_np);
2179
/*
 * Query a CGR's hardware state into *cgrd via a QUERYCGR management
 * command. Returns 0 on success, -ETIMEDOUT or -EIO on failure.
 */
static int qman_query_cgr(struct qman_cgr *cgr,
			  struct qm_mcr_querycgr *cgrd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	mcc->cgr.cgid = cgr->cgrid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	/* NOTE(review): compares against the MCC verb; presumably the MCC
	 * and MCR verb encodings match for QUERYCGR — confirm in the header. */
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
	if (mcr->result == QM_MCR_RESULT_OK)
		*cgrd = mcr->querycgr;
	else {
		dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
			mcr_result_str(mcr->result));
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}
2207
2208int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
2209{
2210 struct qm_mcr_querycgr query_cgr;
2211 int err;
2212
2213 err = qman_query_cgr(cgr, &query_cgr);
2214 if (err)
2215 return err;
2216
2217 *result = !!query_cgr.cgr.cs;
2218 return 0;
2219}
2220EXPORT_SYMBOL(qman_query_cgr_congested);
2221
2222
2223static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
2224{
2225 unsigned long irqflags;
2226 int ret = -EBUSY;
2227
2228 local_irq_save(irqflags);
2229 if (p->vdqcr_owned)
2230 goto out;
2231 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2232 goto out;
2233
2234 fq_set(fq, QMAN_FQ_STATE_VDQCR);
2235 p->vdqcr_owned = fq;
2236 qm_dqrr_vdqcr_set(&p->p, vdqcr);
2237 ret = 0;
2238out:
2239 local_irq_restore(irqflags);
2240 return ret;
2241}
2242
2243static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
2244{
2245 int ret;
2246
2247 *p = get_affine_portal();
2248 ret = set_p_vdqcr(*p, fq, vdqcr);
2249 put_affine_portal();
2250 return ret;
2251}
2252
2253static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
2254 u32 vdqcr, u32 flags)
2255{
2256 int ret = 0;
2257
2258 if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
2259 ret = wait_event_interruptible(affine_queue,
2260 !set_vdqcr(p, fq, vdqcr));
2261 else
2262 wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
2263 return ret;
2264}
2265
/*
 * Issue a volatile dequeue command for a parked or retired FQ. 'vdqcr'
 * carries the command options (its FQID field must be zero; it is filled
 * in here). QMAN_VOLATILE_FLAG_WAIT waits for the portal slot,
 * QMAN_VOLATILE_FLAG_FINISH additionally waits for the command to complete.
 */
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
{
	struct qman_portal *p;
	int ret;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_retired)
		return -EINVAL;
	/* caller must leave the FQID field for us to fill in */
	if (vdqcr & QM_VDQCR_FQID_MASK)
		return -EINVAL;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		return -EBUSY;
	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
	if (flags & QMAN_VOLATILE_FLAG_WAIT)
		ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
	else
		ret = set_vdqcr(&p, fq, vdqcr);
	if (ret)
		return ret;

	/* VDQCR is set */
	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
			/*
			 * NB: don't propagate any error - the caller wouldn't
			 * know whether the VDQCR was issued or not. A signal
			 * could arrive after returning anyway, so the caller
			 * can check signal_pending() if that's an issue.
			 */
			wait_event_interruptible(affine_queue,
				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
		else
			wait_event(affine_queue,
				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
	}
	return 0;
}
EXPORT_SYMBOL(qman_volatile_dequeue);
2303
2304static void update_eqcr_ci(struct qman_portal *p, u8 avail)
2305{
2306 if (avail)
2307 qm_eqcr_cce_prefetch(&p->p);
2308 else
2309 qm_eqcr_cce_update(&p->p);
2310}
2311
/*
 * Enqueue one frame descriptor onto an FQ through the current CPU's affine
 * portal. NOTE(review): returns 0 even when no EQCR entry was available
 * (the !eq path) — confirm callers tolerate silent drop, or consider an
 * error return.
 */
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
{
	struct qman_portal *p;
	struct qm_eqcr_entry *eq;
	unsigned long irqflags;
	u8 avail;

	p = get_affine_portal();
	local_irq_save(irqflags);

	if (p->use_eqcr_ci_stashing) {
		/*
		 * The stashing case is easy, only update if we need to in
		 * order to try and liberate ring entries.
		 */
		eq = qm_eqcr_start_stash(&p->p);
	} else {
		/*
		 * The non-stashing case is harder, need to prefetch ahead of
		 * time.
		 */
		avail = qm_eqcr_get_avail(&p->p);
		if (avail < 2)
			update_eqcr_ci(p, avail);
		eq = qm_eqcr_start_no_stash(&p->p);
	}

	if (unlikely(!eq))
		goto out;

	/* fill in the EQCR entry: FQID, demux tag, frame descriptor */
	qm_fqid_set(eq, fq->fqid);
	eq->tag = cpu_to_be32(fq_to_tag(fq));
	eq->fd = *fd;

	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
out:
	local_irq_restore(irqflags);
	put_affine_portal();
	return 0;
}
EXPORT_SYMBOL(qman_enqueue);
2353
/*
 * Issue a MODIFYCGR (or, with QMAN_CGR_FLAG_USE_INIT, an INITCGR)
 * management command for the CGR, applying 'opts' if non-NULL. Returns 0
 * on success, -ETIMEDOUT or -EIO on failure.
 */
static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
			 struct qm_mcc_initcgr *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 verb = QM_MCC_VERB_MODIFYCGR;
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initcgr = *opts;
	mcc->initcgr.cgid = cgr->cgrid;
	if (flags & QMAN_CGR_FLAG_USE_INIT)
		verb = QM_MCC_VERB_INITCGR;
	qm_mc_commit(&p->p, verb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
	if (mcr->result != QM_MCR_RESULT_OK)
		ret = -EIO;

out:
	put_affine_portal();
	return ret;
}
2383
2384#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
2385
2386
2387static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
2388{
2389 if (qman_ip_rev >= QMAN_REV30)
2390 cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
2391 QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
2392 else
2393 cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
2394}
2395
2396static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
2397{
2398 if (qman_ip_rev >= QMAN_REV30)
2399 cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
2400 else
2401 cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
2402}
2403
2404static u8 qman_cgr_cpus[CGR_NUM];
2405
2406void qman_init_cgr_all(void)
2407{
2408 struct qman_cgr cgr;
2409 int err_cnt = 0;
2410
2411 for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
2412 if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
2413 err_cnt++;
2414 }
2415
2416 if (err_cnt)
2417 pr_err("Warning: %d error%s while initialising CGR h/w\n",
2418 err_cnt, (err_cnt > 1) ? "s" : "");
2419}
2420
2421int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
2422 struct qm_mcc_initcgr *opts)
2423{
2424 struct qm_mcr_querycgr cgr_state;
2425 int ret;
2426 struct qman_portal *p;
2427
2428
2429
2430
2431
2432
2433
2434 if (cgr->cgrid >= CGR_NUM)
2435 return -EINVAL;
2436
2437 preempt_disable();
2438 p = get_affine_portal();
2439 qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
2440 preempt_enable();
2441
2442 cgr->chan = p->config->channel;
2443 spin_lock(&p->cgr_lock);
2444
2445 if (opts) {
2446 struct qm_mcc_initcgr local_opts = *opts;
2447
2448 ret = qman_query_cgr(cgr, &cgr_state);
2449 if (ret)
2450 goto out;
2451
2452 qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
2453 be32_to_cpu(cgr_state.cgr.cscn_targ));
2454 local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);
2455
2456
2457 if (flags & QMAN_CGR_FLAG_USE_INIT)
2458 ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
2459 &local_opts);
2460 else
2461 ret = qm_modify_cgr(cgr, 0, &local_opts);
2462 if (ret)
2463 goto out;
2464 }
2465
2466 list_add(&cgr->node, &p->cgr_cbs);
2467
2468
2469 ret = qman_query_cgr(cgr, &cgr_state);
2470 if (ret) {
2471
2472 dev_err(p->config->dev, "CGR HW state partially modified\n");
2473 ret = 0;
2474 goto out;
2475 }
2476 if (cgr->cb && cgr_state.cgr.cscn_en &&
2477 qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
2478 cgr->cb(p, cgr, 1);
2479out:
2480 spin_unlock(&p->cgr_lock);
2481 put_affine_portal();
2482 return ret;
2483}
2484EXPORT_SYMBOL(qman_create_cgr);
2485
/*
 * Unregister a CGR from the portal it was created on. Must be called on
 * that same portal's CPU (use qman_delete_cgr_safe() otherwise). If this
 * was the last object for the CGRID on the portal, the portal is removed
 * from the CGR's CSCN targets.
 */
int qman_delete_cgr(struct qman_cgr *cgr)
{
	unsigned long irqflags;
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret = 0;
	struct qman_cgr *i;
	struct qman_portal *p = get_affine_portal();

	if (cgr->chan != p->config->channel) {
		/* attempt to delete from other portal than creator */
		dev_err(p->config->dev, "CGR not owned by current portal");
		dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
			cgr->chan, p->config->channel);

		ret = -EINVAL;
		goto put_portal;
	}
	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	spin_lock_irqsave(&p->cgr_lock, irqflags);
	list_del(&cgr->node);
	/*
	 * If there are other CGR objects with the same CGRID still using a
	 * callback on this portal, leave CSCN_TARG alone.
	 */
	list_for_each_entry(i, &p->cgr_cbs, node)
		if (i->cgrid == cgr->cgrid && i->cb)
			goto release_lock;
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret)  {
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
		goto release_lock;
	}

	/* remove this portal from the CGR's CSCN target set */
	local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
	qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
			       be32_to_cpu(cgr_state.cgr.cscn_targ));

	ret = qm_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
release_lock:
	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
put_portal:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_delete_cgr);
2536
/*
 * Pairs a CGR with a completion. NOTE(review): nothing visible in this
 * file references this type; it appears to be a leftover from an older
 * cross-CPU deletion scheme (cf. qman_delete_cgr_safe()) — confirm before
 * removing.
 */
struct cgr_comp {
	struct qman_cgr *cgr;
	struct completion completion;
};
2541
/* smp_call_function_single() trampoline: delete the CGR on the owning CPU */
static void qman_delete_cgr_smp_call(void *p)
{
	qman_delete_cgr((struct qman_cgr *)p);
}
2546
/*
 * Delete a CGR from any CPU: if the caller is not on the CPU that created
 * the CGR (recorded in qman_cgr_cpus[]), run qman_delete_cgr() there via a
 * synchronous IPI, since deletion must happen on the creating portal.
 */
void qman_delete_cgr_safe(struct qman_cgr *cgr)
{
	preempt_disable();
	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
		smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
					 qman_delete_cgr_smp_call, cgr, true);
		preempt_enable();
		return;
	}

	qman_delete_cgr(cgr);
	preempt_enable();
}
EXPORT_SYMBOL(qman_delete_cgr_safe);
2561
2562
2563
/*
 * Drain the message ring completely, consuming every entry, and report
 * (1/0) whether any entry's verb type matched 'v'. Used by the FQ cleanup
 * paths to wait for a specific message type.
 */
static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
{
	const union qm_mr_entry *msg;
	int found = 0;

	qm_mr_pvb_update(p);
	msg = qm_mr_current(p);
	while (msg) {
		if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
			found = 1;
		/* consume the entry whether or not it matched */
		qm_mr_next(p);
		qm_mr_cci_consume_to_current(p);
		qm_mr_pvb_update(p);
		msg = qm_mr_current(p);
	}
	return found;
}
2581
/*
 * Drain the DQRR completely, consuming every entry, and report (1/0)
 * whether any entry belonged to 'fqid' with one of the status bits in 's'
 * set. If 'wait' is true, spin until at least one entry appears first.
 */
static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
				      bool wait)
{
	const struct qm_dqrr_entry *dqrr;
	int found = 0;

	do {
		qm_dqrr_pvb_update(p);
		dqrr = qm_dqrr_current(p);
		if (!dqrr)
			cpu_relax();
	} while (wait && !dqrr);

	while (dqrr) {
		if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
			found = 1;
		/* consume (no park) whether or not it matched */
		qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
		qm_dqrr_pvb_update(p);
		qm_dqrr_next(p);
		dqrr = qm_dqrr_current(p);
	}
	return found;
}
2605
2606#define qm_mr_drain(p, V) \
2607 _qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)
2608
2609#define qm_dqrr_drain(p, f, S) \
2610 _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)
2611
2612#define qm_dqrr_drain_wait(p, f, S) \
2613 _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)
2614
2615#define qm_dqrr_drain_nomatch(p) \
2616 _qm_dqrr_consume_and_match(p, 0, 0, false)
2617
/*
 * qman_shutdown_fq - forcibly take frame queue @fqid to the OOS state
 *
 * Queries the FQ's current state; if not already out-of-service, retires
 * it (ALTER_RETIRE), drains any frames it still holds via the portal's
 * dequeue rings, waits for the ORL to empty if one is present, and finally
 * issues ALTER_OOS.
 *
 * Returns 0 on success, -ETIMEDOUT if a management command times out,
 * -EBUSY if the FQ is on a channel that cannot be recovered from here,
 * or -EIO for other management-command failures.
 */
int qman_shutdown_fq(u32 fqid)
{
	struct qman_portal *p, *channel_portal;
	struct device *dev;
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	int orl_empty, drain = 0, ret = 0;
	u32 channel, res;
	u8 state;

	p = get_affine_portal();
	dev = p->config->dev;

	/* Determine the state of the FQID */
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ_NP timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
	if (state == QM_MCR_NP_STATE_OOS)
		goto out; /* already out of service, nothing to do */

	/* Query which channel the FQ is using */
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	/* need to store the channel since the MCR gets reused below */
	channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
	/* NOTE(review): return value discarded — presumably a leftover
	 * work-queue read; confirm whether it can be removed */
	qm_fqd_get_wq(&mcr->queryfq.fqd);

	if (channel < qm_channel_pool1) {
		/* dedicated channel: must use the portal that owns it */
		channel_portal = get_portal_for_channel(channel);
		if (channel_portal == NULL) {
			dev_err(dev, "Can't find portal for dedicated channel 0x%x\n",
				channel);
			ret = -EIO;
			goto out;
		}
	} else
		channel_portal = p;

	switch (state) {
	case QM_MCR_NP_STATE_TEN_SCHED:
	case QM_MCR_NP_STATE_TRU_SCHED:
	case QM_MCR_NP_STATE_ACTIVE:
	case QM_MCR_NP_STATE_PARKED:
		/* FQ is live in some form: retire it first */
		orl_empty = 0;
		mcc = qm_mc_start(&channel_portal->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&channel_portal->p, QM_MCC_VERB_ALTER_RETIRE);
		if (!qm_mc_result_timeout(&channel_portal->p, &mcr)) {
			dev_err(dev, "ALTER_RETIRE timeout\n");
			ret = -ETIMEDOUT;
			goto out;
		}
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_RETIRE);
		res = mcr->result;

		if (res == QM_MCR_RESULT_OK)
			drain_mr_fqrni(&channel_portal->p);

		if (res == QM_MCR_RESULT_PENDING) {
			/*
			 * Retirement is pending: the FQ still holds frames
			 * that must be dequeued before the FQRN message for
			 * this FQ will appear in the MR.  Dequeue from the
			 * FQ's channel on its owning portal until the FQRN
			 * shows up.
			 */
			int found_fqrn = 0;

			/* held frames must also be flushed out below */
			drain = 1;

			if (channel >= qm_channel_pool1 &&
			    channel < qm_channel_pool1 + 15) {
				/* pool channel: handled below */
			} else if (channel < qm_channel_pool1) {
				/* dedicated channel: handled below */
			} else {
				dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
					fqid, channel);
				ret = -EBUSY;
				goto out;
			}

			/* point the portal's SDQCR at the FQ's channel */
			if (channel < qm_channel_pool1)
				qm_dqrr_sdqcr_set(&channel_portal->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_DEDICATED);
			else
				qm_dqrr_sdqcr_set(&channel_portal->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_POOL_CONV
						  (channel));
			do {
				/* keep consuming dequeued frames */
				qm_dqrr_drain_nomatch(&channel_portal->p);
				/* has the FQRN arrived in the MR yet? */
				found_fqrn = qm_mr_drain(&channel_portal->p,
							 FQRN);
				cpu_relax();
			} while (!found_fqrn);

			/* restore the portal's original SDQCR setting */
			qm_dqrr_sdqcr_set(&channel_portal->p,
					  channel_portal->sdqcr);

		}
		if (res != QM_MCR_RESULT_OK &&
		    res != QM_MCR_RESULT_PENDING) {
			dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
				fqid, res);
			ret = -EIO;
			goto out;
		}
		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
			/*
			 * No ORL is attached to this FQ, so there is no
			 * need to wait for FQRL messages; skip that loop.
			 */
			orl_empty = 1;
		}

		/*
		 * Drain any frames the retired FQ still holds using
		 * volatile dequeues on the affine portal.
		 */
		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
			/* repeat until FQ_EMPTY is reported for this FQID */
			do {
				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);

				qm_dqrr_vdqcr_set(&p->p, vdqcr);

				/*
				 * Wait for a dequeue and consume everything
				 * until the FQ-empty status is seen.
				 */
			} while (!qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
		}

		while (!orl_empty) {
			/* wait for the ORL to drain (FQRL in the MR) */
			orl_empty = qm_mr_drain(&p->p, FQRL);
			cpu_relax();
		}
		/* retired and drained: take the FQ out of service */
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_RETIRED:
		/* already retired: just send the OOS command */
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_OOS:
		/* nothing to do */
		break;

	default:
		ret = -EIO;
	}

out:
	put_affine_portal();
	return ret;
}
2824
/*
 * qman_get_qm_portal_config - return the configuration of @portal
 *
 * Accessor for the portal's (const) config structure; the caller must not
 * free or modify it.
 */
const struct qm_portal_config *qman_get_qm_portal_config(
						struct qman_portal *portal)
{
	return portal->config;
}
EXPORT_SYMBOL(qman_get_qm_portal_config);
2831
/* genalloc-based ID allocators (populated elsewhere during QMan init) */
struct gen_pool *qm_fqalloc; /* FQID allocator */
struct gen_pool *qm_qpalloc; /* pool-channel allocator */
struct gen_pool *qm_cgralloc; /* CGR ID allocator */
2835
2836static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
2837{
2838 unsigned long addr;
2839
2840 if (!p)
2841 return -ENODEV;
2842
2843 addr = gen_pool_alloc(p, cnt);
2844 if (!addr)
2845 return -ENOMEM;
2846
2847 *result = addr & ~DPAA_GENALLOC_OFF;
2848
2849 return 0;
2850}
2851
/* Allocate @count contiguous FQIDs; first ID is returned via @result */
int qman_alloc_fqid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_fqalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_fqid_range);

/* Allocate @count contiguous pool-channel IDs via @result */
int qman_alloc_pool_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_qpalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_pool_range);

/* Allocate @count contiguous CGR IDs via @result */
int qman_alloc_cgrid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_cgralloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_cgrid_range);
2869
2870int qman_release_fqid(u32 fqid)
2871{
2872 int ret = qman_shutdown_fq(fqid);
2873
2874 if (ret) {
2875 pr_debug("FQID %d leaked\n", fqid);
2876 return ret;
2877 }
2878
2879 gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
2880 return 0;
2881}
2882EXPORT_SYMBOL(qman_release_fqid);
2883
2884static int qpool_cleanup(u32 qp)
2885{
2886
2887
2888
2889
2890
2891
2892 struct qman_fq fq = {
2893 .fqid = QM_FQID_RANGE_START
2894 };
2895 int err;
2896
2897 do {
2898 struct qm_mcr_queryfq_np np;
2899
2900 err = qman_query_fq_np(&fq, &np);
2901 if (err == -ERANGE)
2902
2903 return 0;
2904 else if (WARN_ON(err))
2905 return err;
2906
2907 if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
2908 struct qm_fqd fqd;
2909
2910 err = qman_query_fq(&fq, &fqd);
2911 if (WARN_ON(err))
2912 return err;
2913 if (qm_fqd_get_chan(&fqd) == qp) {
2914
2915 err = qman_shutdown_fq(fq.fqid);
2916 if (err)
2917
2918
2919
2920
2921 return err;
2922 }
2923 }
2924
2925 fq.fqid++;
2926 } while (1);
2927}
2928
2929int qman_release_pool(u32 qp)
2930{
2931 int ret;
2932
2933 ret = qpool_cleanup(qp);
2934 if (ret) {
2935 pr_debug("CHID %d leaked\n", qp);
2936 return ret;
2937 }
2938
2939 gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
2940 return 0;
2941}
2942EXPORT_SYMBOL(qman_release_pool);
2943
2944static int cgr_cleanup(u32 cgrid)
2945{
2946
2947
2948
2949
2950 struct qman_fq fq = {
2951 .fqid = QM_FQID_RANGE_START
2952 };
2953 int err;
2954
2955 do {
2956 struct qm_mcr_queryfq_np np;
2957
2958 err = qman_query_fq_np(&fq, &np);
2959 if (err == -ERANGE)
2960
2961 return 0;
2962 else if (WARN_ON(err))
2963 return err;
2964
2965 if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
2966 struct qm_fqd fqd;
2967
2968 err = qman_query_fq(&fq, &fqd);
2969 if (WARN_ON(err))
2970 return err;
2971 if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
2972 fqd.cgid == cgrid) {
2973 pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
2974 cgrid, fq.fqid);
2975 return -EIO;
2976 }
2977 }
2978
2979 fq.fqid++;
2980 } while (1);
2981}
2982
2983int qman_release_cgrid(u32 cgrid)
2984{
2985 int ret;
2986
2987 ret = cgr_cleanup(cgrid);
2988 if (ret) {
2989 pr_debug("CGRID %d leaked\n", cgrid);
2990 return ret;
2991 }
2992
2993 gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
2994 return 0;
2995}
2996EXPORT_SYMBOL(qman_release_cgrid);
2997