1
2
3
4
5#include "otx2_ethdev.h"
6#include "otx2_flow.h"
7
8static int
9flow_mcam_alloc_counter(struct otx2_mbox *mbox, uint16_t *ctr)
10{
11 struct npc_mcam_alloc_counter_req *req;
12 struct npc_mcam_alloc_counter_rsp *rsp;
13 int rc;
14
15 req = otx2_mbox_alloc_msg_npc_mcam_alloc_counter(mbox);
16 req->count = 1;
17 otx2_mbox_msg_send(mbox, 0);
18 rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
19
20 *ctr = rsp->cntr_list[0];
21 return rc;
22}
23
24int
25otx2_flow_mcam_free_counter(struct otx2_mbox *mbox, uint16_t ctr_id)
26{
27 struct npc_mcam_oper_counter_req *req;
28 int rc;
29
30 req = otx2_mbox_alloc_msg_npc_mcam_free_counter(mbox);
31 req->cntr = ctr_id;
32 otx2_mbox_msg_send(mbox, 0);
33 rc = otx2_mbox_get_rsp(mbox, 0, NULL);
34
35 return rc;
36}
37
38int
39otx2_flow_mcam_read_counter(struct otx2_mbox *mbox, uint32_t ctr_id,
40 uint64_t *count)
41{
42 struct npc_mcam_oper_counter_req *req;
43 struct npc_mcam_oper_counter_rsp *rsp;
44 int rc;
45
46 req = otx2_mbox_alloc_msg_npc_mcam_counter_stats(mbox);
47 req->cntr = ctr_id;
48 otx2_mbox_msg_send(mbox, 0);
49 rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
50
51 *count = rsp->stat;
52 return rc;
53}
54
55int
56otx2_flow_mcam_clear_counter(struct otx2_mbox *mbox, uint32_t ctr_id)
57{
58 struct npc_mcam_oper_counter_req *req;
59 int rc;
60
61 req = otx2_mbox_alloc_msg_npc_mcam_clear_counter(mbox);
62 req->cntr = ctr_id;
63 otx2_mbox_msg_send(mbox, 0);
64 rc = otx2_mbox_get_rsp(mbox, 0, NULL);
65
66 return rc;
67}
68
69int
70otx2_flow_mcam_free_entry(struct otx2_mbox *mbox, uint32_t entry)
71{
72 struct npc_mcam_free_entry_req *req;
73 int rc;
74
75 req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox);
76 req->entry = entry;
77 otx2_mbox_msg_send(mbox, 0);
78 rc = otx2_mbox_get_rsp(mbox, 0, NULL);
79
80 return rc;
81}
82
83int
84otx2_flow_mcam_free_all_entries(struct otx2_mbox *mbox)
85{
86 struct npc_mcam_free_entry_req *req;
87 int rc;
88
89 req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox);
90 req->all = 1;
91 otx2_mbox_msg_send(mbox, 0);
92 rc = otx2_mbox_get_rsp(mbox, 0, NULL);
93
94 return rc;
95}
96
/*
 * Copy @len bytes from @data into @ptr in reversed byte order, as the
 * MCAM key expects extracted fields byte-swapped.
 */
static void
flow_prep_mcam_ldata(uint8_t *ptr, const uint8_t *data, int len)
{
	const uint8_t *src = data + len;
	int i = 0;

	while (i < len)
		ptr[i++] = *--src;
}
105
/*
 * Validate that @len bytes fit into a destination of @size bytes.
 * Returns @len when it fits, -1 when the copy would overrun.
 */
static int
flow_check_copysz(size_t size, size_t len)
{
	return (len > size) ? -1 : (int)len;
}
113
/*
 * Return 1 when the first @len bytes of @mem are all zero, else 0.
 * A non-positive @len trivially yields 1.
 */
static inline int
flow_mem_is_zero(const void *mem, int len)
{
	const char *bytes = mem;
	const char *end = bytes + len;

	while (bytes < end) {
		if (*bytes++)
			return 0;
	}
	return 1;
}
126
127static void
128flow_set_hw_mask(struct otx2_flow_item_info *info,
129 struct npc_xtract_info *xinfo,
130 char *hw_mask)
131{
132 int max_off, offset;
133 int j;
134
135 if (xinfo->enable == 0)
136 return;
137
138 if (xinfo->hdr_off < info->hw_hdr_len)
139 return;
140
141 max_off = xinfo->hdr_off + xinfo->len - info->hw_hdr_len;
142
143 if (max_off > info->len)
144 max_off = info->len;
145
146 offset = xinfo->hdr_off - info->hw_hdr_len;
147 for (j = offset; j < max_off; j++)
148 hw_mask[j] = 0xff;
149}
150
151void
152otx2_flow_get_hw_supp_mask(struct otx2_parse_state *pst,
153 struct otx2_flow_item_info *info, int lid, int lt)
154{
155 struct npc_xtract_info *xinfo, *lfinfo;
156 char *hw_mask = info->hw_mask;
157 int lf_cfg;
158 int i, j;
159 int intf;
160
161 intf = pst->flow->nix_intf;
162 xinfo = pst->npc->prx_dxcfg[intf][lid][lt].xtract;
163 memset(hw_mask, 0, info->len);
164
165 for (i = 0; i < NPC_MAX_LD; i++) {
166 flow_set_hw_mask(info, &xinfo[i], hw_mask);
167 }
168
169 for (i = 0; i < NPC_MAX_LD; i++) {
170
171 if (xinfo[i].flags_enable == 0)
172 continue;
173
174 lf_cfg = pst->npc->prx_lfcfg[i].i;
175 if (lf_cfg == lid) {
176 for (j = 0; j < NPC_MAX_LFL; j++) {
177 lfinfo = pst->npc->prx_fxcfg[intf]
178 [i][j].xtract;
179 flow_set_hw_mask(info, &lfinfo[0], hw_mask);
180 }
181 }
182 }
183}
184
/*
 * Copy the spec/mask bytes selected by one extractor (@xinfo) into the
 * MCAM key at the extractor's key offset, byte-reversed as the HW
 * expects.  Returns 0 on success (or when the extractor does not apply
 * to this item) and -rte_errno on overlap/overflow errors.
 */
static int
flow_update_extraction_data(struct otx2_parse_state *pst,
			    struct otx2_flow_item_info *info,
			    struct npc_xtract_info *xinfo)
{
	uint8_t int_info_mask[NPC_MAX_EXTRACT_DATA_LEN];
	uint8_t int_info[NPC_MAX_EXTRACT_DATA_LEN];
	struct npc_xtract_info *x;
	int k, idx, hdr_off;
	int len = 0;

	x = xinfo;
	len = x->len;
	hdr_off = x->hdr_off;

	/* A window starting before this item's header belongs to a
	 * previous protocol layer.
	 */
	if (hdr_off < info->hw_hdr_len)
		return 0;

	if (x->enable == 0)
		return 0;

	otx2_npc_dbg("x->hdr_off = %d, len = %d, info->len = %d,"
		     "x->key_off = %d", x->hdr_off, len, info->len,
		     x->key_off);

	/* Make hdr_off relative to the start of this item. */
	hdr_off -= info->hw_hdr_len;

	/* Clamp the copy length to the bytes the item provides. */
	if (hdr_off + len > info->len)
		len = info->len - hdr_off;

	/* The target key region must still be untouched; two
	 * extractors writing the same key bytes is unsupported.
	 */
	if (!flow_mem_is_zero(pst->mcam_mask + x->key_off,
			      len)) {
		rte_flow_error_set(pst->error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   pst->pattern,
				   "Extraction unsupported");
		return -rte_errno;
	}

	/* Guard against writing past the end of the MCAM key. */
	len = flow_check_copysz((OTX2_MAX_MCAM_WIDTH_DWORDS * 8)
				- x->key_off,
				len);
	if (len < 0) {
		rte_flow_error_set(pst->error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   pst->pattern,
				   "Internal Error");
		return -rte_errno;
	}

	/* Byte-reverse spec/mask into scratch buffers.
	 * NOTE(review): x->len (not the clamped len) bytes are read
	 * from info->spec/mask here -- assumes both buffers hold at
	 * least hdr_off + x->len bytes; TODO confirm with callers.
	 */
	flow_prep_mcam_ldata(int_info,
			     (const uint8_t *)info->spec + hdr_off,
			     x->len);
	flow_prep_mcam_ldata(int_info_mask,
			     (const uint8_t *)info->mask + hdr_off,
			     x->len);

	otx2_npc_dbg("Spec: ");
	for (k = 0; k < info->len; k++)
		otx2_npc_dbg("0x%.2x ",
			     ((const uint8_t *)info->spec)[k]);

	otx2_npc_dbg("Int_info: ");
	for (k = 0; k < info->len; k++)
		otx2_npc_dbg("0x%.2x ", int_info[k]);

	/* Commit only the clamped length into the key. */
	memcpy(pst->mcam_mask + x->key_off, int_info_mask, len);
	memcpy(pst->mcam_data + x->key_off, int_info, len);

	otx2_npc_dbg("Parse state mcam data & mask");
	for (idx = 0; idx < len ; idx++)
		otx2_npc_dbg("data[%d]: 0x%x, mask[%d]: 0x%x", idx,
			     *(pst->mcam_data + idx + x->key_off), idx,
			     *(pst->mcam_mask + idx + x->key_off));
	return 0;
}
267
/*
 * Record the parsed layer (@lid/@lt/@flags) in the parse state and copy
 * the item's spec/mask into the MCAM key via every extractor configured
 * for this (intf, lid, lt).  Advances pst->pattern on success.
 * Returns 0 on success or -rte_errno from extraction failures.
 */
int
otx2_flow_update_parse_state(struct otx2_parse_state *pst,
			     struct otx2_flow_item_info *info, int lid, int lt,
			     uint8_t flags)
{
	struct npc_lid_lt_xtract_info *xinfo;
	struct npc_xtract_info *lfinfo;
	int intf, lf_cfg;
	int i, j, rc = 0;

	/* NOTE(review): info->mask is raw bytes, not necessarily a
	 * NUL-terminated string -- printing it with %s looks unsafe;
	 * confirm otx2_npc_dbg usage here.
	 */
	otx2_npc_dbg("Parse state function info mask total %s",
		     (const uint8_t *)info->mask);

	/* NOTE(review): lid is OR-ed into layer_mask as-is; this only
	 * forms a proper bitmask if lid values are one-hot -- TODO
	 * confirm against the NPC lid enumeration.
	 */
	pst->layer_mask |= lid;
	pst->lt[lid] = lt;
	pst->flags[lid] = flags;

	intf = pst->flow->nix_intf;
	xinfo = &pst->npc->prx_dxcfg[intf][lid][lt];
	otx2_npc_dbg("Is_terminating = %d", xinfo->is_terminating);
	if (xinfo->is_terminating)
		pst->terminate = 1;

	/* Spec-less items contribute nothing to the key. */
	if (info->spec == NULL) {
		otx2_npc_dbg("Info spec NULL");
		goto done;
	}

	/* Fixed LD extractors for this layer. */
	for (i = 0; i < NPC_MAX_LD; i++) {
		rc = flow_update_extraction_data(pst, info, &xinfo->xtract[i]);
		if (rc != 0)
			return rc;
	}

	/* Flag-based LF extractors whose LD slot is tied to this layer. */
	for (i = 0; i < NPC_MAX_LD; i++) {
		if (xinfo->xtract[i].flags_enable == 0)
			continue;

		lf_cfg = pst->npc->prx_lfcfg[i].i;
		if (lf_cfg == lid) {
			for (j = 0; j < NPC_MAX_LFL; j++) {
				lfinfo = pst->npc->prx_fxcfg[intf]
					[i][j].xtract;
				rc = flow_update_extraction_data(pst, info,
								 &lfinfo[0]);
				if (rc != 0)
					return rc;

				/* Remember the last enabled flag slot. */
				if (lfinfo[0].enable)
					pst->flags[lid] = j;
			}
		}
	}

done:
	/* Move on to the next pattern item. */
	pst->pattern++;
	return 0;
}
327
/*
 * The HW cannot match true ranges: "last" is acceptable only when, for
 * every non-zero byte, it equals "spec" under the mask (a degenerate
 * range).  Returns 1 when valid, 0 otherwise.
 */
static inline int
flow_range_is_valid(const char *spec, const char *last, const char *mask,
		    int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		if (last[i] == 0)
			continue;
		if ((spec[i] & mask[i]) != (last[i] & mask[i]))
			return 0;
	}
	return 1;
}
342
343
344static inline int
345flow_mask_is_supported(const char *mask, const char *hw_mask, int len)
346{
347
348
349
350
351 if (hw_mask == NULL)
352 return flow_mem_is_zero(mask, len);
353
354 while (len--) {
355 if ((mask[len] | hw_mask[len]) != hw_mask[len])
356 return 0;
357 }
358 return 1;
359}
360
361int
362otx2_flow_parse_item_basic(const struct rte_flow_item *item,
363 struct otx2_flow_item_info *info,
364 struct rte_flow_error *error)
365{
366
367 if (item == NULL) {
368 rte_flow_error_set(error, EINVAL,
369 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
370 "Item is NULL");
371 return -rte_errno;
372 }
373
374
375
376
377 if (item->spec == NULL) {
378 if (item->last == NULL && item->mask == NULL) {
379 info->spec = NULL;
380 return 0;
381 }
382 rte_flow_error_set(error, EINVAL,
383 RTE_FLOW_ERROR_TYPE_ITEM, item,
384 "mask or last set without spec");
385 return -rte_errno;
386 }
387
388
389 info->spec = item->spec;
390
391
392
393
394 if (item->mask == NULL) {
395 otx2_npc_dbg("Item mask null, using default mask");
396 if (info->def_mask == NULL) {
397 rte_flow_error_set(error, EINVAL,
398 RTE_FLOW_ERROR_TYPE_ITEM, item,
399 "No mask or default mask given");
400 return -rte_errno;
401 }
402 info->mask = info->def_mask;
403 } else {
404 info->mask = item->mask;
405 }
406
407
408
409
410 if (!flow_mask_is_supported(info->mask, info->hw_mask, info->len)) {
411 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
412 item, "Unsupported field in the mask");
413 return -rte_errno;
414 }
415
416
417
418
419
420
421
422 if (item->last != NULL && !flow_mem_is_zero(item->last, info->len)) {
423 if (!flow_range_is_valid(item->spec, item->last, info->mask,
424 info->len)) {
425 rte_flow_error_set(error, EINVAL,
426 RTE_FLOW_ERROR_TYPE_ITEM, item,
427 "Unsupported range for match");
428 return -rte_errno;
429 }
430 }
431
432 return 0;
433}
434
435void
436otx2_flow_keyx_compress(uint64_t *data, uint32_t nibble_mask)
437{
438 uint64_t cdata[2] = {0ULL, 0ULL}, nibble;
439 int i, j = 0;
440
441 for (i = 0; i < NPC_MAX_KEY_NIBBLES; i++) {
442 if (nibble_mask & (1 << i)) {
443 nibble = (data[i / 16] >> ((i & 0xf) * 4)) & 0xf;
444 cdata[j / 16] |= (nibble << ((j & 0xf) * 4));
445 j += 1;
446 }
447 }
448
449 data[0] = cdata[0];
450 data[1] = cdata[1];
451}
452
/*
 * Position of the lowest set bit of @slab, found by binary search over
 * halving widths.  For slab == 0 this returns 63 (callers never pass
 * an empty slab).
 */
static int
flow_first_set_bit(uint64_t slab)
{
	int pos = 0;
	int w;

	for (w = 32; w > 0; w >>= 1) {
		uint64_t low = (w == 32) ? 0xffffffffULL : ((1ULL << w) - 1);

		if ((slab & low) == 0) {
			pos += w;
			slab >>= w;
		}
	}
	return pos;
}
483
484static int
485flow_shift_lv_ent(struct otx2_mbox *mbox, struct rte_flow *flow,
486 struct otx2_npc_flow_info *flow_info,
487 uint32_t old_ent, uint32_t new_ent)
488{
489 struct npc_mcam_shift_entry_req *req;
490 struct npc_mcam_shift_entry_rsp *rsp;
491 struct otx2_flow_list *list;
492 struct rte_flow *flow_iter;
493 int rc = 0;
494
495 otx2_npc_dbg("Old ent:%u new ent:%u priority:%u", old_ent, new_ent,
496 flow->priority);
497
498 list = &flow_info->flow_list[flow->priority];
499
500
501
502
503 req = otx2_mbox_alloc_msg_npc_mcam_shift_entry(mbox);
504 req->curr_entry[0] = old_ent;
505 req->new_entry[0] = new_ent;
506 req->shift_count = 1;
507
508 otx2_mbox_msg_send(mbox, 0);
509 rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
510 if (rc)
511 return rc;
512
513
514 TAILQ_FOREACH(flow_iter, list, next) {
515 if (flow_iter->mcam_id == old_ent)
516 TAILQ_REMOVE(list, flow_iter, next);
517 }
518
519
520 TAILQ_FOREACH(flow_iter, list, next) {
521 if (flow_iter->mcam_id > new_ent)
522 TAILQ_INSERT_BEFORE(flow_iter, flow, next);
523 }
524 return rc;
525}
526
527
528static int
529flow_shift_ent(struct otx2_mbox *mbox, struct rte_flow *flow,
530 struct otx2_npc_flow_info *flow_info,
531 struct npc_mcam_alloc_entry_rsp *rsp, int dir, int prio_lvl)
532{
533 struct rte_bitmap *fr_bmp, *fr_bmp_rev, *lv_bmp, *lv_bmp_rev, *bmp;
534 uint32_t e_fr = 0, e_lv = 0, e, e_id = 0, mcam_entries;
535 uint64_t fr_bit_pos = 0, lv_bit_pos = 0, bit_pos = 0;
536
537 uint32_t sl_fr_bit_off = 0, sl_lv_bit_off = 0;
538
539
540 int rc_fr = 0, rc_lv = 0, rc = 0, idx = 0;
541 struct otx2_mcam_ents_info *ent_info;
542
543 uint64_t sl_fr = 0, sl_lv = 0, *sl;
544
545 fr_bmp = flow_info->free_entries[prio_lvl];
546 fr_bmp_rev = flow_info->free_entries_rev[prio_lvl];
547 lv_bmp = flow_info->live_entries[prio_lvl];
548 lv_bmp_rev = flow_info->live_entries_rev[prio_lvl];
549 ent_info = &flow_info->flow_entry_info[prio_lvl];
550 mcam_entries = flow_info->mcam_entries;
551
552
553
554
555
556
557 while (idx <= rsp->count) {
558 if (!sl_fr && !sl_lv) {
559
560 if (dir < 0) {
561 rc_fr = rte_bitmap_scan(fr_bmp, &e_fr, &sl_fr);
562 rc_lv = rte_bitmap_scan(lv_bmp, &e_lv, &sl_lv);
563 otx2_npc_dbg("Fwd slab rc fr %u rc lv %u "
564 "e_fr %u e_lv %u", rc_fr, rc_lv,
565 e_fr, e_lv);
566 } else {
567 rc_fr = rte_bitmap_scan(fr_bmp_rev,
568 &sl_fr_bit_off,
569 &sl_fr);
570 rc_lv = rte_bitmap_scan(lv_bmp_rev,
571 &sl_lv_bit_off,
572 &sl_lv);
573
574 otx2_npc_dbg("Rev slab rc fr %u rc lv %u "
575 "e_fr %u e_lv %u", rc_fr, rc_lv,
576 e_fr, e_lv);
577 }
578 }
579
580 if (rc_fr) {
581 fr_bit_pos = flow_first_set_bit(sl_fr);
582 e_fr = sl_fr_bit_off + fr_bit_pos;
583 otx2_npc_dbg("Fr_bit_pos 0x%" PRIx64, fr_bit_pos);
584 } else {
585 e_fr = ~(0);
586 }
587
588 if (rc_lv) {
589 lv_bit_pos = flow_first_set_bit(sl_lv);
590 e_lv = sl_lv_bit_off + lv_bit_pos;
591 otx2_npc_dbg("Lv_bit_pos 0x%" PRIx64, lv_bit_pos);
592 } else {
593 e_lv = ~(0);
594 }
595
596
597 if (e_fr < e_lv) {
598 bmp = fr_bmp;
599 e = e_fr;
600 sl = &sl_fr;
601 bit_pos = fr_bit_pos;
602 if (dir > 0)
603 e_id = mcam_entries - e - 1;
604 else
605 e_id = e;
606 otx2_npc_dbg("Fr e %u e_id %u", e, e_id);
607 } else {
608 bmp = lv_bmp;
609 e = e_lv;
610 sl = &sl_lv;
611 bit_pos = lv_bit_pos;
612 if (dir > 0)
613 e_id = mcam_entries - e - 1;
614 else
615 e_id = e;
616
617 otx2_npc_dbg("Lv e %u e_id %u", e, e_id);
618 if (idx < rsp->count)
619 rc =
620 flow_shift_lv_ent(mbox, flow,
621 flow_info, e_id,
622 rsp->entry + idx);
623 }
624
625 rte_bitmap_clear(bmp, e);
626 rte_bitmap_set(bmp, rsp->entry + idx);
627
628
629
630 rsp->entry_list[idx] = e_id;
631 *sl &= ~(1 << bit_pos);
632
633
634
635
636 if (dir < 0) {
637 ent_info->max_id = rsp->entry + idx;
638 ent_info->min_id = e_id;
639 } else {
640 ent_info->max_id = e_id;
641 ent_info->min_id = rsp->entry;
642 }
643
644 idx++;
645 }
646 return rc;
647}
648
649
650
651
652
/*
 * Walk priority levels from one end towards @flow's level and, whenever
 * the freshly allocated range @rsp would break another level's min/max
 * id ordering, shift that level's entries out of the way.  @req_prio
 * tells on which side of the reference entry the allocation was made.
 * On the no-conflict path rsp->entry_list is filled with the contiguous
 * range as allocated.
 */
static int
flow_validate_and_shift_prio_ent(struct otx2_mbox *mbox, struct rte_flow *flow,
				 struct otx2_npc_flow_info *flow_info,
				 struct npc_mcam_alloc_entry_rsp *rsp,
				 int req_prio)
{
	int prio_idx = 0, rc = 0, needs_shift = 0, idx, prio = flow->priority;
	struct otx2_mcam_ents_info *info = flow_info->flow_entry_info;
	int dir = (req_prio == NPC_MCAM_HIGHER_PRIO) ? 1 : -1;
	uint32_t tot_ent = 0;

	otx2_npc_dbg("Dir %d, priority = %d", dir, prio);

	/* Lower-prio requests scan from the last level downwards. */
	if (dir < 0)
		prio_idx = flow_info->flow_max_priority - 1;

	/* Every level between the scan start and @prio must keep all
	 * its entries strictly on the correct side of rsp->entry; any
	 * populated level that violates this is shifted.
	 */
	do {
		tot_ent = info[prio_idx].free_ent + info[prio_idx].live_ent;

		if (dir < 0 && prio_idx != prio &&
		    rsp->entry > info[prio_idx].max_id && tot_ent) {
			otx2_npc_dbg("Rsp entry %u prio idx %u "
				     "max id %u", rsp->entry, prio_idx,
				     info[prio_idx].max_id);

			needs_shift = 1;
		} else if ((dir > 0) && (prio_idx != prio) &&
			   (rsp->entry < info[prio_idx].min_id) && tot_ent) {
			otx2_npc_dbg("Rsp entry %u prio idx %u "
				     "min id %u", rsp->entry, prio_idx,
				     info[prio_idx].min_id);
			needs_shift = 1;
		}

		otx2_npc_dbg("Needs_shift = %d", needs_shift);
		if (needs_shift) {
			needs_shift = 0;
			rc = flow_shift_ent(mbox, flow, flow_info, rsp, dir,
					    prio_idx);
		} else {
			/* No conflict: use the range as allocated. */
			for (idx = 0; idx < rsp->count; idx++)
				rsp->entry_list[idx] = rsp->entry + idx;
		}
	} while ((prio_idx != prio) && (prio_idx += dir));

	return rc;
}
712
713static int
714flow_find_ref_entry(struct otx2_npc_flow_info *flow_info, int *prio,
715 int prio_lvl)
716{
717 struct otx2_mcam_ents_info *info = flow_info->flow_entry_info;
718 int step = 1;
719
720 while (step < flow_info->flow_max_priority) {
721 if (((prio_lvl + step) < flow_info->flow_max_priority) &&
722 info[prio_lvl + step].live_ent) {
723 *prio = NPC_MCAM_HIGHER_PRIO;
724 return info[prio_lvl + step].min_id;
725 }
726
727 if (((prio_lvl - step) >= 0) &&
728 info[prio_lvl - step].live_ent) {
729 otx2_npc_dbg("Prio_lvl %u live %u", prio_lvl - step,
730 info[prio_lvl - step].live_ent);
731 *prio = NPC_MCAM_LOWER_PRIO;
732 return info[prio_lvl - step].max_id;
733 }
734 step++;
735 }
736 *prio = NPC_MCAM_ANY_PRIO;
737 return 0;
738}
739
/*
 * Pre-allocate a batch of MCAM entries for @flow's priority level from
 * the AF, position them relative to the nearest populated level, stash
 * all but one in the free-entry cache and return the remaining one via
 * *free_ent as the entry to use now.  Returns 0 on success or a
 * negative mbox error.
 */
static int
flow_fill_entry_cache(struct otx2_mbox *mbox, struct rte_flow *flow,
		      struct otx2_npc_flow_info *flow_info, uint32_t *free_ent)
{
	struct rte_bitmap *free_bmp, *free_bmp_rev, *live_bmp, *live_bmp_rev;
	struct npc_mcam_alloc_entry_rsp rsp_local;
	struct npc_mcam_alloc_entry_rsp *rsp_cmd;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	struct otx2_mcam_ents_info *info;
	uint16_t ref_ent, idx;
	int rc, prio;

	info = &flow_info->flow_entry_info[flow->priority];
	free_bmp = flow_info->free_entries[flow->priority];
	free_bmp_rev = flow_info->free_entries_rev[flow->priority];
	live_bmp = flow_info->live_entries[flow->priority];
	live_bmp_rev = flow_info->live_entries_rev[flow->priority];

	/* Pick a reference entry in a neighbouring level so the AF
	 * allocates on the correct side of it.
	 */
	ref_ent = flow_find_ref_entry(flow_info, &prio, flow->priority);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(mbox);
	req->contig = 1;
	req->count = flow_info->flow_prealloc_size;
	req->priority = prio;
	req->ref_entry = ref_ent;

	otx2_npc_dbg("Fill cache ref entry %u prio %u", ref_ent, prio);

	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp_cmd);
	if (rc)
		return rc;

	/* Work on a local copy: the mbox response buffer may be reused
	 * by the shift mailbox calls issued below.
	 */
	rsp = &rsp_local;
	memcpy(rsp, rsp_cmd, sizeof(*rsp));

	otx2_npc_dbg("Alloc entry %u count %u , prio = %d", rsp->entry,
		     rsp->count, prio);

	/* Re-order other levels' entries if the new range conflicts.
	 * NOTE(review): the shift's return value is ignored -- a failed
	 * shift would leave rsp->entry_list stale; TODO confirm.
	 */
	if (prio != NPC_MCAM_ANY_PRIO) {
		flow_validate_and_shift_prio_ent(mbox, flow, flow_info, rsp,
						 prio);
	} else {
		/* First allocation ever: use the range as given. */
		for (idx = 0; idx < rsp->count; idx++)
			rsp->entry_list[idx] = rsp->entry + idx;
	}

	otx2_npc_dbg("Fill entry cache rsp count %u", rsp->count);

	/* Track the level's min/max ids and cache every entry except
	 * the one handed back to the caller (last id for higher-prio
	 * allocations, first id otherwise).
	 */
	for (idx = 0; idx < rsp->count; idx++) {
		if (unlikely(rsp->entry_list[idx] < info->min_id))
			info->min_id = rsp->entry_list[idx];

		if (unlikely(rsp->entry_list[idx] > info->max_id))
			info->max_id = rsp->entry_list[idx];

		if (prio == NPC_MCAM_HIGHER_PRIO) {
			if (unlikely(idx == (rsp->count - 1))) {
				*free_ent = rsp->entry_list[idx];
				continue;
			}
		} else {
			if (unlikely(!idx)) {
				*free_ent = rsp->entry_list[idx];
				continue;
			}
		}
		info->free_ent++;
		rte_bitmap_set(free_bmp, rsp->entry_list[idx]);
		rte_bitmap_set(free_bmp_rev, flow_info->mcam_entries -
			       rsp->entry_list[idx] - 1);

		otx2_npc_dbg("Final rsp entry %u rsp entry rev %u",
			     rsp->entry_list[idx],
			     flow_info->mcam_entries - rsp->entry_list[idx] - 1);
	}

	/* Mark the returned entry live in both bitmap orientations. */
	otx2_npc_dbg("Cache free entry %u, rev = %u", *free_ent,
		     flow_info->mcam_entries - *free_ent - 1);
	info->live_ent++;
	rte_bitmap_set(live_bmp, *free_ent);
	rte_bitmap_set(live_bmp_rev, flow_info->mcam_entries - *free_ent - 1);

	return 0;
}
833
/*
 * Return an MCAM entry id for @flow: take one from the priority level's
 * free-entry cache when available, otherwise refill the cache from the
 * AF.  Returns the (non-negative) entry id, or a negative value on
 * failure.
 */
static int
flow_check_preallocated_entry_cache(struct otx2_mbox *mbox,
				    struct rte_flow *flow,
				    struct otx2_npc_flow_info *flow_info)
{
	struct rte_bitmap *free, *free_rev, *live, *live_rev;
	uint32_t pos = 0, free_ent = 0, mcam_entries;
	struct otx2_mcam_ents_info *info;
	uint64_t slab = 0;
	int rc;

	otx2_npc_dbg("Flow priority %u", flow->priority);

	info = &flow_info->flow_entry_info[flow->priority];

	free_rev = flow_info->free_entries_rev[flow->priority];
	free = flow_info->free_entries[flow->priority];
	live_rev = flow_info->live_entries_rev[flow->priority];
	live = flow_info->live_entries[flow->priority];
	mcam_entries = flow_info->mcam_entries;

	if (info->free_ent) {
		rc = rte_bitmap_scan(free, &pos, &slab);
		if (rc) {
			/* Lowest set bit of the slab is the entry id. */
			free_ent = pos + __builtin_ctzll(slab);
			otx2_npc_dbg("Allocated from cache entry %u", free_ent);

			/* Move it from free to live in both the forward
			 * and the reversed bitmaps.
			 */
			rte_bitmap_clear(free, free_ent);
			rte_bitmap_set(live, free_ent);
			rte_bitmap_clear(free_rev,
					 mcam_entries - free_ent - 1);
			rte_bitmap_set(live_rev,
				       mcam_entries - free_ent - 1);

			info->free_ent--;
			info->live_ent++;
			return free_ent;
		}

		/* Bookkeeping claims free entries but the bitmap
		 * disagrees: internal inconsistency.
		 */
		otx2_npc_dbg("No free entry:its a mess");
		return -1;
	}

	rc = flow_fill_entry_cache(mbox, flow, flow_info, &free_ent);
	if (rc)
		return rc;

	/* NOTE(review): entry ids are uint32_t but returned via int;
	 * ids >= 2^31 would read as errors -- confirm the MCAM size
	 * bound makes this unreachable.
	 */
	return free_ent;
}
884
885int
886otx2_flow_mcam_alloc_and_write(struct rte_flow *flow, struct otx2_mbox *mbox,
887 struct otx2_parse_state *pst,
888 struct otx2_npc_flow_info *flow_info)
889{
890 int use_ctr = (flow->ctr_id == NPC_COUNTER_NONE ? 0 : 1);
891 struct npc_mcam_read_base_rule_rsp *base_rule_rsp;
892 struct npc_mcam_write_entry_req *req;
893 struct mcam_entry *base_entry;
894 struct mbox_msghdr *rsp;
895 uint16_t ctr = ~(0);
896 int rc, idx;
897 int entry;
898
899 if (use_ctr) {
900 rc = flow_mcam_alloc_counter(mbox, &ctr);
901 if (rc)
902 return rc;
903 }
904
905 entry = flow_check_preallocated_entry_cache(mbox, flow, flow_info);
906 if (entry < 0) {
907 otx2_err("Prealloc failed");
908 otx2_flow_mcam_free_counter(mbox, ctr);
909 return NPC_MCAM_ALLOC_FAILED;
910 }
911
912 if (pst->is_vf) {
913 (void)otx2_mbox_alloc_msg_npc_read_base_steer_rule(mbox);
914 rc = otx2_mbox_process_msg(mbox, (void *)&base_rule_rsp);
915 if (rc) {
916 otx2_err("Failed to fetch VF's base MCAM entry");
917 return rc;
918 }
919 base_entry = &base_rule_rsp->entry_data;
920 for (idx = 0; idx < OTX2_MAX_MCAM_WIDTH_DWORDS; idx++) {
921 flow->mcam_data[idx] |= base_entry->kw[idx];
922 flow->mcam_mask[idx] |= base_entry->kw_mask[idx];
923 }
924 }
925
926 req = otx2_mbox_alloc_msg_npc_mcam_write_entry(mbox);
927 req->set_cntr = use_ctr;
928 req->cntr = ctr;
929 req->entry = entry;
930 otx2_npc_dbg("Alloc & write entry %u", entry);
931
932 req->intf =
933 (flow->nix_intf == OTX2_INTF_RX) ? NPC_MCAM_RX : NPC_MCAM_TX;
934 req->enable_entry = 1;
935 req->entry_data.action = flow->npc_action;
936 req->entry_data.vtag_action = flow->vtag_action;
937
938 for (idx = 0; idx < OTX2_MAX_MCAM_WIDTH_DWORDS; idx++) {
939 req->entry_data.kw[idx] = flow->mcam_data[idx];
940 req->entry_data.kw_mask[idx] = flow->mcam_mask[idx];
941 }
942
943 if (flow->nix_intf == OTX2_INTF_RX) {
944 req->entry_data.kw[0] |= flow_info->channel;
945 req->entry_data.kw_mask[0] |= (BIT_ULL(12) - 1);
946 } else {
947 uint16_t pf_func = (flow->npc_action >> 48) & 0xffff;
948
949 pf_func = htons(pf_func);
950 req->entry_data.kw[0] |= ((uint64_t)pf_func << 32);
951 req->entry_data.kw_mask[0] |= ((uint64_t)0xffff << 32);
952 }
953
954 otx2_mbox_msg_send(mbox, 0);
955 rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
956 if (rc != 0)
957 return rc;
958
959 flow->mcam_id = entry;
960 if (use_ctr)
961 flow->ctr_id = ctr;
962 return 0;
963}
964