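/*
 * ACL build phase: convert the user supplied set of classification rules
 * into the internal trie representation walked by the run-time classify code.
 */
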
5#include <rte_acl.h>
6#include "tb_mem.h"
7#include "acl.h"
8
9#define ACL_POOL_ALIGN 8
10#define ACL_POOL_ALLOC_MIN 0x800000
11
12
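/* number of child pointers added per reallocation of a node's pointer array */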
13#define ACL_PTR_ALLOC 32
14
15
16#define ACL_MAX_INDEXES (2 * RTE_ACL_MAX_FIELDS)
17
18
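/* upper/lower node budgets used when splitting a rule set across tries */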
19#define NODE_MAX 0x4000
20#define NODE_MIN 0x800
21
22
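/* TALLY values index per-field statistics on rule wildness */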
23enum {
24 TALLY_0 = 0,
25 TALLY_25,
26 TALLY_50,
27 TALLY_75,
28 TALLY_100,
29 TALLY_DEACTIVATED,
30 TALLY_DEPTH,
31
32 TALLY_NUM
33};
34
35static const uint32_t wild_limits[TALLY_DEACTIVATED] = {0, 25, 50, 75, 100};
36
37enum {
38 ACL_INTERSECT_NONE = 0,
39 ACL_INTERSECT_A = 1,
40 ACL_INTERSECT_B = 2,
41 ACL_INTERSECT = 4,
42};
43
44enum {
45 ACL_PRIORITY_EQUAL = 0,
46 ACL_PRIORITY_NODE_A = 1,
47 ACL_PRIORITY_NODE_B = 2,
48 ACL_PRIORITY_MIXED = 3
49};
50
51
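/* entry of the fixed-size-block free lists used by acl_build_alloc/free */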
52struct acl_mem_block {
53 uint32_t block_size;
54 void *mem_ptr;
55};
56
57#define MEM_BLOCK_NUM 16
58
59
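/* build-time representation of a single ACL rule */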
60struct rte_acl_build_rule {
61 struct rte_acl_build_rule *next;
62 struct rte_acl_config *config;
63
64 const struct rte_acl_rule *f;
65 uint32_t *wildness;
66};
67
68
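/* data used throughout one invocation of the build phase */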
69struct acl_build_context {
70 const struct rte_acl_ctx *acx;
71 struct rte_acl_build_rule *build_rules;
72 struct rte_acl_config cfg;
73 int32_t node_max;
74 int32_t cur_node_max;
75 uint32_t node;
76 uint32_t num_nodes;
77 uint32_t category_mask;
78 uint32_t num_rules;
79 uint32_t node_id;
80 uint32_t src_mask;
81 uint32_t num_build_rules;
82 uint32_t num_tries;
83 struct tb_mem_pool pool;
84 struct rte_acl_trie tries[RTE_ACL_MAX_TRIES];
85 struct rte_acl_bld_trie bld_tries[RTE_ACL_MAX_TRIES];
86 uint32_t data_indexes[RTE_ACL_MAX_TRIES][ACL_MAX_INDEXES];
87
88
89 struct acl_mem_block blocks[MEM_BLOCK_NUM];
90 struct rte_acl_node *node_free_list;
91};
92
93static int acl_merge_trie(struct acl_build_context *context,
94 struct rte_acl_node *node_a, struct rte_acl_node *node_b,
95 uint32_t level, struct rte_acl_node **node_c);
96
97static void
98acl_deref_ptr(struct acl_build_context *context,
99 struct rte_acl_node *node, int index);
100
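
/*
 * Allocate memory for the build: reuse a block of matching size from the
 * free lists when available (cleared before reuse), otherwise carve a new
 * block out of the temporary build pool.
 */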
101static void *
102acl_build_alloc(struct acl_build_context *context, size_t n, size_t s)
103{
104 uint32_t m;
105 void *p;
106 size_t alloc_size = n * s;
107
108
109
110
111 for (m = 0; m < RTE_DIM(context->blocks); m++) {
112 if (context->blocks[m].block_size ==
113 alloc_size && context->blocks[m].mem_ptr != NULL) {
114 p = context->blocks[m].mem_ptr;
115 context->blocks[m].mem_ptr = *((void **)p);
116 memset(p, 0, alloc_size);
117 return p;
118 }
119 }
120
121
122
123
124 p = tb_alloc(&context->pool, alloc_size);
125 return p;
126}
127
128
129
130
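/*
 * Return a block of memory to the free list for its size so that a later
 * acl_build_alloc() of the same size can reuse it.
 */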
131static void
132acl_build_free(struct acl_build_context *context, size_t s, void *p)
133{
134 uint32_t n;
135
136 for (n = 0; n < RTE_DIM(context->blocks); n++) {
137 if (context->blocks[n].block_size == s) {
138 *((void **)p) = context->blocks[n].mem_ptr;
139 context->blocks[n].mem_ptr = p;
140 return;
141 }
142 }
143 for (n = 0; n < RTE_DIM(context->blocks); n++) {
144 if (context->blocks[n].block_size == 0) {
145 context->blocks[n].block_size = s;
146 *((void **)p) = NULL;
147 context->blocks[n].mem_ptr = p;
148 return;
149 }
150 }
151}
152
153
154
155
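/*
 * Allocate and initialise a trie node,
 * reusing one from the node free list when possible.
 */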
156static struct rte_acl_node *
157acl_alloc_node(struct acl_build_context *context, int level)
158{
159 struct rte_acl_node *node;
160
161 if (context->node_free_list != NULL) {
162 node = context->node_free_list;
163 context->node_free_list = node->next;
164 memset(node, 0, sizeof(struct rte_acl_node));
165 } else {
166 node = acl_build_alloc(context, sizeof(struct rte_acl_node), 1);
167 }
168
169 if (node != NULL) {
170 node->num_ptrs = 0;
171 node->level = level;
172 node->node_type = RTE_ACL_NODE_UNDEFINED;
173 node->node_index = RTE_ACL_NODE_UNDEFINED;
174 context->num_nodes++;
175 node->id = context->node_id++;
176 }
177 return node;
178}
179
180
181
182
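/*
 * Release a node: drop references to all of its children, free its match
 * results and pointer array, then put it on the node free list for reuse.
 */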
183static void
184acl_free_node(struct acl_build_context *context,
185 struct rte_acl_node *node)
186{
187 uint32_t n;
188
189 if (node->prev != NULL)
190 node->prev->next = NULL;
191 for (n = 0; n < node->num_ptrs; n++)
192 acl_deref_ptr(context, node, n);
193
194
195 if (node->mrt != NULL) {
196 acl_build_free(context, sizeof(struct rte_acl_match_results),
197 node->mrt);
198 node->mrt = NULL;
199 }
200
201
202 if (node->ptrs != NULL) {
203 acl_build_free(context,
204 node->max_ptrs * sizeof(struct rte_acl_ptr_set),
205 node->ptrs);
206 node->ptrs = NULL;
207 }
208
209
210 context->num_nodes--;
211 node->next = context->node_free_list;
212 context->node_free_list = node;
213}
214
215
216
217
218
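/* Merge the bits of 'src' into 'dst': dst = (dst & mask) | src. */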
219static void
220acl_include(struct rte_acl_bitset *dst, struct rte_acl_bitset *src, bits_t mask)
221{
222 uint32_t n;
223
224 for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
225 dst->bits[n] = (dst->bits[n] & mask) | src->bits[n];
226}
227
228
229
230
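/*
 * Set 'dst' to the bits of 'src1' that are not in 'src2'.
 * Returns non-zero if any bits remain set in 'dst'.
 */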
231static int
232acl_exclude(struct rte_acl_bitset *dst,
233 struct rte_acl_bitset *src1,
234 struct rte_acl_bitset *src2)
235{
236 uint32_t n;
237 bits_t all_bits = 0;
238
239 for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++) {
240 dst->bits[n] = src1->bits[n] & ~src2->bits[n];
241 all_bits |= dst->bits[n];
242 }
243 return all_bits != 0;
244}
245
246
247
248
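/*
 * Add a child pointer to a node for the byte values given in 'bits',
 * growing the node's pointer array when it is full.
 */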
249static int
250acl_add_ptr(struct acl_build_context *context,
251 struct rte_acl_node *node,
252 struct rte_acl_node *ptr,
253 struct rte_acl_bitset *bits)
254{
255 uint32_t n, num_ptrs;
256 struct rte_acl_ptr_set *ptrs = NULL;
257
258
259
260
261 for (n = 0; n < node->num_ptrs; n++) {
262 if (node->ptrs[n].ptr != NULL) {
263 if (node->ptrs[n].ptr == ptr) {
264 acl_include(&node->ptrs[n].values, bits, -1);
265 acl_include(&node->values, bits, -1);
266 return 0;
267 }
268 }
269 }
270
271
272 if (node->num_ptrs >= node->max_ptrs) {
273
274 num_ptrs = node->max_ptrs + ACL_PTR_ALLOC;
275 ptrs = acl_build_alloc(context, num_ptrs, sizeof(*ptrs));
276
277
278 if (node->ptrs != NULL) {
279 memcpy(ptrs, node->ptrs,
280 node->num_ptrs * sizeof(*ptrs));
281 acl_build_free(context, node->max_ptrs * sizeof(*ptrs),
282 node->ptrs);
283 }
284 node->ptrs = ptrs;
285 node->max_ptrs = num_ptrs;
286 }
287
288
289 for (n = node->min_add; n < node->max_ptrs; n++) {
290 if (node->ptrs[n].ptr == NULL) {
291 node->ptrs[n].ptr = ptr;
292 acl_include(&node->ptrs[n].values, bits, 0);
293 acl_include(&node->values, bits, -1);
294 if (ptr != NULL)
295 ptr->ref_count++;
296 if (node->num_ptrs <= n)
297 node->num_ptrs = n + 1;
298 return 0;
299 }
300 }
301
302 return 0;
303}
304
305
306
307
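/* Add a child pointer covering the inclusive byte range [low, high]. */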
308static int
309acl_add_ptr_range(struct acl_build_context *context,
310 struct rte_acl_node *root,
311 struct rte_acl_node *node,
312 uint8_t low,
313 uint8_t high)
314{
315 uint32_t n;
316 struct rte_acl_bitset bitset;
317
318
319 for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
320 bitset.bits[n] = 0;
321
322
323 for (n = 0; n < UINT8_MAX + 1; n++)
324 if (n >= low && n <= high)
325 bitset.bits[n / (sizeof(bits_t) * 8)] |=
326 1U << (n % (sizeof(bits_t) * CHAR_BIT));
327
328 return acl_add_ptr(context, root, node, &bitset);
329}
330
331
332
333
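/*
 * Fill the bitset with every byte value that matches 'value' under 'mask'.
 * Returns the number of matching byte values.
 */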
334static int
335acl_gen_mask(struct rte_acl_bitset *bitset, uint32_t value, uint32_t mask)
336{
337 int range = 0;
338 uint32_t n;
339
340
341 for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
342 bitset->bits[n] = 0;
343
344
345 for (n = 0; n < UINT8_MAX + 1; n++) {
346 if ((n & mask) == value) {
347 range++;
348 bitset->bits[n / (sizeof(bits_t) * 8)] |=
349 1U << (n % (sizeof(bits_t) * CHAR_BIT));
350 }
351 }
352 return range;
353}
354
355
356
357
358
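/*
 * Compute the intersection of two bitsets and report whether either side
 * holds bits outside of that intersection (ACL_INTERSECT_A/_B).
 */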
359static int
360acl_intersect_type(const struct rte_acl_bitset *a_bits,
361 const struct rte_acl_bitset *b_bits,
362 struct rte_acl_bitset *intersect)
363{
364 uint32_t n;
365 bits_t intersect_bits = 0;
366 bits_t a_superset = 0;
367 bits_t b_superset = 0;
368
369
370
371
372
373 for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++) {
374 intersect->bits[n] = a_bits->bits[n] & b_bits->bits[n];
375 a_superset |= a_bits->bits[n] ^ intersect->bits[n];
376 b_superset |= b_bits->bits[n] ^ intersect->bits[n];
377 intersect_bits |= intersect->bits[n];
378 }
379
380 n = (intersect_bits == 0 ? ACL_INTERSECT_NONE : ACL_INTERSECT) |
381 (b_superset == 0 ? 0 : ACL_INTERSECT_B) |
382 (a_superset == 0 ? 0 : ACL_INTERSECT_A);
383
384 return n;
385}
386
387
388
389
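/* Duplicate a node, taking an extra reference on each of its children. */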
390static struct rte_acl_node *
391acl_dup_node(struct acl_build_context *context, struct rte_acl_node *node)
392{
393 uint32_t n;
394 struct rte_acl_node *next;
395
396 next = acl_alloc_node(context, node->level);
397
398
399 if (node->num_ptrs > 0) {
400 next->ptrs = acl_build_alloc(context,
401 node->max_ptrs,
402 sizeof(struct rte_acl_ptr_set));
403 next->max_ptrs = node->max_ptrs;
404 }
405
406
407 for (n = 0; n < node->num_ptrs; n++) {
408 if (node->ptrs[n].ptr != NULL) {
409 next->ptrs[n].ptr = node->ptrs[n].ptr;
410 next->ptrs[n].ptr->ref_count++;
411 acl_include(&next->ptrs[n].values,
412 &node->ptrs[n].values, -1);
413 }
414 }
415
416 next->num_ptrs = node->num_ptrs;
417
418
419 if (node->match_flag == 0)
420 next->match_flag = 0;
421 else {
422 next->match_flag = -1;
423 next->mrt = acl_build_alloc(context, 1, sizeof(*next->mrt));
424 memcpy(next->mrt, node->mrt, sizeof(*next->mrt));
425 }
426
427
428 acl_include(&next->values, &node->values, -1);
429
430 node->next = next;
431 next->prev = node;
432
433 return next;
434}
435
436
437
438
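/*
 * Drop the reference held through ptrs[index];
 * free the child node once its reference count reaches zero.
 */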
439static void
440acl_deref_ptr(struct acl_build_context *context,
441 struct rte_acl_node *node, int index)
442{
443 struct rte_acl_node *ref_node;
444
445
446 if (node != NULL && node->ptrs[index].ptr != NULL) {
447 ref_node = node->ptrs[index].ptr;
448 ref_node->ref_count--;
449 if (ref_node->ref_count == 0)
450 acl_free_node(context, ref_node);
451 }
452}
453
454
455
456
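/*
 * Copy a child pointer from 'src' to 'dst',
 * excluding the byte values present in 'b_bits'.
 */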
457static int
458acl_copy_ptr(struct acl_build_context *context,
459 struct rte_acl_node *dst,
460 struct rte_acl_node *src,
461 int index,
462 struct rte_acl_bitset *b_bits)
463{
464 int rc;
465 struct rte_acl_bitset bits;
466
	if (b_bits != NULL) {
		if (!acl_exclude(&bits, &src->ptrs[index].values, b_bits))
			return 0;
	} else
		bits = src->ptrs[index].values;

	rc = acl_add_ptr(context, dst, src->ptrs[index].ptr, &bits);
472 if (rc < 0)
473 return rc;
474 return 1;
475}
476
477
478
479
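/*
 * Remove NULL entries from a node's pointer array by moving the last
 * entries into the holes and shrinking num_ptrs accordingly.
 */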
480static void
481acl_compact_node_ptrs(struct rte_acl_node *node_a)
482{
483 uint32_t n;
484 int min_add = node_a->min_add;
485
486 while (node_a->num_ptrs > 0 &&
487 node_a->ptrs[node_a->num_ptrs - 1].ptr == NULL)
488 node_a->num_ptrs--;
489
490 for (n = min_add; n + 1 < node_a->num_ptrs; n++) {
491
492
493 if (node_a->ptrs[n].ptr == NULL) {
494
495
496 acl_include(&node_a->ptrs[n].values,
497 &node_a->ptrs[node_a->num_ptrs - 1].values,
498 0);
499 node_a->ptrs[n].ptr =
500 node_a->ptrs[node_a->num_ptrs - 1].ptr;
501
502
503
504
505
506 node_a->ptrs[node_a->num_ptrs - 1].ptr = NULL;
507 while (node_a->num_ptrs > 0 &&
508 node_a->ptrs[node_a->num_ptrs - 1].ptr == NULL)
509 node_a->num_ptrs--;
510 }
511 }
512}
513
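/*
 * Merge two match (leaf) nodes: for every category keep the higher-priority
 * result. If neither node wins outright, duplicate node_a and fold the
 * better results of node_b into the copy.
 */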
514static int
515acl_resolve_leaf(struct acl_build_context *context,
516 struct rte_acl_node *node_a,
517 struct rte_acl_node *node_b,
518 struct rte_acl_node **node_c)
519{
520 uint32_t n;
521 int combined_priority = ACL_PRIORITY_EQUAL;
522
523 for (n = 0; n < context->cfg.num_categories; n++) {
524 if (node_a->mrt->priority[n] != node_b->mrt->priority[n]) {
525 combined_priority |= (node_a->mrt->priority[n] >
526 node_b->mrt->priority[n]) ?
527 ACL_PRIORITY_NODE_A : ACL_PRIORITY_NODE_B;
528 }
529 }
530
531
532
533
534
535 if (combined_priority == ACL_PRIORITY_NODE_A ||
536 combined_priority == ACL_PRIORITY_EQUAL) {
537 *node_c = node_a;
538 return 0;
539 }
540
541
542
543
544
545 if (combined_priority == ACL_PRIORITY_NODE_B) {
546 *node_c = node_b;
547 return 0;
548 }
549
550
551
552
553
554
555
556 node_a->next = NULL;
557
558 *node_c = acl_dup_node(context, node_a);
559 for (n = 0; n < context->cfg.num_categories; n++) {
560 if ((*node_c)->mrt->priority[n] < node_b->mrt->priority[n]) {
561 (*node_c)->mrt->priority[n] = node_b->mrt->priority[n];
562 (*node_c)->mrt->results[n] = node_b->mrt->results[n];
563 }
564 }
565 return 0;
566}
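
/*
 * Merge nodes A and B together, returning a node that covers the
 * intersection of both paths.
 *
 * If the nodes are leaves (match nodes), resolve them by priority.
 *
 * Otherwise create C as a duplicate of A and, for every pair of child
 * pointers of C and B whose bitsets intersect:
 *	merge the two children,
 *	remove the intersection from C's pointer,
 *	add a pointer from C to the merged child.
 * Compact the pointers in C and B, then copy into C any B pointers that
 * lie outside the intersection.
 * Finally, if C picked up no references into the B trie, free it and keep A;
 * if it retains no references into the A trie, free it and return B;
 * otherwise return C.
 */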
591static int
592acl_merge_trie(struct acl_build_context *context,
593 struct rte_acl_node *node_a, struct rte_acl_node *node_b,
594 uint32_t level, struct rte_acl_node **return_c)
595{
596 uint32_t n, m, ptrs_c, ptrs_b;
597 uint32_t min_add_c, min_add_b;
598 int node_intersect_type;
599 struct rte_acl_bitset node_intersect;
600 struct rte_acl_node *node_c;
601 struct rte_acl_node *node_a_next;
602 int node_b_refs;
603 int node_a_refs;
604
605 node_c = node_a;
606 node_a_next = node_a->next;
607 min_add_c = 0;
608 min_add_b = 0;
609 node_a_refs = node_a->num_ptrs;
610 node_b_refs = 0;
611 node_intersect_type = 0;
612
613
614 if (node_a->match_flag != 0) {
615 acl_resolve_leaf(context, node_a, node_b, return_c);
616 return 0;
617 }
618
619
620
621
622
623
624 if (level > 0)
625 node_c = acl_dup_node(context, node_a);
626
627
628
629
630
631 node_intersect_type = acl_intersect_type(&node_c->values,
632 &node_b->values,
633 &node_intersect);
634
635 if (node_intersect_type & ACL_INTERSECT) {
636
637 min_add_b = node_b->min_add;
638 node_b->min_add = node_b->num_ptrs;
639 ptrs_b = node_b->num_ptrs;
640
641 min_add_c = node_c->min_add;
642 node_c->min_add = node_c->num_ptrs;
643 ptrs_c = node_c->num_ptrs;
644
645 for (n = 0; n < ptrs_c; n++) {
646 if (node_c->ptrs[n].ptr == NULL) {
647 node_a_refs--;
648 continue;
649 }
650 node_c->ptrs[n].ptr->next = NULL;
651 for (m = 0; m < ptrs_b; m++) {
652
653 struct rte_acl_bitset child_intersect;
654 int child_intersect_type;
655 struct rte_acl_node *child_node_c = NULL;
656
657 if (node_b->ptrs[m].ptr == NULL ||
658 node_c->ptrs[n].ptr ==
659 node_b->ptrs[m].ptr)
660 continue;
661
662 child_intersect_type = acl_intersect_type(
663 &node_c->ptrs[n].values,
664 &node_b->ptrs[m].values,
665 &child_intersect);
666
667 if ((child_intersect_type & ACL_INTERSECT) !=
668 0) {
669 if (acl_merge_trie(context,
670 node_c->ptrs[n].ptr,
671 node_b->ptrs[m].ptr,
672 level + 1,
673 &child_node_c))
674 return 1;
675
676 if (child_node_c != NULL &&
677 child_node_c !=
678 node_c->ptrs[n].ptr) {
679
680 node_b_refs++;
681
682
683
684
685
686
687 acl_add_ptr(context, node_c,
688 child_node_c,
689 &child_intersect);
690
691
692
693
694
695 node_a_refs += (child_node_c !=
696 node_b->ptrs[m].ptr);
697
698
699
700
701
702 if (!acl_exclude(
703 &node_c->ptrs[n].values,
704 &node_c->ptrs[n].values,
705 &child_intersect)) {
706 acl_deref_ptr(context,
707 node_c, n);
708 node_c->ptrs[n].ptr =
709 NULL;
710 node_a_refs--;
711 }
712 }
713 }
714 }
715 }
716
717
718 node_c->min_add = min_add_c;
719 acl_compact_node_ptrs(node_c);
720 node_b->min_add = min_add_b;
721 acl_compact_node_ptrs(node_b);
722 }
723
724
725
726
727 if ((node_intersect_type & ACL_INTERSECT_B) != 0) {
728 node_b_refs++;
729 for (m = 0; m < node_b->num_ptrs; m++)
730 if (node_b->ptrs[m].ptr != NULL)
731 acl_copy_ptr(context, node_c,
732 node_b, m, &node_intersect);
733 }
734
735
736
737
738
739
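	/*
	 * If the duplicate node_c never picked up any references into the
	 * B trie, discard it and keep node_a; if it no longer references
	 * the A trie, node_b alone covers the merge.
	 */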
740 if (node_c != node_a && node_c != node_a_next) {
741
742
743
744
745
746 if (node_b_refs == 0) {
747 acl_free_node(context, node_c);
748 node_c = node_a;
749 } else {
750
751
752
753
754 if (node_a_refs == 0) {
755 acl_free_node(context, node_c);
756 node_c = node_b;
757 }
758 }
759 }
760
761 if (return_c != NULL)
762 *return_c = node_c;
763
764 if (level == 0)
765 acl_free_node(context, node_b);
766
767 return 0;
768}
769
770
771
772
773
774
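/*
 * Free the run-time memory and reset every run-time field of the context
 * (everything from num_categories onwards); the rule storage is preserved.
 */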
775static void
776acl_build_reset(struct rte_acl_ctx *ctx)
777{
778 rte_free(ctx->mem);
779 memset(&ctx->num_categories, 0,
780 sizeof(*ctx) - offsetof(struct rte_acl_ctx, num_categories));
781}
782
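/* Chain 'size' full-range (0..255) levels from 'root' down to 'end'. */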
783static void
784acl_gen_full_range(struct acl_build_context *context, struct rte_acl_node *root,
785 struct rte_acl_node *end, int size, int level)
786{
787 struct rte_acl_node *node, *prev;
788 uint32_t n;
789
790 prev = root;
791 for (n = size - 1; n > 0; n--) {
792 node = acl_alloc_node(context, level++);
793 acl_add_ptr_range(context, prev, node, 0, UINT8_MAX);
794 prev = node;
795 }
796 acl_add_ptr_range(context, prev, end, 0, UINT8_MAX);
797}
798
799static void
800acl_gen_range_mdl(struct acl_build_context *context, struct rte_acl_node *root,
801 struct rte_acl_node *end, uint8_t lo, uint8_t hi, int size, int level)
802{
803 struct rte_acl_node *node;
804
805 node = acl_alloc_node(context, level++);
806 acl_add_ptr_range(context, root, node, lo, hi);
807 acl_gen_full_range(context, node, end, size - 1, level);
808}
809
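/*
 * Generate the lower-boundary sub-trie: values that share the top byte
 * with 'lo' and are greater than or equal to it.
 */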
810static void
811acl_gen_range_low(struct acl_build_context *context, struct rte_acl_node *root,
812 struct rte_acl_node *end, const uint8_t *lo, int size, int level)
813{
814 struct rte_acl_node *node;
815 uint32_t n;
816
817 n = size - 1;
818 if (n == 0) {
819 acl_add_ptr_range(context, root, end, lo[0], UINT8_MAX);
820 return;
821 }
822
823 node = acl_alloc_node(context, level++);
824 acl_add_ptr_range(context, root, node, lo[n], lo[n]);
825
826
827 acl_gen_range_low(context, node, end, lo, n, level);
828
829
830 if (n > 1 && lo[n - 1] != UINT8_MAX)
831 acl_gen_range_mdl(context, node, end, lo[n - 1] + 1, UINT8_MAX,
832 n, level);
833}
834
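/*
 * Generate the upper-boundary sub-trie: values that share the top byte
 * with 'hi' and are less than or equal to it.
 */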
835static void
836acl_gen_range_high(struct acl_build_context *context, struct rte_acl_node *root,
837 struct rte_acl_node *end, const uint8_t *hi, int size, int level)
838{
839 struct rte_acl_node *node;
840 uint32_t n;
841
842 n = size - 1;
843 if (n == 0) {
844 acl_add_ptr_range(context, root, end, 0, hi[0]);
845 return;
846 }
847
848 node = acl_alloc_node(context, level++);
849 acl_add_ptr_range(context, root, node, hi[n], hi[n]);
850
851
852 acl_gen_range_high(context, node, end, hi, n, level);
853
854
855 if (n > 1 && hi[n - 1] != 0)
856 acl_gen_range_mdl(context, node, end, 0, hi[n - 1] - 1,
857 n, level);
858}
859
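/*
 * Build a mini-trie matching the inclusive range [min, max] of a field of
 * 'size' bytes (most significant byte at the highest index).
 * *pend receives the common end node of the generated sub-trie.
 */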
860static struct rte_acl_node *
861acl_gen_range_trie(struct acl_build_context *context,
862 const void *min, const void *max,
863 int size, int level, struct rte_acl_node **pend)
864{
865 int32_t k, n;
866 uint8_t hi_ff, lo_00;
867 struct rte_acl_node *node, *prev, *root;
868 const uint8_t *lo;
869 const uint8_t *hi;
870
871 lo = min;
872 hi = max;
873
874 *pend = acl_alloc_node(context, level + size);
875 root = acl_alloc_node(context, level++);
876 prev = root;
877
878
879 for (n = size - 1; n > 0 && lo[n] == hi[n]; n--) {
880 node = acl_alloc_node(context, level++);
881 acl_add_ptr_range(context, prev, node, lo[n], hi[n]);
882 prev = node;
883 }
884
885
886 if (n == 0) {
887 acl_add_ptr_range(context, prev, *pend, lo[0], hi[0]);
888 return root;
889 }
890
891
892 lo_00 = 0;
893 hi_ff = UINT8_MAX;
894 for (k = n - 1; k >= 0; k--) {
895 hi_ff &= hi[k];
896 lo_00 |= lo[k];
897 }
898
899
900 if (lo_00 != 0)
901 acl_gen_range_low(context, prev, *pend, lo, n + 1, level);
902
903
904 if (hi_ff != UINT8_MAX)
905 acl_gen_range_high(context, prev, *pend, hi, n + 1, level);
906
907
908 if (lo[n] + 1 != hi[n] || lo_00 == 0 || hi_ff == UINT8_MAX) {
909 lo_00 = lo[n] + (lo_00 != 0);
910 hi_ff = hi[n] - (hi_ff != UINT8_MAX);
911 acl_gen_range_mdl(context, prev, *pend, lo_00, hi_ff,
912 n + 1, level);
913 }
914
915 return root;
916}
917
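/*
 * Build a chain of nodes matching 'value' under 'mask', one node per byte,
 * most significant byte first. *pend receives the last node of the chain.
 */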
918static struct rte_acl_node *
919acl_gen_mask_trie(struct acl_build_context *context,
920 const void *value, const void *mask,
921 int size, int level, struct rte_acl_node **pend)
922{
923 int32_t n;
924 struct rte_acl_node *root;
925 struct rte_acl_node *node, *prev;
926 struct rte_acl_bitset bits;
927 const uint8_t *val = value;
928 const uint8_t *msk = mask;
929
930 root = acl_alloc_node(context, level++);
931 prev = root;
932
933 for (n = size - 1; n >= 0; n--) {
934 node = acl_alloc_node(context, level++);
935 acl_gen_mask(&bits, val[n] & msk[n], msk[n]);
936 acl_add_ptr(context, prev, node, &bits);
937 prev = node;
938 }
939
940 *pend = prev;
941 return root;
942}
943
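/*
 * Build a trie for the given list of rules. If the trie grows by more than
 * cur_node_max nodes while merging a rule, building stops: *last is set to
 * the last rule to keep in this trie and the caller splits the rest off.
 * *count is incremented once per rule merged in.
 */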
944static struct rte_acl_node *
945build_trie(struct acl_build_context *context, struct rte_acl_build_rule *head,
946 struct rte_acl_build_rule **last, uint32_t *count)
947{
948 uint32_t n, m;
949 int field_index, node_count;
950 struct rte_acl_node *trie;
951 struct rte_acl_build_rule *prev, *rule;
952 struct rte_acl_node *end, *merge, *root, *end_prev;
953 const struct rte_acl_field *fld;
954
955 prev = head;
956 rule = head;
957 *last = prev;
958
959 trie = acl_alloc_node(context, 0);
960
961 while (rule != NULL) {
962
963 root = acl_alloc_node(context, 0);
964
965 root->ref_count = 1;
966 end = root;
967
968 for (n = 0; n < rule->config->num_fields; n++) {
969
970 field_index = rule->config->defs[n].field_index;
971 fld = rule->f->field + field_index;
972 end_prev = end;
973
974
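			/* build a mini-trie for this field of the rule */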
975 switch (rule->config->defs[n].type) {
976
977 case RTE_ACL_FIELD_TYPE_BITMASK:
978 merge = acl_gen_mask_trie(context,
979 &fld->value,
980 &fld->mask_range,
981 rule->config->defs[n].size,
982 end->level + 1,
983 &end);
984 break;
985
986 case RTE_ACL_FIELD_TYPE_MASK:
987 {
988
989
990
991
992 uint64_t mask;
993 mask = RTE_ACL_MASKLEN_TO_BITMASK(
994 fld->mask_range.u64,
995 rule->config->defs[n].size);
996
997
998 merge = acl_gen_mask_trie(context,
999 &fld->value,
1000 (char *)&mask,
1001 rule->config->defs[n].size,
1002 end->level + 1,
1003 &end);
1004 }
1005 break;
1006
1007 case RTE_ACL_FIELD_TYPE_RANGE:
1008 merge = acl_gen_range_trie(context,
1009 &rule->f->field[field_index].value,
1010 &rule->f->field[field_index].mask_range,
1011 rule->config->defs[n].size,
1012 end->level + 1,
1013 &end);
1014 break;
1015
1016 default:
1017 RTE_LOG(ERR, ACL,
1018 "Error in rule[%u] type - %hhu\n",
1019 rule->f->data.userdata,
1020 rule->config->defs[n].type);
1021 return NULL;
1022 }
1023
1024
1025 if (acl_merge_trie(context, end_prev, merge, 0,
1026 NULL) != 0) {
1027 return NULL;
1028 }
1029 }
1030
1031 end->match_flag = ++context->num_build_rules;
1032
1033
1034
1035
1036
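		/*
		 * Set up the match results for this rule:
		 * userdata and priority for every requested category.
		 */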
1037 if (end->mrt == NULL)
1038 end->mrt = acl_build_alloc(context, 1,
1039 sizeof(*end->mrt));
1040
1041 for (m = context->cfg.num_categories; 0 != m--; ) {
1042 if (rule->f->data.category_mask & (1U << m)) {
1043 end->mrt->results[m] = rule->f->data.userdata;
1044 end->mrt->priority[m] = rule->f->data.priority;
1045 } else {
1046 end->mrt->results[m] = 0;
1047 end->mrt->priority[m] = 0;
1048 }
1049 }
1050
1051 node_count = context->num_nodes;
1052 (*count)++;
1053
1054
1055 if (acl_merge_trie(context, trie, root, 0, NULL))
1056 return NULL;
1057
1058 node_count = context->num_nodes - node_count;
1059 if (node_count > context->cur_node_max) {
1060 *last = prev;
1061 return trie;
1062 }
1063
1064 prev = rule;
1065 rule = rule->next;
1066 }
1067
1068 *last = NULL;
1069 return trie;
1070}
1071
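/*
 * For every rule, express each field's "wildness" as a percentage:
 * 0 means an exact match, 100 means the field is completely wild.
 */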
1072static void
1073acl_calc_wildness(struct rte_acl_build_rule *head,
1074 const struct rte_acl_config *config)
1075{
1076 uint32_t n;
1077 struct rte_acl_build_rule *rule;
1078
1079 for (rule = head; rule != NULL; rule = rule->next) {
1080
1081 for (n = 0; n < config->num_fields; n++) {
1082
1083 double wild = 0;
1084 uint32_t bit_len = CHAR_BIT * config->defs[n].size;
1085 uint64_t msk_val = RTE_LEN2MASK(bit_len,
1086 typeof(msk_val));
1087 double size = bit_len;
1088 int field_index = config->defs[n].field_index;
1089 const struct rte_acl_field *fld = rule->f->field +
1090 field_index;
1091
1092 switch (rule->config->defs[n].type) {
1093 case RTE_ACL_FIELD_TYPE_BITMASK:
1094 wild = (size - __builtin_popcountll(
1095 fld->mask_range.u64 & msk_val)) /
1096 size;
1097 break;
1098
1099 case RTE_ACL_FIELD_TYPE_MASK:
				wild = (size - fld->mask_range.u64) / size;
1101 break;
1102
1103 case RTE_ACL_FIELD_TYPE_RANGE:
1104 wild = (fld->mask_range.u64 & msk_val) -
1105 (fld->value.u64 & msk_val);
1106 wild = wild / msk_val;
1107 break;
1108 }
1109
1110 rule->wildness[field_index] = (uint32_t)(wild * 100);
1111 }
1112 }
1113}
1114
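/*
 * Gather per-field wildness statistics for the rule set and drop from the
 * config any field that is completely wild in every rule (a field is only
 * dropped when all fields sharing its input word can be dropped too).
 */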
1115static void
1116acl_rule_stats(struct rte_acl_build_rule *head, struct rte_acl_config *config)
1117{
1118 struct rte_acl_build_rule *rule;
1119 uint32_t n, m, fields_deactivated = 0;
1120 uint32_t start = 0, deactivate = 0;
1121 int tally[RTE_ACL_MAX_LEVELS][TALLY_NUM];
1122
1123 memset(tally, 0, sizeof(tally));
1124
1125 for (rule = head; rule != NULL; rule = rule->next) {
1126
1127 for (n = 0; n < config->num_fields; n++) {
1128 uint32_t field_index = config->defs[n].field_index;
1129
1130 tally[n][TALLY_0]++;
1131 for (m = 1; m < RTE_DIM(wild_limits); m++) {
1132 if (rule->wildness[field_index] >=
1133 wild_limits[m])
1134 tally[n][m]++;
1135 }
1136 }
1137
1138 for (n = config->num_fields - 1; n > 0; n--) {
1139 uint32_t field_index = config->defs[n].field_index;
1140
1141 if (rule->wildness[field_index] == 100)
1142 tally[n][TALLY_DEPTH]++;
1143 else
1144 break;
1145 }
1146 }
1147
1148
1149
1150
1151
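	/*
	 * Look for fields that are always wild and can be deactivated;
	 * only deactivate a field if every field sharing its input word
	 * is deactivated as well.
	 */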
1152 for (n = 1; n < config->num_fields; n++) {
1153 if (config->defs[n].input_index !=
1154 config->defs[n - 1].input_index) {
1155 for (m = start; m < n; m++)
1156 tally[m][TALLY_DEACTIVATED] = deactivate;
1157 fields_deactivated += deactivate;
1158 start = n;
1159 deactivate = 1;
1160 }
1161
1162
1163 if (tally[n][TALLY_100] != tally[n][TALLY_0])
1164 deactivate = 0;
1165 }
1166
1167 for (m = start; m < n; m++)
1168 tally[m][TALLY_DEACTIVATED] = deactivate;
1169
1170 fields_deactivated += deactivate;
1171
1172
1173 if (fields_deactivated) {
1174 uint32_t k, l = 0;
1175
1176 for (k = 0; k < config->num_fields; k++) {
1177 if (tally[k][TALLY_DEACTIVATED] == 0) {
1178 memmove(&tally[l][0], &tally[k][0],
1179 TALLY_NUM * sizeof(tally[0][0]));
1180 memmove(&config->defs[l++],
1181 &config->defs[k],
1182 sizeof(struct rte_acl_field_def));
1183 }
1184 }
1185 config->num_fields = l;
1186 }
1187}
1188
1189static int
1190rule_cmp_wildness(struct rte_acl_build_rule *r1, struct rte_acl_build_rule *r2)
1191{
1192 uint32_t n;
1193
1194 for (n = 1; n < r1->config->num_fields; n++) {
1195 int field_index = r1->config->defs[n].field_index;
1196
1197 if (r1->wildness[field_index] != r2->wildness[field_index])
1198 return r1->wildness[field_index] -
1199 r2->wildness[field_index];
1200 }
1201 return 0;
1202}
1203
1204
1205
1206
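/* Split a rule list into two halves using the slow/fast pointer technique. */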
1207static void
1208rule_list_split(struct rte_acl_build_rule *source,
1209 struct rte_acl_build_rule **list_a,
1210 struct rte_acl_build_rule **list_b)
1211{
1212 struct rte_acl_build_rule *fast;
1213 struct rte_acl_build_rule *slow;
1214
1215 if (source == NULL || source->next == NULL) {
1216
1217 *list_a = source;
1218 *list_b = NULL;
1219 } else {
1220 slow = source;
1221 fast = source->next;
1222
1223 while (fast != NULL) {
1224 fast = fast->next;
1225 if (fast != NULL) {
1226 slow = slow->next;
1227 fast = fast->next;
1228 }
1229 }
1230
1231
1232 *list_a = source;
1233 *list_b = slow->next;
1234 slow->next = NULL;
1235 }
1236}
1237
1238
1239
1240
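/* Merge two rule lists already sorted with the wilder rules first. */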
1241static struct rte_acl_build_rule *
1242rule_list_sorted_merge(struct rte_acl_build_rule *a,
1243 struct rte_acl_build_rule *b)
1244{
1245 struct rte_acl_build_rule *result = NULL;
1246 struct rte_acl_build_rule **last_next = &result;
1247
1248 while (1) {
1249 if (a == NULL) {
1250 *last_next = b;
1251 break;
1252 } else if (b == NULL) {
1253 *last_next = a;
1254 break;
1255 }
1256 if (rule_cmp_wildness(a, b) >= 0) {
1257 *last_next = a;
1258 last_next = &a->next;
1259 a = a->next;
1260 } else {
1261 *last_next = b;
1262 last_next = &b->next;
1263 b = b->next;
1264 }
1265 }
1266 return result;
1267}
1268
1269
1270
1271
1272
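/* Recursive merge sort of the rule list by decreasing wildness. */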
1273static struct rte_acl_build_rule *
1274sort_rules(struct rte_acl_build_rule *head)
1275{
1276 struct rte_acl_build_rule *a;
1277 struct rte_acl_build_rule *b;
1278
1279
1280 if (head == NULL || head->next == NULL)
1281 return head;
1282
1283
1284 rule_list_split(head, &a, &b);
1285
1286
1287 a = sort_rules(a);
1288 b = sort_rules(b);
1289
1290
1291 return rule_list_sorted_merge(a, b);
1292}
1293
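/*
 * Fill data_index[] with the input offset for each distinct input word of
 * the config; fields wider than 32 bits contribute a second entry.
 * Returns the number of entries written.
 */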
1294static uint32_t
1295acl_build_index(const struct rte_acl_config *config, uint32_t *data_index)
1296{
1297 uint32_t n, m;
1298 int32_t last_header;
1299
1300 m = 0;
1301 last_header = -1;
1302
1303 for (n = 0; n < config->num_fields; n++) {
1304 if (last_header != config->defs[n].input_index) {
1305 last_header = config->defs[n].input_index;
1306 data_index[m++] = config->defs[n].offset;
1307 if (config->defs[n].size > sizeof(uint32_t))
1308 data_index[m++] = config->defs[n].offset +
1309 sizeof(uint32_t);
1310 }
1311 }
1312
1313 return m;
1314}
1315
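/*
 * Analyse and sort one rule set, then build a trie for it with the given
 * node budget. Returns the last rule that made it into the trie, or NULL
 * when the whole rule set fitted.
 */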
1316static struct rte_acl_build_rule *
1317build_one_trie(struct acl_build_context *context,
1318 struct rte_acl_build_rule *rule_sets[RTE_ACL_MAX_TRIES],
1319 uint32_t n, int32_t node_max)
1320{
1321 struct rte_acl_build_rule *last;
1322 struct rte_acl_config *config;
1323
1324 config = rule_sets[n]->config;
1325
1326 acl_rule_stats(rule_sets[n], config);
1327 rule_sets[n] = sort_rules(rule_sets[n]);
1328
1329 context->tries[n].type = RTE_ACL_FULL_TRIE;
1330 context->tries[n].count = 0;
1331
1332 context->tries[n].num_data_indexes = acl_build_index(config,
1333 context->data_indexes[n]);
1334 context->tries[n].data_index = context->data_indexes[n];
1335
1336 context->cur_node_max = node_max;
1337
1338 context->bld_tries[n].trie = build_trie(context, rule_sets[n],
1339 &last, &context->tries[n].count);
1340
1341 return last;
1342}
1343
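/*
 * Build as many tries as needed for the rule set: whenever a trie grows
 * past context->node_max nodes, the rules that did not fit are split off
 * into the next trie (with their own copy of the config) and the current
 * trie is rebuilt without a node limit.
 */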
1344static int
1345acl_build_tries(struct acl_build_context *context,
1346 struct rte_acl_build_rule *head)
1347{
1348 uint32_t n, num_tries;
1349 struct rte_acl_config *config;
1350 struct rte_acl_build_rule *last;
1351 struct rte_acl_build_rule *rule_sets[RTE_ACL_MAX_TRIES];
1352
1353 config = head->config;
1354 rule_sets[0] = head;
1355
1356
1357 for (n = 0; n < RTE_DIM(context->tries); n++) {
1358 context->tries[n].type = RTE_ACL_UNUSED_TRIE;
1359 context->bld_tries[n].trie = NULL;
1360 context->tries[n].count = 0;
1361 }
1362
1363 context->tries[0].type = RTE_ACL_FULL_TRIE;
1364
1365
1366 acl_calc_wildness(head, config);
1367
1368 for (n = 0;; n = num_tries) {
1369
1370 num_tries = n + 1;
1371
1372 last = build_one_trie(context, rule_sets, n, context->node_max);
1373 if (context->bld_tries[n].trie == NULL) {
1374 RTE_LOG(ERR, ACL, "Build of %u-th trie failed\n", n);
1375 return -ENOMEM;
1376 }
1377
1378
1379 if (last == NULL)
1380 break;
1381
1382 if (num_tries == RTE_DIM(context->tries)) {
1383 RTE_LOG(ERR, ACL,
1384 "Exceeded max number of tries: %u\n",
1385 num_tries);
1386 return -ENOMEM;
1387 }
1388
1389
1390 rule_sets[num_tries] = last->next;
1391 last->next = NULL;
1392 acl_free_node(context, context->bld_tries[n].trie);
1393
1394
1395 config = acl_build_alloc(context, 1, sizeof(*config));
1396 memcpy(config, rule_sets[n]->config, sizeof(*config));
1397
1398
1399 for (head = rule_sets[num_tries]; head != NULL;
1400 head = head->next)
1401 head->config = config;
1402
1403
1404
1405
1406
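		/*
		 * Rebuild this trie over the reduced rule set,
		 * without trying to split it any further.
		 */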
1407 last = build_one_trie(context, rule_sets, n, INT32_MAX);
1408 if (context->bld_tries[n].trie == NULL || last != NULL) {
1409 RTE_LOG(ERR, ACL, "Build of %u-th trie failed\n", n);
1410 return -ENOMEM;
1411 }
1412
1413 }
1414
1415 context->num_tries = num_tries;
1416 return 0;
1417}
1418
1419static void
1420acl_build_log(const struct acl_build_context *ctx)
1421{
1422 uint32_t n;
1423
1424 RTE_LOG(DEBUG, ACL, "Build phase for ACL \"%s\":\n"
1425 "node limit for tree split: %u\n"
1426 "nodes created: %u\n"
1427 "memory consumed: %zu\n",
1428 ctx->acx->name,
1429 ctx->node_max,
1430 ctx->num_nodes,
1431 ctx->pool.alloc);
1432
1433 for (n = 0; n < RTE_DIM(ctx->tries); n++) {
1434 if (ctx->tries[n].count != 0)
1435 RTE_LOG(DEBUG, ACL,
1436 "trie %u: number of rules: %u, indexes: %u\n",
1437 n, ctx->tries[n].count,
1438 ctx->tries[n].num_data_indexes);
1439 }
1440}
1441
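/*
 * Create the build-time rule list: one entry plus a per-field wildness
 * array for every rule whose category mask overlaps the build categories.
 */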
1442static int
1443acl_build_rules(struct acl_build_context *bcx)
1444{
1445 struct rte_acl_build_rule *br, *head;
1446 const struct rte_acl_rule *rule;
1447 uint32_t *wp;
1448 uint32_t fn, i, n, num;
1449 size_t ofs, sz;
1450
1451 fn = bcx->cfg.num_fields;
1452 n = bcx->acx->num_rules;
1453 ofs = n * sizeof(*br);
1454 sz = ofs + n * fn * sizeof(*wp);
1455
1456 br = tb_alloc(&bcx->pool, sz);
1457
1458 wp = (uint32_t *)((uintptr_t)br + ofs);
1459 num = 0;
1460 head = NULL;
1461
1462 for (i = 0; i != n; i++) {
1463 rule = (const struct rte_acl_rule *)
1464 ((uintptr_t)bcx->acx->rules + bcx->acx->rule_sz * i);
1465 if ((rule->data.category_mask & bcx->category_mask) != 0) {
1466 br[num].next = head;
1467 br[num].config = &bcx->cfg;
1468 br[num].f = rule;
1469 br[num].wildness = wp;
1470 wp += fn;
1471 head = br + num;
1472 num++;
1473 }
1474 }
1475
1476 bcx->num_rules = num;
1477 bcx->build_rules = head;
1478
1479 return 0;
1480}
1481
1482
1483
1484
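/* Copy the data indexes used by each trie into the run-time context. */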
1485static void
1486acl_set_data_indexes(struct rte_acl_ctx *ctx)
1487{
1488 uint32_t i, n, ofs;
1489
1490 ofs = 0;
1491 for (i = 0; i != ctx->num_tries; i++) {
1492 n = ctx->trie[i].num_data_indexes;
1493 memcpy(ctx->data_indexes + ofs, ctx->trie[i].data_index,
1494 n * sizeof(ctx->data_indexes[0]));
1495 ctx->trie[i].data_index = ctx->data_indexes + ofs;
1496 ofs += ACL_MAX_INDEXES;
1497 }
1498}
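
/*
 * Internal build routine:
 * - set up the build context,
 * - filter the rules by category and analyse them,
 * - build the internal trie(s).
 */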
1506static int
1507acl_bld(struct acl_build_context *bcx, struct rte_acl_ctx *ctx,
1508 const struct rte_acl_config *cfg, uint32_t node_max)
1509{
1510 int32_t rc;
1511
1512
1513 memset(bcx, 0, sizeof(*bcx));
1514 bcx->acx = ctx;
1515 bcx->pool.alignment = ACL_POOL_ALIGN;
1516 bcx->pool.min_alloc = ACL_POOL_ALLOC_MIN;
1517 bcx->cfg = *cfg;
1518 bcx->category_mask = RTE_LEN2MASK(bcx->cfg.num_categories,
1519 typeof(bcx->category_mask));
1520 bcx->node_max = node_max;
1521
1522 rc = sigsetjmp(bcx->pool.fail, 0);
1523
1524
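	/* the build phase jumps back here via the pool's jump buffer on allocation failure */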
1525 if (rc != 0) {
1526 RTE_LOG(ERR, ACL,
1527 "ACL context: %s, %s() failed with error code: %d\n",
1528 bcx->acx->name, __func__, rc);
1529 return rc;
1530 }
1531
1532
1533 rc = acl_build_rules(bcx);
1534 if (rc != 0)
1535 return rc;
1536
1537
1538 if (bcx->build_rules == NULL) {
1539 rc = -EINVAL;
1540 } else {
1541
1542 rc = acl_build_tries(bcx, bcx->build_rules);
1543 }
1544 return rc;
1545}
1546
1547
1548
1549
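/* Validate the context and config arguments of rte_acl_build(). */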
1550static int
1551acl_check_bld_param(struct rte_acl_ctx *ctx, const struct rte_acl_config *cfg)
1552{
1553 static const size_t field_sizes[] = {
1554 sizeof(uint8_t), sizeof(uint16_t),
1555 sizeof(uint32_t), sizeof(uint64_t),
1556 };
1557
1558 uint32_t i, j;
1559
1560 if (ctx == NULL || cfg == NULL || cfg->num_categories == 0 ||
1561 cfg->num_categories > RTE_ACL_MAX_CATEGORIES ||
1562 cfg->num_fields == 0 ||
1563 cfg->num_fields > RTE_ACL_MAX_FIELDS)
1564 return -EINVAL;
1565
1566 for (i = 0; i != cfg->num_fields; i++) {
1567 if (cfg->defs[i].type > RTE_ACL_FIELD_TYPE_BITMASK) {
1568 RTE_LOG(ERR, ACL,
1569 "ACL context: %s, invalid type: %hhu for %u-th field\n",
1570 ctx->name, cfg->defs[i].type, i);
1571 return -EINVAL;
1572 }
1573 for (j = 0;
1574 j != RTE_DIM(field_sizes) &&
1575 cfg->defs[i].size != field_sizes[j];
1576 j++)
1577 ;
1578
1579 if (j == RTE_DIM(field_sizes)) {
1580 RTE_LOG(ERR, ACL,
1581 "ACL context: %s, invalid size: %hhu for %u-th field\n",
1582 ctx->name, cfg->defs[i].size, i);
1583 return -EINVAL;
1584 }
1585 }
1586
1587 return 0;
1588}
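
/*
 * The first field of every rule is one byte long, but the classify code
 * prefers to fetch input data with 4-byte loads. A 4-byte load of the first
 * field is only safe when some other field lives at a larger offset, i.e.
 * the first field cannot be the very last byte of the input buffer;
 * otherwise fall back to a single-byte load.
 */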
1603static uint32_t
1604get_first_load_size(const struct rte_acl_config *cfg)
1605{
1606 uint32_t i, max_ofs, ofs;
1607
1608 ofs = 0;
1609 max_ofs = 0;
1610
1611 for (i = 0; i != cfg->num_fields; i++) {
1612 if (cfg->defs[i].field_index == 0)
1613 ofs = cfg->defs[i].offset;
1614 else if (max_ofs < cfg->defs[i].offset)
1615 max_ofs = cfg->defs[i].offset;
1616 }
1617
1618 return (ofs < max_ofs) ? sizeof(uint32_t) : sizeof(uint8_t);
1619}
1620
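/*
 * Analyse the rule set, build the tries and generate the run-time
 * structures; retry with a smaller node budget per trie while the
 * generated image exceeds cfg->max_size.
 */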
1621int
1622rte_acl_build(struct rte_acl_ctx *ctx, const struct rte_acl_config *cfg)
1623{
1624 int32_t rc;
1625 uint32_t n;
1626 size_t max_size;
1627 struct acl_build_context bcx;
1628
1629 rc = acl_check_bld_param(ctx, cfg);
1630 if (rc != 0)
1631 return rc;
1632
1633 acl_build_reset(ctx);
1634
1635 if (cfg->max_size == 0) {
1636 n = NODE_MIN;
1637 max_size = SIZE_MAX;
1638 } else {
1639 n = NODE_MAX;
1640 max_size = cfg->max_size;
1641 }
1642
1643 for (rc = -ERANGE; n >= NODE_MIN && rc == -ERANGE; n /= 2) {
1644
1645
1646 rc = acl_bld(&bcx, ctx, cfg, n);
1647
1648 if (rc == 0) {
1649
1650 rc = rte_acl_gen(ctx, bcx.tries, bcx.bld_tries,
1651 bcx.num_tries, bcx.cfg.num_categories,
1652 ACL_MAX_INDEXES * RTE_DIM(bcx.tries) *
1653 sizeof(ctx->data_indexes[0]), max_size);
1654 if (rc == 0) {
1655
1656 acl_set_data_indexes(ctx);
1657
1658
1659 ctx->first_load_sz = get_first_load_size(cfg);
1660
1661
1662 ctx->config = *cfg;
1663 }
1664 }
1665
1666 acl_build_log(&bcx);
1667
1668
1669 tb_free_pool(&bcx.pool);
1670 }
1671
1672 return rc;
1673}
1674