1
2
3
4
5#include <sys/queue.h>
6#include <stdalign.h>
7#include <stdint.h>
8#include <string.h>
9#include <unistd.h>
10
11#include <rte_common.h>
12#include <rte_ether.h>
13#include <ethdev_driver.h>
14#include <rte_flow.h>
15#include <rte_flow_driver.h>
16#include <rte_malloc.h>
17#include <rte_cycles.h>
18#include <rte_bus_pci.h>
19#include <rte_ip.h>
20#include <rte_gre.h>
21#include <rte_vxlan.h>
22#include <rte_gtp.h>
23#include <rte_eal_paging.h>
24#include <rte_mpls.h>
25#include <rte_mtr.h>
26#include <rte_mtr_driver.h>
27#include <rte_tailq.h>
28
29#include <mlx5_glue.h>
30#include <mlx5_devx_cmds.h>
31#include <mlx5_prm.h>
32#include <mlx5_malloc.h>
33
34#include "mlx5_defs.h"
35#include "mlx5.h"
36#include "mlx5_common_os.h"
37#include "mlx5_flow.h"
38#include "mlx5_flow_os.h"
39#include "mlx5_rx.h"
40#include "mlx5_tx.h"
41#include "rte_pmd_mlx5.h"
42
/* Whole DV engine is compiled only when rdma-core DV support is present. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

/* Stub out DEVX counter action flag on rdma-core versions lacking it. */
#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

/* Without E-Switch DR support the FDB table type may be undefined. */
#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN TCI layout: PCP in bits 15:13, VID in bits 11:0. */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
65
/*
 * Compact summary of the L3/L4 layers a flow matches on.
 * The anonymous bit-field struct overlays @attr so the whole set
 * can be cleared or copied as a single 32-bit word (attr->attr = 0).
 */
union flow_dv_attr {
	struct {
		uint32_t valid:1;	/* Attributes have been resolved. */
		uint32_t ipv4:1;	/* Outer L3 is IPv4. */
		uint32_t ipv6:1;	/* Outer L3 is IPv6. */
		uint32_t tcp:1;		/* Outer L4 is TCP. */
		uint32_t udp:1;		/* Outer L4 is UDP. */
		uint32_t reserved:27;
	};
	uint32_t attr;			/* All flags as one word. */
};
77
/*
 * Forward declarations of resource release helpers defined later in this
 * file; needed because the creation paths above them take references.
 */
static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
			     struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
				     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
					uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
				  uint32_t rix_jump);
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
/**
 * Initialize flow attributes structure according to the flow items' types.
 *
 * @param[in] item
 *   Pointer to the item specification (pattern array, END-terminated).
 * @param[out] attr
 *   Pointer to flow attributes structure to fill in.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether the action comes after a tunnel decapsulation, in which case
 *   matching restarts from the inner headers once a tunnel item is seen.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
		  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
	uint64_t layers = dev_flow->handle->layers;

	/*
	 * If layers is already initialized, it means this dev_flow is the
	 * suffix flow: the layer flags were already set by the prefix flow.
	 * Use them directly because the suffix flow may not carry the
	 * user-defined pattern items after the flow was split.
	 */
	if (layers) {
		if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
			attr->ipv4 = 1;
		else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
			attr->ipv6 = 1;
		if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
			attr->tcp = 1;
		else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
			attr->udp = 1;
		attr->valid = 1;
		return;
	}
	/* Otherwise derive the attributes by scanning the pattern items. */
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		uint8_t next_protocol = 0xff;
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_MPLS:
		case RTE_FLOW_ITEM_TYPE_GTP:
			/* Tunnel found: restart from the inner headers. */
			if (tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			if (!attr->ipv6)
				attr->ipv4 = 1;
			/*
			 * NOTE(review): mask is checked but item->spec is
			 * dereferenced unconditionally — assumes validation
			 * guarantees spec != NULL whenever mask is set.
			 */
			if (item->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			    item->mask)->hdr.next_proto_id)
				next_protocol =
				    ((const struct rte_flow_item_ipv4 *)
				      (item->spec))->hdr.next_proto_id &
				    ((const struct rte_flow_item_ipv4 *)
				      (item->mask))->hdr.next_proto_id;
			/* IP-in-IP tunnel: restart from the inner headers. */
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			if (!attr->ipv4)
				attr->ipv6 = 1;
			if (item->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			    item->mask)->hdr.proto)
				next_protocol =
				    ((const struct rte_flow_item_ipv6 *)
				      (item->spec))->hdr.proto &
				    ((const struct rte_flow_item_ipv6 *)
				      (item->mask))->hdr.proto;
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			if (!attr->tcp)
				attr->udp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			if (!attr->udp)
				attr->tcp = 1;
			break;
		default:
			break;
		}
	}
	attr->valid = 1;
}
192
193
194
195
196
197
198
199
200
201
202static inline int
203rte_col_2_mlx5_col(enum rte_color rcol)
204{
205 switch (rcol) {
206 case RTE_COLOR_GREEN:
207 return MLX5_FLOW_COLOR_GREEN;
208 case RTE_COLOR_YELLOW:
209 return MLX5_FLOW_COLOR_YELLOW;
210 case RTE_COLOR_RED:
211 return MLX5_FLOW_COLOR_RED;
212 default:
213 break;
214 }
215 return MLX5_FLOW_COLOR_UNDEFINED;
216}
217
/*
 * Descriptor of one protocol-header field for modify-header actions.
 * Tables of these are terminated by an all-zero entry (size == 0).
 */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* HW modification field id. */
};
223
/*
 * Ethernet header fields. Each MAC address is split the way the HW
 * expects it: upper 32 bits and lower 16 bits as separate fields.
 */
struct field_modify_info modify_eth[] = {
	{4, 0, MLX5_MODI_OUT_DMAC_47_16},
	{2, 4, MLX5_MODI_OUT_DMAC_15_0},
	{4, 6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};
231
/* Outermost VLAN VID field descriptor. */
struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size in bits !!! Unlike the other tables, which use bytes. */
	{12, 0, MLX5_MODI_OUT_FIRST_VID},
	{0, 0, 0},
};
237
/* IPv4 header fields, byte offsets into the IPv4 header. */
struct field_modify_info modify_ipv4[] = {
	{1, 1, MLX5_MODI_OUT_IP_DSCP}, /* Type-of-service byte. */
	{1, 8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};
245
/* IPv6 header fields; 128-bit addresses split into four 32-bit words. */
struct field_modify_info modify_ipv6[] = {
	{1, 0, MLX5_MODI_OUT_IP_DSCP},
	{1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4, 8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};
259
/* UDP header fields (source/destination ports). */
struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};
265
/* TCP header fields (ports plus sequence/acknowledgment numbers). */
struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0},
};
273
274static void
275mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
276 uint8_t next_protocol, uint64_t *item_flags,
277 int *tunnel)
278{
279 MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
280 item->type == RTE_FLOW_ITEM_TYPE_IPV6);
281 if (next_protocol == IPPROTO_IPIP) {
282 *item_flags |= MLX5_FLOW_LAYER_IPIP;
283 *tunnel = 1;
284 }
285 if (next_protocol == IPPROTO_IPV6) {
286 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
287 *tunnel = 1;
288 }
289}
290
/**
 * Return the hash list stored in @*phl, creating it lazily on first use.
 *
 * Lock-free lazy initialization: the list is created locally and then
 * published with a compare-and-exchange. If a concurrent caller published
 * first, the local instance is destroyed and the winner's list is loaded,
 * so every caller ends up sharing a single list.
 *
 * @return
 *   The (possibly newly created) hash list, or NULL with @error set on
 *   allocation failure.
 */
static inline struct mlx5_hlist *
flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
		     const char *name, uint32_t size, bool direct_key,
		     bool lcores_share, void *ctx,
		     mlx5_list_create_cb cb_create,
		     mlx5_list_match_cb cb_match,
		     mlx5_list_remove_cb cb_remove,
		     mlx5_list_clone_cb cb_clone,
		     mlx5_list_clone_free_cb cb_clone_free,
		     struct rte_flow_error *error)
{
	struct mlx5_hlist *hl;
	struct mlx5_hlist *expected = NULL;
	char s[MLX5_NAME_SIZE];

	/* Fast path: list already published by a previous caller. */
	hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
	if (likely(hl))
		return hl;
	snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
	hl = mlx5_hlist_create(s, size, direct_key, lcores_share,
			       ctx, cb_create, cb_match, cb_remove, cb_clone,
			       cb_clone_free);
	if (!hl) {
		DRV_LOG(ERR, "%s hash creation failed", name);
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate resource memory");
		return NULL;
	}
	/* Publish; on a lost race drop ours and use the winner's list. */
	if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
					 __ATOMIC_SEQ_CST,
					 __ATOMIC_SEQ_CST)) {
		mlx5_hlist_destroy(hl);
		hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
	}
	return hl;
}
328
329
330
331
332
333
334
335
336static void
337mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
338 struct rte_vlan_hdr *vlan)
339{
340 uint16_t vlan_tci;
341 if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
342 vlan_tci =
343 ((const struct rte_flow_action_of_set_vlan_pcp *)
344 action->conf)->vlan_pcp;
345 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
346 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
347 vlan->vlan_tci |= vlan_tci;
348 } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
349 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
350 vlan->vlan_tci |= rte_be_to_cpu_16
351 (((const struct rte_flow_action_of_set_vlan_vid *)
352 action->conf)->vlan_vid);
353 }
354}
355
356
357
358
359
360
361
362
363
364
365
366
367
368static inline uint32_t
369flow_dv_fetch_field(const uint8_t *data, uint32_t size)
370{
371 uint32_t ret;
372
373 switch (size) {
374 case 1:
375 ret = *data;
376 break;
377 case 2:
378 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
379 break;
380 case 3:
381 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
382 ret = (ret << 8) | *(data + sizeof(uint16_t));
383 break;
384 case 4:
385 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
386 break;
387 default:
388 MLX5_ASSERT(false);
389 ret = 0;
390 break;
391 }
392 return ret;
393}
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
/**
 * Convert a modify-header item into HW modification commands.
 *
 * Walks the @field descriptor table in parallel with the bytes of
 * item->mask/item->spec; one modification command is emitted per field
 * with a non-zero mask. The actual bit offset and width of each command
 * are deduced from the mask value. For COPY-type commands the @dcopy
 * table describes the destination fields; when source and destination
 * field widths differ, a single source field may span several destination
 * fields (and vice versa) — @carry_b tracks the bits already consumed.
 *
 * @param[in] item
 *   Item with spec/mask in big-endian (network) byte order.
 * @param[in] field
 *   Source field descriptor table, terminated by a zero-size entry.
 * @param[in] dcopy
 *   Destination field table for COPY commands, NULL otherwise.
 * @param[in,out] resource
 *   Modify-header resource receiving the commands.
 * @param[in] type
 *   Command type: MLX5_MODIFICATION_TYPE_SET/ADD/COPY.
 * @param[out] error
 *   Flow error structure.
 * @return
 *   0 on success, negative errno value otherwise and @error is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	uint32_t carry_b = 0;

	/*
	 * The item and mask are provided in big-endian format.
	 * The fields are presented as in big-endian format either.
	 */
	MLX5_ASSERT(item->mask);
	MLX5_ASSERT(field->size);
	do {
		uint32_t size_b;
		uint32_t off_b;
		uint32_t mask;
		uint32_t data;
		bool next_field = true;
		bool next_dcopy = true;

		if (i >= MLX5_MAX_MODIFY_NUM)
			return rte_flow_error_set(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "too many items to modify");
		/* Fetch variable byte size mask from the array. */
		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
					   field->offset, field->size);
		if (!mask) {
			/* Field not touched by the mask — skip it. */
			++field;
			continue;
		}
		/* Deduce actual data width in bits from mask value. */
		off_b = rte_bsf32(mask) + carry_b;
		size_b = sizeof(uint32_t) * CHAR_BIT -
			 off_b - __builtin_clz(mask);
		MLX5_ASSERT(size_b);
		actions[i] = (struct mlx5_modification_cmd) {
			.action_type = type,
			.field = field->id,
			.offset = off_b,
			/* Length 0 encodes a full 32-bit field in HW. */
			.length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
				0 : size_b,
		};
		if (type == MLX5_MODIFICATION_TYPE_COPY) {
			MLX5_ASSERT(dcopy);
			actions[i].dst_field = dcopy->id;
			actions[i].dst_offset =
				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
			/* Convert entire record to big-endian format. */
			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
			/*
			 * Destination field overflow. Copy leftovers of
			 * a source field to the next destination field.
			 */
			carry_b = 0;
			if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
			    dcopy->size != 0) {
				actions[i].length =
					dcopy->size * CHAR_BIT - dcopy->offset;
				carry_b = actions[i].length;
				next_field = false;
			}
			/*
			 * Not enough bits in the source field to perform a
			 * copy to the next destination field. Switch to the
			 * next source field.
			 */
			if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
			    (size_b == field->size * CHAR_BIT - off_b)) {
				actions[i].length =
					field->size * CHAR_BIT - off_b;
				dcopy->offset += actions[i].length;
				next_dcopy = false;
			}
			if (next_dcopy)
				++dcopy;
		} else {
			MLX5_ASSERT(item->spec);
			data = flow_dv_fetch_field((const uint8_t *)item->spec +
						   field->offset, field->size);
			/* Shift out the trailing masked bits from data. */
			data = (data & mask) >> off_b;
			actions[i].data1 = rte_cpu_to_be_32(data);
		}
		/* Convert entire record to expected big-endian format. */
		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
		if (next_field)
			++field;
		++i;
	} while (field->size);
	if (resource->actions_num == i)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	resource->actions_num = i;
	return 0;
}
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539static int
540flow_dv_convert_action_modify_ipv4
541 (struct mlx5_flow_dv_modify_hdr_resource *resource,
542 const struct rte_flow_action *action,
543 struct rte_flow_error *error)
544{
545 const struct rte_flow_action_set_ipv4 *conf =
546 (const struct rte_flow_action_set_ipv4 *)(action->conf);
547 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
548 struct rte_flow_item_ipv4 ipv4;
549 struct rte_flow_item_ipv4 ipv4_mask;
550
551 memset(&ipv4, 0, sizeof(ipv4));
552 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
553 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
554 ipv4.hdr.src_addr = conf->ipv4_addr;
555 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
556 } else {
557 ipv4.hdr.dst_addr = conf->ipv4_addr;
558 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
559 }
560 item.spec = &ipv4;
561 item.mask = &ipv4_mask;
562 return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
563 MLX5_MODIFICATION_TYPE_SET, error);
564}
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579static int
580flow_dv_convert_action_modify_ipv6
581 (struct mlx5_flow_dv_modify_hdr_resource *resource,
582 const struct rte_flow_action *action,
583 struct rte_flow_error *error)
584{
585 const struct rte_flow_action_set_ipv6 *conf =
586 (const struct rte_flow_action_set_ipv6 *)(action->conf);
587 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
588 struct rte_flow_item_ipv6 ipv6;
589 struct rte_flow_item_ipv6 ipv6_mask;
590
591 memset(&ipv6, 0, sizeof(ipv6));
592 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
593 if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
594 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
595 sizeof(ipv6.hdr.src_addr));
596 memcpy(&ipv6_mask.hdr.src_addr,
597 &rte_flow_item_ipv6_mask.hdr.src_addr,
598 sizeof(ipv6.hdr.src_addr));
599 } else {
600 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
601 sizeof(ipv6.hdr.dst_addr));
602 memcpy(&ipv6_mask.hdr.dst_addr,
603 &rte_flow_item_ipv6_mask.hdr.dst_addr,
604 sizeof(ipv6.hdr.dst_addr));
605 }
606 item.spec = &ipv6;
607 item.mask = &ipv6_mask;
608 return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
609 MLX5_MODIFICATION_TYPE_SET, error);
610}
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625static int
626flow_dv_convert_action_modify_mac
627 (struct mlx5_flow_dv_modify_hdr_resource *resource,
628 const struct rte_flow_action *action,
629 struct rte_flow_error *error)
630{
631 const struct rte_flow_action_set_mac *conf =
632 (const struct rte_flow_action_set_mac *)(action->conf);
633 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
634 struct rte_flow_item_eth eth;
635 struct rte_flow_item_eth eth_mask;
636
637 memset(ð, 0, sizeof(eth));
638 memset(ð_mask, 0, sizeof(eth_mask));
639 if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
640 memcpy(ð.src.addr_bytes, &conf->mac_addr,
641 sizeof(eth.src.addr_bytes));
642 memcpy(ð_mask.src.addr_bytes,
643 &rte_flow_item_eth_mask.src.addr_bytes,
644 sizeof(eth_mask.src.addr_bytes));
645 } else {
646 memcpy(ð.dst.addr_bytes, &conf->mac_addr,
647 sizeof(eth.dst.addr_bytes));
648 memcpy(ð_mask.dst.addr_bytes,
649 &rte_flow_item_eth_mask.dst.addr_bytes,
650 sizeof(eth_mask.dst.addr_bytes));
651 }
652 item.spec = ð
653 item.mask = ð_mask;
654 return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
655 MLX5_MODIFICATION_TYPE_SET, error);
656}
657
658
659
660
661
662
663
664
665
666
667
668
669
670
/**
 * Convert an OF_SET_VLAN_VID action into a modify-header command.
 *
 * @param[in,out] resource
 *   Modify-header resource receiving the command.
 * @param[in] action
 *   The action to convert; conf is struct rte_flow_action_of_set_vlan_vid.
 * @param[out] error
 *   Flow error structure.
 * @return
 *   0 on success, negative errno value otherwise and @error is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_of_set_vlan_vid *conf =
		(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
	int i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	struct field_modify_info *field = modify_vlan_out_first_vid;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "too many items to modify");
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = field->id,
		.length = field->size, /* Size is in bits for this field. */
		.offset = field->offset,
	};
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	/*
	 * conf->vlan_vid is already big-endian; shifting it into the upper
	 * 16 bits of data1 places the VID where the HW expects it, so no
	 * extra byte-order conversion is applied here.
	 */
	actions[i].data1 = conf->vlan_vid;
	actions[i].data1 = actions[i].data1 << 16;
	resource->actions_num = ++i;
	return 0;
}
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
/**
 * Convert a SET_TP_SRC/SET_TP_DST (transport port) action into
 * modify-header commands, targeting UDP or TCP depending on the
 * flow attributes.
 *
 * @param[in,out] resource
 *   Modify-header resource receiving the commands.
 * @param[in] action
 *   The action to convert; conf is struct rte_flow_action_set_tp.
 * @param[in] items
 *   Pattern items, used to resolve @attr when not yet valid.
 * @param[in,out] attr
 *   Flow attributes; initialized from @items on first use.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether the action comes after a tunnel decapsulation.
 * @param[out] error
 *   Flow error structure.
 * @return
 *   0 on success, negative errno value otherwise and @error is set.
 */
static int
flow_dv_convert_action_modify_tp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_tp *conf =
		(const struct rte_flow_action_set_tp *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;
	struct field_modify_info *field;

	/* Lazily derive L4 protocol from the pattern items. */
	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->udp) {
		memset(&udp, 0, sizeof(udp));
		memset(&udp_mask, 0, sizeof(udp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			udp.hdr.src_port = conf->port;
			udp_mask.hdr.src_port =
					rte_flow_item_udp_mask.hdr.src_port;
		} else {
			udp.hdr.dst_port = conf->port;
			udp_mask.hdr.dst_port =
					rte_flow_item_udp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_UDP;
		item.spec = &udp;
		item.mask = &udp_mask;
		field = modify_udp;
	} else {
		MLX5_ASSERT(attr->tcp);
		memset(&tcp, 0, sizeof(tcp));
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			tcp.hdr.src_port = conf->port;
			tcp_mask.hdr.src_port =
					rte_flow_item_tcp_mask.hdr.src_port;
		} else {
			tcp.hdr.dst_port = conf->port;
			tcp_mask.hdr.dst_port =
					rte_flow_item_tcp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_TCP;
		item.spec = &tcp;
		item.mask = &tcp_mask;
		field = modify_tcp;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799static int
800flow_dv_convert_action_modify_ttl
801 (struct mlx5_flow_dv_modify_hdr_resource *resource,
802 const struct rte_flow_action *action,
803 const struct rte_flow_item *items,
804 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
805 bool tunnel_decap, struct rte_flow_error *error)
806{
807 const struct rte_flow_action_set_ttl *conf =
808 (const struct rte_flow_action_set_ttl *)(action->conf);
809 struct rte_flow_item item;
810 struct rte_flow_item_ipv4 ipv4;
811 struct rte_flow_item_ipv4 ipv4_mask;
812 struct rte_flow_item_ipv6 ipv6;
813 struct rte_flow_item_ipv6 ipv6_mask;
814 struct field_modify_info *field;
815
816 if (!attr->valid)
817 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
818 if (attr->ipv4) {
819 memset(&ipv4, 0, sizeof(ipv4));
820 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
821 ipv4.hdr.time_to_live = conf->ttl_value;
822 ipv4_mask.hdr.time_to_live = 0xFF;
823 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
824 item.spec = &ipv4;
825 item.mask = &ipv4_mask;
826 field = modify_ipv4;
827 } else {
828 MLX5_ASSERT(attr->ipv6);
829 memset(&ipv6, 0, sizeof(ipv6));
830 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
831 ipv6.hdr.hop_limits = conf->ttl_value;
832 ipv6_mask.hdr.hop_limits = 0xFF;
833 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
834 item.spec = &ipv6;
835 item.mask = &ipv6_mask;
836 field = modify_ipv6;
837 }
838 return flow_dv_convert_modify_action(&item, field, NULL, resource,
839 MLX5_MODIFICATION_TYPE_SET, error);
840}
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863static int
864flow_dv_convert_action_modify_dec_ttl
865 (struct mlx5_flow_dv_modify_hdr_resource *resource,
866 const struct rte_flow_item *items,
867 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
868 bool tunnel_decap, struct rte_flow_error *error)
869{
870 struct rte_flow_item item;
871 struct rte_flow_item_ipv4 ipv4;
872 struct rte_flow_item_ipv4 ipv4_mask;
873 struct rte_flow_item_ipv6 ipv6;
874 struct rte_flow_item_ipv6 ipv6_mask;
875 struct field_modify_info *field;
876
877 if (!attr->valid)
878 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
879 if (attr->ipv4) {
880 memset(&ipv4, 0, sizeof(ipv4));
881 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
882 ipv4.hdr.time_to_live = 0xFF;
883 ipv4_mask.hdr.time_to_live = 0xFF;
884 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
885 item.spec = &ipv4;
886 item.mask = &ipv4_mask;
887 field = modify_ipv4;
888 } else {
889 MLX5_ASSERT(attr->ipv6);
890 memset(&ipv6, 0, sizeof(ipv6));
891 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
892 ipv6.hdr.hop_limits = 0xFF;
893 ipv6_mask.hdr.hop_limits = 0xFF;
894 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
895 item.spec = &ipv6;
896 item.mask = &ipv6_mask;
897 field = modify_ipv6;
898 }
899 return flow_dv_convert_modify_action(&item, field, NULL, resource,
900 MLX5_MODIFICATION_TYPE_ADD, error);
901}
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
/**
 * Convert an INC_TCP_SEQ/DEC_TCP_SEQ action into a modify-header
 * ADD command on the TCP sequence number.
 *
 * @param[in,out] resource
 *   Modify-header resource receiving the command.
 * @param[in] action
 *   The action to convert; conf is a big-endian 32-bit delta.
 * @param[out] error
 *   Flow error structure.
 * @return
 *   0 on success, negative errno value otherwise and @error is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
		/*
		 * The HW has no decrement operation, only an unconditional
		 * ADD. Multiplying by UINT32_MAX (== -1 modulo 2^32) and
		 * truncating to 32 bits below negates the value, turning
		 * the subsequent ADD into a subtraction.
		 */
		value *= UINT32_MAX;
	tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
/**
 * Convert an INC_TCP_ACK/DEC_TCP_ACK action into a modify-header
 * ADD command on the TCP acknowledgment number.
 *
 * @param[in,out] resource
 *   Modify-header resource receiving the command.
 * @param[in] action
 *   The action to convert; conf is a big-endian 32-bit delta.
 * @param[out] error
 *   Flow error structure.
 * @return
 *   0 on success, negative errno value otherwise and @error is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
		/*
		 * The HW has no decrement operation, only an unconditional
		 * ADD. Multiplying by UINT32_MAX (== -1 modulo 2^32) and
		 * truncating to 32 bits below negates the value, turning
		 * the subsequent ADD into a subtraction.
		 */
		value *= UINT32_MAX;
	tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}
992
/* Translation from metadata register id to HW modification field id. */
static enum mlx5_modification_field reg_to_field[] = {
	[REG_NON] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020static int
1021flow_dv_convert_action_set_reg
1022 (struct mlx5_flow_dv_modify_hdr_resource *resource,
1023 const struct rte_flow_action *action,
1024 struct rte_flow_error *error)
1025{
1026 const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
1027 struct mlx5_modification_cmd *actions = resource->actions;
1028 uint32_t i = resource->actions_num;
1029
1030 if (i >= MLX5_MAX_MODIFY_NUM)
1031 return rte_flow_error_set(error, EINVAL,
1032 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1033 "too many items to modify");
1034 MLX5_ASSERT(conf->id != REG_NON);
1035 MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
1036 actions[i] = (struct mlx5_modification_cmd) {
1037 .action_type = MLX5_MODIFICATION_TYPE_SET,
1038 .field = reg_to_field[conf->id],
1039 .offset = conf->offset,
1040 .length = conf->length,
1041 };
1042 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
1043 actions[i].data1 = rte_cpu_to_be_32(conf->data);
1044 ++i;
1045 resource->actions_num = i;
1046 return 0;
1047}
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
/**
 * Convert a SET_TAG action into a modify-header SET command on the
 * application-tag metadata register chosen by conf->index.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Modify-header resource receiving the command.
 * @param[in] conf
 *   The SET_TAG action configuration.
 * @param[out] error
 *   Flow error structure.
 * @return
 *   0 on success, negative errno value otherwise and @error is set.
 */
static int
flow_dv_convert_action_set_tag
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action_set_tag *conf,
			 struct rte_flow_error *error)
{
	rte_be32_t data = rte_cpu_to_be_32(conf->data);
	rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	/* Single-entry field table, filled in after register lookup. */
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	enum mlx5_modification_field reg_type;
	int ret;

	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret != REG_NON);
	MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
	reg_type = reg_to_field[ret];
	MLX5_ASSERT(reg_type > 0);
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
/**
 * Convert an internal COPY_MREG action into a modify-header COPY command
 * between two metadata registers.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Modify-header resource receiving the command.
 * @param[in] action
 *   The action to convert; conf is struct mlx5_flow_action_copy_mreg.
 * @param[out] error
 *   Flow error structure.
 * @return
 *   0 on success, negative errno value otherwise and @error is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};

	/*
	 * REG_C_0 is shared with metadata (only dv_regc0_mask bits are
	 * usable), so restrict the copy to those bits.
	 */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->sh->config.dv_xmeta_en !=
			    MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			mask = rte_cpu_to_be_32(reg_c0 >> reg_dst.offset);
		} else {
			reg_dst.offset = 0;
			mask = rte_cpu_to_be_32(reg_c0);
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
/**
 * Convert a MARK action into a modify-header SET command on the register
 * that carries the flow mark.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   The MARK action configuration.
 * @param[in,out] resource
 *   Modify-header resource receiving the command.
 * @param[out] error
 *   Flow error structure.
 * @return
 *   0 on success, negative errno value otherwise and @error is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
			    const struct rte_flow_action_mark *conf,
			    struct mlx5_flow_dv_modify_hdr_resource *resource,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
					   priv->sh->dv_mark_mask);
	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg;

	if (!mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "zero mark action mask");
	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg > 0);
	if (reg == REG_C_0) {
		/*
		 * REG_C_0 is shared with metadata; shift the mark value
		 * into the bits reserved by dv_regc0_mask.
		 */
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224static enum modify_reg
1225flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1226 const struct rte_flow_attr *attr,
1227 struct rte_flow_error *error)
1228{
1229 int reg =
1230 mlx5_flow_get_reg_id(dev, attr->transfer ?
1231 MLX5_METADATA_FDB :
1232 attr->egress ?
1233 MLX5_METADATA_TX :
1234 MLX5_METADATA_RX, 0, error);
1235 if (reg < 0)
1236 return rte_flow_error_set(error,
1237 ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1238 NULL, "unavailable "
1239 "metadata register");
1240 return reg;
1241}
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
/**
 * Convert a SET_META action into a modify-header SET command on the
 * metadata register resolved for the flow direction.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Modify-header resource receiving the command.
 * @param[in] attr
 *   Flow attributes (used to pick the metadata register).
 * @param[in] conf
 *   The SET_META action configuration.
 * @param[out] error
 *   Flow error structure.
 * @return
 *   0 on success, negative errno value otherwise and @error is set.
 */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	uint32_t mask = rte_cpu_to_be_32(conf->mask);
	uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg != REG_NON);
	if (reg == REG_C_0) {
		/*
		 * REG_C_0 is shared with the flow mark; shift the metadata
		 * into the bits reserved by dv_regc0_mask.
		 */
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310static int
1311flow_dv_convert_action_modify_ipv4_dscp
1312 (struct mlx5_flow_dv_modify_hdr_resource *resource,
1313 const struct rte_flow_action *action,
1314 struct rte_flow_error *error)
1315{
1316 const struct rte_flow_action_set_dscp *conf =
1317 (const struct rte_flow_action_set_dscp *)(action->conf);
1318 struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1319 struct rte_flow_item_ipv4 ipv4;
1320 struct rte_flow_item_ipv4 ipv4_mask;
1321
1322 memset(&ipv4, 0, sizeof(ipv4));
1323 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1324 ipv4.hdr.type_of_service = conf->dscp;
1325 ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1326 item.spec = &ipv4;
1327 item.mask = &ipv4_mask;
1328 return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1329 MLX5_MODIFICATION_TYPE_SET, error);
1330}
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
/**
 * Convert a SET_IPV6_DSCP action into a modify-header SET command.
 *
 * @param[in,out] resource
 *   Modify-header resource receiving the command.
 * @param[in] action
 *   The action to convert; conf is struct rte_flow_action_set_dscp.
 * @param[out] error
 *   Flow error structure.
 * @return
 *   0 on success, negative errno value otherwise and @error is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_dscp *conf =
		(const struct rte_flow_action_set_dscp *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	/*
	 * Even though the DSCP bits offset of IPv6 is not byte aligned,
	 * rdma-core only accepts the DSCP bits byte aligned, starting from
	 * bit 0 to 5, to be compatible with IPv4. No need to shift the
	 * bits in the IPv6 case as rdma-core requires a byte-aligned value.
	 */
	ipv6.hdr.vtc_flow = conf->dscp;
	ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1372
1373static int
1374mlx5_flow_item_field_width(struct rte_eth_dev *dev,
1375 enum rte_flow_field_id field, int inherit,
1376 const struct rte_flow_attr *attr,
1377 struct rte_flow_error *error)
1378{
1379 struct mlx5_priv *priv = dev->data->dev_private;
1380
1381 switch (field) {
1382 case RTE_FLOW_FIELD_START:
1383 return 32;
1384 case RTE_FLOW_FIELD_MAC_DST:
1385 case RTE_FLOW_FIELD_MAC_SRC:
1386 return 48;
1387 case RTE_FLOW_FIELD_VLAN_TYPE:
1388 return 16;
1389 case RTE_FLOW_FIELD_VLAN_ID:
1390 return 12;
1391 case RTE_FLOW_FIELD_MAC_TYPE:
1392 return 16;
1393 case RTE_FLOW_FIELD_IPV4_DSCP:
1394 return 6;
1395 case RTE_FLOW_FIELD_IPV4_TTL:
1396 return 8;
1397 case RTE_FLOW_FIELD_IPV4_SRC:
1398 case RTE_FLOW_FIELD_IPV4_DST:
1399 return 32;
1400 case RTE_FLOW_FIELD_IPV6_DSCP:
1401 return 6;
1402 case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1403 return 8;
1404 case RTE_FLOW_FIELD_IPV6_SRC:
1405 case RTE_FLOW_FIELD_IPV6_DST:
1406 return 128;
1407 case RTE_FLOW_FIELD_TCP_PORT_SRC:
1408 case RTE_FLOW_FIELD_TCP_PORT_DST:
1409 return 16;
1410 case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1411 case RTE_FLOW_FIELD_TCP_ACK_NUM:
1412 return 32;
1413 case RTE_FLOW_FIELD_TCP_FLAGS:
1414 return 9;
1415 case RTE_FLOW_FIELD_UDP_PORT_SRC:
1416 case RTE_FLOW_FIELD_UDP_PORT_DST:
1417 return 16;
1418 case RTE_FLOW_FIELD_VXLAN_VNI:
1419 case RTE_FLOW_FIELD_GENEVE_VNI:
1420 return 24;
1421 case RTE_FLOW_FIELD_GTP_TEID:
1422 case RTE_FLOW_FIELD_TAG:
1423 return 32;
1424 case RTE_FLOW_FIELD_MARK:
1425 return __builtin_popcount(priv->sh->dv_mark_mask);
1426 case RTE_FLOW_FIELD_META:
1427 return (flow_dv_get_metadata_reg(dev, attr, error) == REG_C_0) ?
1428 __builtin_popcount(priv->sh->dv_meta_mask) : 32;
1429 case RTE_FLOW_FIELD_POINTER:
1430 case RTE_FLOW_FIELD_VALUE:
1431 return inherit < 0 ? 0 : inherit;
1432 case RTE_FLOW_FIELD_IPV4_ECN:
1433 case RTE_FLOW_FIELD_IPV6_ECN:
1434 return 2;
1435 default:
1436 MLX5_ASSERT(false);
1437 }
1438 return 0;
1439}
1440
/**
 * Translate a modify-field flow API field into the list of hardware
 * modify-header registers and the matching big-endian bit mask.
 *
 * Fills @p info with {size, dcopy byte offset, HW register ID} entries,
 * one per 32/16-bit register slice the field spans. When @p mask is
 * non-NULL the function also builds the per-slice masks, consuming
 * @p width bits starting from the least significant slice; when it is
 * NULL only the register list is produced (copy destination case).
 *
 * @param[in] data
 *   Field description: field ID, level and bit offset.
 * @param[out] info
 *   Array of register descriptors to fill.
 * @param[out] mask
 *   Big-endian mask array to fill, or NULL for a copy destination.
 * @param[in] width
 *   Number of bits to modify.
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Flow attributes, used to resolve TAG/MARK/META registers.
 * @param[out] error
 *   Pointer to the error structure (set by register lookup helpers).
 */
static void
mlx5_flow_field_id_to_modify_info
		(const struct rte_flow_action_modify_data *data,
		 struct field_modify_info *info, uint32_t *mask,
		 uint32_t width, struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr, struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t idx = 0;
	uint32_t off = 0;

	switch (data->field) {
	case RTE_FLOW_FIELD_START:
		/* not supported yet */
		MLX5_ASSERT(false);
		break;
	case RTE_FLOW_FIELD_MAC_DST:
		/* 48-bit MAC split over a 16-bit and a 32-bit register. */
		off = data->offset > 16 ? data->offset - 16 : 0;
		if (mask) {
			if (data->offset < 16) {
				info[idx] = (struct field_modify_info){2, 4,
						MLX5_MODI_OUT_DMAC_15_0};
				if (width < 16) {
					mask[1] = rte_cpu_to_be_16(0xffff >>
								 (16 - width));
					width = 0;
				} else {
					mask[1] = RTE_BE16(0xffff);
					width -= 16;
				}
				if (!width)
					break;
				++idx;
			}
			info[idx] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_DMAC_47_16};
			mask[0] = rte_cpu_to_be_32((0xffffffff >>
						    (32 - width)) << off);
		} else {
			if (data->offset < 16)
				info[idx++] = (struct field_modify_info){2, 0,
						MLX5_MODI_OUT_DMAC_15_0};
			info[idx] = (struct field_modify_info){4, off,
						MLX5_MODI_OUT_DMAC_47_16};
		}
		break;
	case RTE_FLOW_FIELD_MAC_SRC:
		/* Same layout as MAC_DST, on the SMAC registers. */
		off = data->offset > 16 ? data->offset - 16 : 0;
		if (mask) {
			if (data->offset < 16) {
				info[idx] = (struct field_modify_info){2, 4,
						MLX5_MODI_OUT_SMAC_15_0};
				if (width < 16) {
					mask[1] = rte_cpu_to_be_16(0xffff >>
								 (16 - width));
					width = 0;
				} else {
					mask[1] = RTE_BE16(0xffff);
					width -= 16;
				}
				if (!width)
					break;
				++idx;
			}
			info[idx] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_SMAC_47_16};
			mask[0] = rte_cpu_to_be_32((0xffffffff >>
						    (32 - width)) << off);
		} else {
			if (data->offset < 16)
				info[idx++] = (struct field_modify_info){2, 0,
						MLX5_MODI_OUT_SMAC_15_0};
			info[idx] = (struct field_modify_info){4, off,
						MLX5_MODI_OUT_SMAC_47_16};
		}
		break;
	case RTE_FLOW_FIELD_VLAN_TYPE:
		/* not supported yet */
		break;
	case RTE_FLOW_FIELD_VLAN_ID:
		info[idx] = (struct field_modify_info){2, 0,
					MLX5_MODI_OUT_FIRST_VID};
		if (mask)
			mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
		break;
	case RTE_FLOW_FIELD_MAC_TYPE:
		info[idx] = (struct field_modify_info){2, 0,
					MLX5_MODI_OUT_ETHERTYPE};
		if (mask)
			mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
		break;
	case RTE_FLOW_FIELD_IPV4_DSCP:
		info[idx] = (struct field_modify_info){1, 0,
					MLX5_MODI_OUT_IP_DSCP};
		if (mask)
			mask[idx] = 0x3f >> (6 - width);
		break;
	case RTE_FLOW_FIELD_IPV4_TTL:
		info[idx] = (struct field_modify_info){1, 0,
					MLX5_MODI_OUT_IPV4_TTL};
		if (mask)
			mask[idx] = 0xff >> (8 - width);
		break;
	case RTE_FLOW_FIELD_IPV4_SRC:
		info[idx] = (struct field_modify_info){4, 0,
					MLX5_MODI_OUT_SIPV4};
		if (mask)
			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
						     (32 - width));
		break;
	case RTE_FLOW_FIELD_IPV4_DST:
		info[idx] = (struct field_modify_info){4, 0,
					MLX5_MODI_OUT_DIPV4};
		if (mask)
			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
						     (32 - width));
		break;
	case RTE_FLOW_FIELD_IPV6_DSCP:
		/* IPv6 shares the DSCP modify register with IPv4. */
		info[idx] = (struct field_modify_info){1, 0,
					MLX5_MODI_OUT_IP_DSCP};
		if (mask)
			mask[idx] = 0x3f >> (6 - width);
		break;
	case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
		info[idx] = (struct field_modify_info){1, 0,
					MLX5_MODI_OUT_IPV6_HOPLIMIT};
		if (mask)
			mask[idx] = 0xff >> (8 - width);
		break;
	case RTE_FLOW_FIELD_IPV6_SRC:
		/*
		 * 128-bit address split over four 32-bit registers;
		 * width is consumed from the low slice upward, mask[3]
		 * being the least significant 32 bits.
		 */
		if (mask) {
			if (data->offset < 32) {
				info[idx] = (struct field_modify_info){4, 12,
						MLX5_MODI_OUT_SIPV6_31_0};
				if (width < 32) {
					mask[3] =
						rte_cpu_to_be_32(0xffffffff >>
								 (32 - width));
					width = 0;
				} else {
					mask[3] = RTE_BE32(0xffffffff);
					width -= 32;
				}
				if (!width)
					break;
				++idx;
			}
			if (data->offset < 64) {
				info[idx] = (struct field_modify_info){4, 8,
						MLX5_MODI_OUT_SIPV6_63_32};
				if (width < 32) {
					mask[2] =
						rte_cpu_to_be_32(0xffffffff >>
								 (32 - width));
					width = 0;
				} else {
					mask[2] = RTE_BE32(0xffffffff);
					width -= 32;
				}
				if (!width)
					break;
				++idx;
			}
			if (data->offset < 96) {
				info[idx] = (struct field_modify_info){4, 4,
						MLX5_MODI_OUT_SIPV6_95_64};
				if (width < 32) {
					mask[1] =
						rte_cpu_to_be_32(0xffffffff >>
								 (32 - width));
					width = 0;
				} else {
					mask[1] = RTE_BE32(0xffffffff);
					width -= 32;
				}
				if (!width)
					break;
				++idx;
			}
			info[idx] = (struct field_modify_info){4, 0,
					MLX5_MODI_OUT_SIPV6_127_96};
			mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
		} else {
			if (data->offset < 32)
				info[idx++] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_SIPV6_31_0};
			if (data->offset < 64)
				info[idx++] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_SIPV6_63_32};
			if (data->offset < 96)
				info[idx++] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_SIPV6_95_64};
			if (data->offset < 128)
				info[idx++] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_SIPV6_127_96};
		}
		break;
	case RTE_FLOW_FIELD_IPV6_DST:
		/* Same slicing as IPV6_SRC, on the DIPV6 registers. */
		if (mask) {
			if (data->offset < 32) {
				info[idx] = (struct field_modify_info){4, 12,
						MLX5_MODI_OUT_DIPV6_31_0};
				if (width < 32) {
					mask[3] =
						rte_cpu_to_be_32(0xffffffff >>
								 (32 - width));
					width = 0;
				} else {
					mask[3] = RTE_BE32(0xffffffff);
					width -= 32;
				}
				if (!width)
					break;
				++idx;
			}
			if (data->offset < 64) {
				info[idx] = (struct field_modify_info){4, 8,
						MLX5_MODI_OUT_DIPV6_63_32};
				if (width < 32) {
					mask[2] =
						rte_cpu_to_be_32(0xffffffff >>
								 (32 - width));
					width = 0;
				} else {
					mask[2] = RTE_BE32(0xffffffff);
					width -= 32;
				}
				if (!width)
					break;
				++idx;
			}
			if (data->offset < 96) {
				info[idx] = (struct field_modify_info){4, 4,
						MLX5_MODI_OUT_DIPV6_95_64};
				if (width < 32) {
					mask[1] =
						rte_cpu_to_be_32(0xffffffff >>
								 (32 - width));
					width = 0;
				} else {
					mask[1] = RTE_BE32(0xffffffff);
					width -= 32;
				}
				if (!width)
					break;
				++idx;
			}
			info[idx] = (struct field_modify_info){4, 0,
					MLX5_MODI_OUT_DIPV6_127_96};
			mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
		} else {
			if (data->offset < 32)
				info[idx++] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_DIPV6_31_0};
			if (data->offset < 64)
				info[idx++] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_DIPV6_63_32};
			if (data->offset < 96)
				info[idx++] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_DIPV6_95_64};
			if (data->offset < 128)
				info[idx++] = (struct field_modify_info){4, 0,
						MLX5_MODI_OUT_DIPV6_127_96};
		}
		break;
	case RTE_FLOW_FIELD_TCP_PORT_SRC:
		info[idx] = (struct field_modify_info){2, 0,
					MLX5_MODI_OUT_TCP_SPORT};
		if (mask)
			mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
		break;
	case RTE_FLOW_FIELD_TCP_PORT_DST:
		info[idx] = (struct field_modify_info){2, 0,
					MLX5_MODI_OUT_TCP_DPORT};
		if (mask)
			mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
		break;
	case RTE_FLOW_FIELD_TCP_SEQ_NUM:
		info[idx] = (struct field_modify_info){4, 0,
					MLX5_MODI_OUT_TCP_SEQ_NUM};
		if (mask)
			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
						     (32 - width));
		break;
	case RTE_FLOW_FIELD_TCP_ACK_NUM:
		info[idx] = (struct field_modify_info){4, 0,
					MLX5_MODI_OUT_TCP_ACK_NUM};
		if (mask)
			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
						     (32 - width));
		break;
	case RTE_FLOW_FIELD_TCP_FLAGS:
		info[idx] = (struct field_modify_info){2, 0,
					MLX5_MODI_OUT_TCP_FLAGS};
		if (mask)
			mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
		break;
	case RTE_FLOW_FIELD_UDP_PORT_SRC:
		info[idx] = (struct field_modify_info){2, 0,
					MLX5_MODI_OUT_UDP_SPORT};
		if (mask)
			mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
		break;
	case RTE_FLOW_FIELD_UDP_PORT_DST:
		info[idx] = (struct field_modify_info){2, 0,
					MLX5_MODI_OUT_UDP_DPORT};
		if (mask)
			mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
		break;
	case RTE_FLOW_FIELD_VXLAN_VNI:
		/* not supported yet */
		break;
	case RTE_FLOW_FIELD_GENEVE_VNI:
		/* not supported yet */
		break;
	case RTE_FLOW_FIELD_GTP_TEID:
		info[idx] = (struct field_modify_info){4, 0,
					MLX5_MODI_GTP_TEID};
		if (mask)
			mask[idx] = rte_cpu_to_be_32(0xffffffff >>
						     (32 - width));
		break;
	case RTE_FLOW_FIELD_TAG:
		{
			/* TAG maps to a register chosen per level. */
			int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
						       data->level, error);
			/* Error already set by the register lookup. */
			if (reg < 0)
				return;
			MLX5_ASSERT(reg != REG_NON);
			MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
			info[idx] = (struct field_modify_info){4, 0,
						reg_to_field[reg]};
			if (mask)
				mask[idx] =
					rte_cpu_to_be_32(0xffffffff >>
							 (32 - width));
		}
		break;
	case RTE_FLOW_FIELD_MARK:
		{
			uint32_t mark_mask = priv->sh->dv_mark_mask;
			uint32_t mark_count = __builtin_popcount(mark_mask);
			int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
						       0, error);
			if (reg < 0)
				return;
			MLX5_ASSERT(reg != REG_NON);
			MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
			info[idx] = (struct field_modify_info){4, 0,
						reg_to_field[reg]};
			/* Mask is restricted to the configured mark bits. */
			if (mask)
				mask[idx] = rte_cpu_to_be_32((mark_mask >>
					(mark_count - width)) & mark_mask);
		}
		break;
	case RTE_FLOW_FIELD_META:
		{
			uint32_t meta_mask = priv->sh->dv_meta_mask;
			uint32_t meta_count = __builtin_popcount(meta_mask);
			int reg = flow_dv_get_metadata_reg(dev, attr, error);
			if (reg < 0)
				return;
			MLX5_ASSERT(reg != REG_NON);
			MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
			info[idx] = (struct field_modify_info){4, 0,
						reg_to_field[reg]};
			/* Mask is restricted to the configured meta bits. */
			if (mask)
				mask[idx] = rte_cpu_to_be_32((meta_mask >>
					(meta_count - width)) & meta_mask);
		}
		break;
	case RTE_FLOW_FIELD_IPV4_ECN:
	case RTE_FLOW_FIELD_IPV6_ECN:
		info[idx] = (struct field_modify_info){1, 0,
					MLX5_MODI_OUT_IP_ECN};
		if (mask)
			mask[idx] = 0x3 >> (2 - width);
		break;
	case RTE_FLOW_FIELD_POINTER:
	case RTE_FLOW_FIELD_VALUE:
		/* Immediate operands are handled by the caller. */
	default:
		MLX5_ASSERT(false);
		break;
	}
}
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
/**
 * Convert a MODIFY_FIELD action to a DV modify-header specification.
 *
 * A SET is emitted when the source is an immediate value or pointer,
 * a COPY when the source is another packet field.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource to append to.
 * @param[in] action
 *   Pointer to the MODIFY_FIELD action specification.
 * @param[in] attr
 *   Flow attributes, used to resolve metadata registers.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_field
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_attr *attr,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_modify_field *conf =
		(const struct rte_flow_action_modify_field *)(action->conf);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = NULL
	};
	struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
								{0, 0, 0} };
	struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
								{0, 0, 0} };
	uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
	uint32_t type, meta = 0;

	if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
	    conf->src.field == RTE_FLOW_FIELD_VALUE) {
		type = MLX5_MODIFICATION_TYPE_SET;
		/* Destination registers and mask from the dst field. */
		mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
						  conf->width, dev,
						  attr, error);
		/* Immediate source becomes the item spec. */
		item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
					(void *)(uintptr_t)conf->src.pvalue :
					(void *)(uintptr_t)&conf->src.value;
		if (conf->dst.field == RTE_FLOW_FIELD_META) {
			/* META register expects a big-endian value. */
			meta = *(const unaligned_uint32_t *)item.spec;
			meta = rte_cpu_to_be_32(meta);
			item.spec = &meta;
		}
	} else {
		type = MLX5_MODIFICATION_TYPE_COPY;
		/* Copy destination: register list only, no mask. */
		mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
						  conf->width, dev,
						  attr, error);
		/* Copy source: registers plus the bit mask. */
		mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
						  conf->width, dev,
						  attr, error);
	}
	item.mask = &mask;
	return flow_dv_convert_modify_action(&item,
			field, dcopy, resource, type, error);
}
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911static int
1912flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1913 const struct rte_flow_item *item,
1914 const struct rte_flow_attr *attr __rte_unused,
1915 struct rte_flow_error *error)
1916{
1917 struct mlx5_priv *priv = dev->data->dev_private;
1918 struct mlx5_sh_config *config = &priv->sh->config;
1919 const struct rte_flow_item_mark *spec = item->spec;
1920 const struct rte_flow_item_mark *mask = item->mask;
1921 const struct rte_flow_item_mark nic_mask = {
1922 .id = priv->sh->dv_mark_mask,
1923 };
1924 int ret;
1925
1926 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1927 return rte_flow_error_set(error, ENOTSUP,
1928 RTE_FLOW_ERROR_TYPE_ITEM, item,
1929 "extended metadata feature"
1930 " isn't enabled");
1931 if (!mlx5_flow_ext_mreg_supported(dev))
1932 return rte_flow_error_set(error, ENOTSUP,
1933 RTE_FLOW_ERROR_TYPE_ITEM, item,
1934 "extended metadata register"
1935 " isn't supported");
1936 if (!nic_mask.id)
1937 return rte_flow_error_set(error, ENOTSUP,
1938 RTE_FLOW_ERROR_TYPE_ITEM, item,
1939 "extended metadata register"
1940 " isn't available");
1941 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1942 if (ret < 0)
1943 return ret;
1944 if (!spec)
1945 return rte_flow_error_set(error, EINVAL,
1946 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1947 item->spec,
1948 "data cannot be empty");
1949 if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1950 return rte_flow_error_set(error, EINVAL,
1951 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1952 &spec->id,
1953 "mark id exceeds the limit");
1954 if (!mask)
1955 mask = &nic_mask;
1956 if (!mask->id)
1957 return rte_flow_error_set(error, EINVAL,
1958 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1959 "mask cannot be zero");
1960
1961 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1962 (const uint8_t *)&nic_mask,
1963 sizeof(struct rte_flow_item_mark),
1964 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1965 if (ret < 0)
1966 return ret;
1967 return 0;
1968}
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985static int
1986flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
1987 const struct rte_flow_item *item,
1988 const struct rte_flow_attr *attr,
1989 struct rte_flow_error *error)
1990{
1991 struct mlx5_priv *priv = dev->data->dev_private;
1992 struct mlx5_sh_config *config = &priv->sh->config;
1993 const struct rte_flow_item_meta *spec = item->spec;
1994 const struct rte_flow_item_meta *mask = item->mask;
1995 struct rte_flow_item_meta nic_mask = {
1996 .data = UINT32_MAX
1997 };
1998 int reg;
1999 int ret;
2000
2001 if (!spec)
2002 return rte_flow_error_set(error, EINVAL,
2003 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2004 item->spec,
2005 "data cannot be empty");
2006 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2007 if (!mlx5_flow_ext_mreg_supported(dev))
2008 return rte_flow_error_set(error, ENOTSUP,
2009 RTE_FLOW_ERROR_TYPE_ITEM, item,
2010 "extended metadata register"
2011 " isn't supported");
2012 reg = flow_dv_get_metadata_reg(dev, attr, error);
2013 if (reg < 0)
2014 return reg;
2015 if (reg == REG_NON)
2016 return rte_flow_error_set(error, ENOTSUP,
2017 RTE_FLOW_ERROR_TYPE_ITEM, item,
2018 "unavailable extended metadata register");
2019 if (reg == REG_B)
2020 return rte_flow_error_set(error, ENOTSUP,
2021 RTE_FLOW_ERROR_TYPE_ITEM, item,
2022 "match on reg_b "
2023 "isn't supported");
2024 if (reg != REG_A)
2025 nic_mask.data = priv->sh->dv_meta_mask;
2026 } else {
2027 if (attr->transfer)
2028 return rte_flow_error_set(error, ENOTSUP,
2029 RTE_FLOW_ERROR_TYPE_ITEM, item,
2030 "extended metadata feature "
2031 "should be enabled when "
2032 "meta item is requested "
2033 "with e-switch mode ");
2034 if (attr->ingress)
2035 return rte_flow_error_set(error, ENOTSUP,
2036 RTE_FLOW_ERROR_TYPE_ITEM, item,
2037 "match on metadata for ingress "
2038 "is not supported in legacy "
2039 "metadata mode");
2040 }
2041 if (!mask)
2042 mask = &rte_flow_item_meta_mask;
2043 if (!mask->data)
2044 return rte_flow_error_set(error, EINVAL,
2045 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2046 "mask cannot be zero");
2047
2048 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2049 (const uint8_t *)&nic_mask,
2050 sizeof(struct rte_flow_item_meta),
2051 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2052 return ret;
2053}
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070static int
2071flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2072 const struct rte_flow_item *item,
2073 const struct rte_flow_attr *attr __rte_unused,
2074 struct rte_flow_error *error)
2075{
2076 const struct rte_flow_item_tag *spec = item->spec;
2077 const struct rte_flow_item_tag *mask = item->mask;
2078 const struct rte_flow_item_tag nic_mask = {
2079 .data = RTE_BE32(UINT32_MAX),
2080 .index = 0xff,
2081 };
2082 int ret;
2083
2084 if (!mlx5_flow_ext_mreg_supported(dev))
2085 return rte_flow_error_set(error, ENOTSUP,
2086 RTE_FLOW_ERROR_TYPE_ITEM, item,
2087 "extensive metadata register"
2088 " isn't supported");
2089 if (!spec)
2090 return rte_flow_error_set(error, EINVAL,
2091 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2092 item->spec,
2093 "data cannot be empty");
2094 if (!mask)
2095 mask = &rte_flow_item_tag_mask;
2096 if (!mask->data)
2097 return rte_flow_error_set(error, EINVAL,
2098 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2099 "mask cannot be zero");
2100
2101 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2102 (const uint8_t *)&nic_mask,
2103 sizeof(struct rte_flow_item_tag),
2104 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2105 if (ret < 0)
2106 return ret;
2107 if (mask->index != 0xff)
2108 return rte_flow_error_set(error, EINVAL,
2109 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2110 "partial mask for tag index"
2111 " is not supported");
2112 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2113 if (ret < 0)
2114 return ret;
2115 MLX5_ASSERT(ret != REG_NON);
2116 return 0;
2117}
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136static int
2137flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2138 const struct rte_flow_item *item,
2139 const struct rte_flow_attr *attr,
2140 uint64_t item_flags,
2141 struct rte_flow_error *error)
2142{
2143 const struct rte_flow_item_port_id *spec = item->spec;
2144 const struct rte_flow_item_port_id *mask = item->mask;
2145 const struct rte_flow_item_port_id switch_mask = {
2146 .id = 0xffffffff,
2147 };
2148 struct mlx5_priv *esw_priv;
2149 struct mlx5_priv *dev_priv;
2150 int ret;
2151
2152 if (!attr->transfer)
2153 return rte_flow_error_set(error, EINVAL,
2154 RTE_FLOW_ERROR_TYPE_ITEM,
2155 NULL,
2156 "match on port id is valid only"
2157 " when transfer flag is enabled");
2158 if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2159 return rte_flow_error_set(error, ENOTSUP,
2160 RTE_FLOW_ERROR_TYPE_ITEM, item,
2161 "multiple source ports are not"
2162 " supported");
2163 if (!mask)
2164 mask = &switch_mask;
2165 if (mask->id != 0xffffffff)
2166 return rte_flow_error_set(error, ENOTSUP,
2167 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2168 mask,
2169 "no support for partial mask on"
2170 " \"id\" field");
2171 ret = mlx5_flow_item_acceptable
2172 (item, (const uint8_t *)mask,
2173 (const uint8_t *)&rte_flow_item_port_id_mask,
2174 sizeof(struct rte_flow_item_port_id),
2175 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2176 if (ret)
2177 return ret;
2178 if (!spec)
2179 return 0;
2180 if (spec->id == MLX5_PORT_ESW_MGR)
2181 return 0;
2182 esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2183 if (!esw_priv)
2184 return rte_flow_error_set(error, rte_errno,
2185 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2186 "failed to obtain E-Switch info for"
2187 " port");
2188 dev_priv = mlx5_dev_to_eswitch_info(dev);
2189 if (!dev_priv)
2190 return rte_flow_error_set(error, rte_errno,
2191 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2192 NULL,
2193 "failed to obtain E-Switch info");
2194 if (esw_priv->domain_id != dev_priv->domain_id)
2195 return rte_flow_error_set(error, EINVAL,
2196 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2197 "cannot match on a port from a"
2198 " different E-Switch");
2199 return 0;
2200}
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219static int
2220flow_dv_validate_item_represented_port(struct rte_eth_dev *dev,
2221 const struct rte_flow_item *item,
2222 const struct rte_flow_attr *attr,
2223 uint64_t item_flags,
2224 struct rte_flow_error *error)
2225{
2226 const struct rte_flow_item_ethdev *spec = item->spec;
2227 const struct rte_flow_item_ethdev *mask = item->mask;
2228 const struct rte_flow_item_ethdev switch_mask = {
2229 .port_id = UINT16_MAX,
2230 };
2231 struct mlx5_priv *esw_priv;
2232 struct mlx5_priv *dev_priv;
2233 int ret;
2234
2235 if (!attr->transfer)
2236 return rte_flow_error_set(error, EINVAL,
2237 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2238 "match on port id is valid only when transfer flag is enabled");
2239 if (item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT)
2240 return rte_flow_error_set(error, ENOTSUP,
2241 RTE_FLOW_ERROR_TYPE_ITEM, item,
2242 "multiple source ports are not supported");
2243 if (!mask)
2244 mask = &switch_mask;
2245 if (mask->port_id != UINT16_MAX)
2246 return rte_flow_error_set(error, ENOTSUP,
2247 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2248 "no support for partial mask on \"id\" field");
2249 ret = mlx5_flow_item_acceptable
2250 (item, (const uint8_t *)mask,
2251 (const uint8_t *)&rte_flow_item_ethdev_mask,
2252 sizeof(struct rte_flow_item_ethdev),
2253 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2254 if (ret)
2255 return ret;
2256 if (!spec || spec->port_id == UINT16_MAX)
2257 return 0;
2258 esw_priv = mlx5_port_to_eswitch_info(spec->port_id, false);
2259 if (!esw_priv)
2260 return rte_flow_error_set(error, rte_errno,
2261 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2262 "failed to obtain E-Switch info for port");
2263 dev_priv = mlx5_dev_to_eswitch_info(dev);
2264 if (!dev_priv)
2265 return rte_flow_error_set(error, rte_errno,
2266 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2267 NULL,
2268 "failed to obtain E-Switch info");
2269 if (esw_priv->domain_id != dev_priv->domain_id)
2270 return rte_flow_error_set(error, EINVAL,
2271 RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2272 "cannot match on a port from a different E-Switch");
2273 return 0;
2274}
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291static int
2292flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2293 uint64_t item_flags,
2294 struct rte_eth_dev *dev,
2295 struct rte_flow_error *error)
2296{
2297 const struct rte_flow_item_vlan *mask = item->mask;
2298 const struct rte_flow_item_vlan nic_mask = {
2299 .tci = RTE_BE16(UINT16_MAX),
2300 .inner_type = RTE_BE16(UINT16_MAX),
2301 .has_more_vlan = 1,
2302 };
2303 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2304 int ret;
2305 const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2306 MLX5_FLOW_LAYER_INNER_L4) :
2307 (MLX5_FLOW_LAYER_OUTER_L3 |
2308 MLX5_FLOW_LAYER_OUTER_L4);
2309 const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2310 MLX5_FLOW_LAYER_OUTER_VLAN;
2311
2312 if (item_flags & vlanm)
2313 return rte_flow_error_set(error, EINVAL,
2314 RTE_FLOW_ERROR_TYPE_ITEM, item,
2315 "multiple VLAN layers not supported");
2316 else if ((item_flags & l34m) != 0)
2317 return rte_flow_error_set(error, EINVAL,
2318 RTE_FLOW_ERROR_TYPE_ITEM, item,
2319 "VLAN cannot follow L3/L4 layer");
2320 if (!mask)
2321 mask = &rte_flow_item_vlan_mask;
2322 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2323 (const uint8_t *)&nic_mask,
2324 sizeof(struct rte_flow_item_vlan),
2325 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2326 if (ret)
2327 return ret;
2328 if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2329 struct mlx5_priv *priv = dev->data->dev_private;
2330
2331 if (priv->vmwa_context) {
2332
2333
2334
2335
2336
2337
2338
2339 return rte_flow_error_set(error, EINVAL,
2340 RTE_FLOW_ERROR_TYPE_ITEM,
2341 item,
2342 "VLAN tag mask is not"
2343 " supported in virtual"
2344 " environment");
2345 }
2346 }
2347 return 0;
2348}
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360#define MLX5_GTP_FLAGS_MASK 0x07
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377static int
2378flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2379 const struct rte_flow_item *item,
2380 uint64_t item_flags,
2381 struct rte_flow_error *error)
2382{
2383 struct mlx5_priv *priv = dev->data->dev_private;
2384 const struct rte_flow_item_gtp *spec = item->spec;
2385 const struct rte_flow_item_gtp *mask = item->mask;
2386 const struct rte_flow_item_gtp nic_mask = {
2387 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2388 .msg_type = 0xff,
2389 .teid = RTE_BE32(0xffffffff),
2390 };
2391
2392 if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_gtp)
2393 return rte_flow_error_set(error, ENOTSUP,
2394 RTE_FLOW_ERROR_TYPE_ITEM, item,
2395 "GTP support is not enabled");
2396 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2397 return rte_flow_error_set(error, ENOTSUP,
2398 RTE_FLOW_ERROR_TYPE_ITEM, item,
2399 "multiple tunnel layers not"
2400 " supported");
2401 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2402 return rte_flow_error_set(error, EINVAL,
2403 RTE_FLOW_ERROR_TYPE_ITEM, item,
2404 "no outer UDP layer found");
2405 if (!mask)
2406 mask = &rte_flow_item_gtp_mask;
2407 if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2408 return rte_flow_error_set(error, ENOTSUP,
2409 RTE_FLOW_ERROR_TYPE_ITEM, item,
2410 "Match is supported for GTP"
2411 " flags only");
2412 return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2413 (const uint8_t *)&nic_mask,
2414 sizeof(struct rte_flow_item_gtp),
2415 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2416}
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435static int
2436flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2437 uint64_t last_item,
2438 const struct rte_flow_item *gtp_item,
2439 const struct rte_flow_attr *attr,
2440 struct rte_flow_error *error)
2441{
2442 const struct rte_flow_item_gtp *gtp_spec;
2443 const struct rte_flow_item_gtp *gtp_mask;
2444 const struct rte_flow_item_gtp_psc *mask;
2445 const struct rte_flow_item_gtp_psc nic_mask = {
2446 .hdr.type = 0xF,
2447 .hdr.qfi = 0x3F,
2448 };
2449
2450 if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2451 return rte_flow_error_set
2452 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2453 "GTP PSC item must be preceded with GTP item");
2454 gtp_spec = gtp_item->spec;
2455 gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2456
2457 if (gtp_spec &&
2458 (gtp_mask->v_pt_rsv_flags &
2459 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2460 return rte_flow_error_set
2461 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2462 "GTP E flag must be 1 to match GTP PSC");
2463
2464 if (!attr->transfer && !attr->group)
2465 return rte_flow_error_set
2466 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2467 "GTP PSC is not supported for group 0");
2468
2469 if (!item->spec)
2470 return 0;
2471 mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2472 return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2473 (const uint8_t *)&nic_mask,
2474 sizeof(struct rte_flow_item_gtp_psc),
2475 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2476}
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
/**
 * Validate an IPv4 pattern item, including the DV-specific restrictions
 * on version/IHL matching and on fragment_offset matching/ranges.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
			   const struct rte_flow_item *item,
			   uint64_t item_flags, uint64_t last_item,
			   uint16_t ether_type, struct rte_flow_error *error)
{
	int ret;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hca_attr *attr = &priv->sh->cdev->config.hca_attr;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *last = item->last;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	rte_be16_t fragment_offset_spec = 0;
	rte_be16_t fragment_offset_last = 0;
	/* NIC-supported mask; note fragment_offset is fully maskable here. */
	struct rte_flow_item_ipv4 nic_ipv4_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.fragment_offset = RTE_BE16(0xffff),
			.next_proto_id = 0xff,
			.time_to_live = 0xff,
		},
	};

	/*
	 * Matching on the IHL bits requires a per-direction HCA capability
	 * (outer vs. inner w.r.t. a tunnel layer).
	 */
	if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		bool ihl_cap = !tunnel ?
			       attr->outer_ipv4_ihl : attr->inner_ipv4_ihl;
		if (!ihl_cap)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "IPV4 ihl offload not supported");
		nic_ipv4_mask.hdr.version_ihl = mask->hdr.version_ihl;
	}
	/* Generic IPv4 item validation against the (possibly widened) mask. */
	ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
					   ether_type, &nic_ipv4_mask,
					   MLX5_ITEM_RANGE_ACCEPTED, error);
	if (ret < 0)
		return ret;
	if (spec && mask)
		fragment_offset_spec = spec->hdr.fragment_offset &
				       mask->hdr.fragment_offset;
	/* No effective match on fragment_offset - nothing more to validate. */
	if (!fragment_offset_spec)
		return 0;
	/*
	 * spec and mask are known non-NULL here (fragment_offset_spec != 0).
	 * Require a full mask over the flags+offset field so the exact-value
	 * and range checks below cover the whole 14 significant bits.
	 */
	if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
	    != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					  item, "must use full mask for"
					  " fragment_offset");
	/*
	 * spec == MF bit alone (offset 0) would match only first fragments,
	 * which the device cannot express.
	 */
	if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "match on first fragment not "
					  "supported");
	/* Any other non-zero value must come as a spec..last range. */
	if (fragment_offset_spec && !last)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "specified value not supported");
	/* spec != 0 and last is present - evaluate the requested range. */
	fragment_offset_last = last->hdr.fragment_offset &
			       mask->hdr.fragment_offset;
	/*
	 * Range (MF_FLAG + 1)..full-mask would match fragments following the
	 * first one (MF set, offset > 0) - not supported.
	 */
	if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
	    fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on following "
					  "fragments not supported");
	/*
	 * Range 1..offset-mask (MF clear, offset >= 1) would match only the
	 * last fragment - not supported.
	 */
	if (fragment_offset_spec == RTE_BE16(1) &&
	    fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on last "
					  "fragment not supported");
	/*
	 * The single supported range is 1..full-mask, i.e. "any fragment
	 * except the first one".
	 */
	if (!(fragment_offset_spec == RTE_BE16(1) &&
	      fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
					  "specified range not supported");
	return 0;
}
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
/**
 * Validate an IPv6 fragment extension header pattern item.
 *
 * The item must directly follow an IPv6 item of the same encapsulation
 * level and precede any L4 item. Only a limited set of frag_data
 * values/ranges can be offloaded (see the inline comments).
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
				    uint64_t item_flags,
				    struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
	const struct rte_flow_item_ipv6_frag_ext *last = item->last;
	const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
	rte_be16_t frag_data_spec = 0;
	rte_be16_t frag_data_last = 0;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret = 0;
	/* NIC-supported mask for this item. */
	struct rte_flow_item_ipv6_frag_ext nic_mask = {
		.hdr = {
			.next_header = 0xff,
			.frag_data = RTE_BE16(0xffff),
		},
	};

	/* Extension headers precede L4 in the packet - enforce item order. */
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "ipv6 fragment extension item cannot "
					  "follow L4 item.");
	if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
	    (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "ipv6 fragment extension item must "
					  "follow ipv6 item");
	if (spec && mask)
		frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
	/* No effective match on frag_data - nothing more to validate. */
	if (!frag_data_spec)
		return 0;
	/*
	 * spec and mask are known non-NULL here (frag_data_spec != 0).
	 * Require a full mask over the offset+MF bits so the checks below
	 * cover the complete field.
	 */
	if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
	    RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					  item, "must use full mask for"
					  " frag_data");
	/*
	 * spec == MF bit alone (offset 0) would match only first fragments,
	 * which the device cannot express.
	 */
	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "match on first fragment not "
					  "supported");
	/* Any other non-zero value must come as a spec..last range. */
	if (frag_data_spec && !last)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "specified value not supported");
	ret = mlx5_flow_item_acceptable
				(item, (const uint8_t *)mask,
				 (const uint8_t *)&nic_mask,
				 sizeof(struct rte_flow_item_ipv6_frag_ext),
				 MLX5_ITEM_RANGE_ACCEPTED, error);
	if (ret)
		return ret;
	/* spec != 0 and last is present - evaluate the requested range. */
	frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
	/*
	 * Range (first offset unit + MF)..full-mask would match fragments
	 * following the first one (MF set, offset > 0) - not supported.
	 */
	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
				       RTE_IPV6_EHDR_MF_MASK) &&
	    frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on following "
					  "fragments not supported");
	/*
	 * Range (first offset unit)..offset-mask (MF clear, offset >= 1)
	 * would match only the last fragment - not supported.
	 */
	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
	    frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on last "
					  "fragment not supported");
	/* Any other range cannot be offloaded. */
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
				  "specified range not supported");
}
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735static int
2736flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2737 const struct rte_flow_item *item,
2738 uint64_t *item_flags,
2739 struct rte_flow_error *error)
2740{
2741 const struct rte_flow_item_conntrack *spec = item->spec;
2742 const struct rte_flow_item_conntrack *mask = item->mask;
2743 RTE_SET_USED(dev);
2744 uint32_t flags;
2745
2746 if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2747 return rte_flow_error_set(error, EINVAL,
2748 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2749 "Only one CT is supported");
2750 if (!mask)
2751 mask = &rte_flow_item_conntrack_mask;
2752 flags = spec->flags & mask->flags;
2753 if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2754 ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2755 (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2756 (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2757 return rte_flow_error_set(error, EINVAL,
2758 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2759 "Conflict status bits");
2760
2761 *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2762 return 0;
2763}
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784static int
2785flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2786 uint64_t action_flags,
2787 const struct rte_flow_action *action,
2788 uint64_t item_flags,
2789 const struct rte_flow_attr *attr,
2790 struct rte_flow_error *error)
2791{
2792 const struct mlx5_priv *priv = dev->data->dev_private;
2793 struct mlx5_dev_ctx_shared *sh = priv->sh;
2794 bool direction_error = false;
2795
2796 if (!priv->sh->pop_vlan_action)
2797 return rte_flow_error_set(error, ENOTSUP,
2798 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2799 NULL,
2800 "pop vlan action is not supported");
2801
2802 if (attr->transfer) {
2803 bool fdb_tx = priv->representor_id != UINT16_MAX;
2804 bool is_cx5 = sh->steering_format_version ==
2805 MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
2806
2807 if (fdb_tx && is_cx5)
2808 direction_error = true;
2809 } else if (attr->egress) {
2810 direction_error = true;
2811 }
2812 if (direction_error)
2813 return rte_flow_error_set(error, ENOTSUP,
2814 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2815 NULL,
2816 "pop vlan action not supported for egress");
2817 if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2818 return rte_flow_error_set(error, ENOTSUP,
2819 RTE_FLOW_ERROR_TYPE_ACTION, action,
2820 "no support for multiple VLAN "
2821 "actions");
2822
2823 if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2824 !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2825 return rte_flow_error_set(error, ENOTSUP,
2826 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2827 NULL,
2828 "cannot pop vlan after decap without "
2829 "match on inner vlan in the flow");
2830
2831 if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2832 !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2833 return rte_flow_error_set(error, ENOTSUP,
2834 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2835 NULL,
2836 "cannot pop vlan without a "
2837 "match on (outer) vlan in the flow");
2838 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2839 return rte_flow_error_set(error, EINVAL,
2840 RTE_FLOW_ERROR_TYPE_ACTION, action,
2841 "wrong action order, port_id should "
2842 "be after pop VLAN action");
2843 if (!attr->transfer && priv->representor)
2844 return rte_flow_error_set(error, ENOTSUP,
2845 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2846 "pop vlan action for VF representor "
2847 "not supported on NIC table");
2848 return 0;
2849}
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862static void
2863flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2864 struct rte_vlan_hdr *vlan)
2865{
2866 const struct rte_flow_item_vlan nic_mask = {
2867 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2868 MLX5DV_FLOW_VLAN_VID_MASK),
2869 .inner_type = RTE_BE16(0xffff),
2870 };
2871
2872 if (items == NULL)
2873 return;
2874 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2875 int type = items->type;
2876
2877 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2878 type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2879 break;
2880 }
2881 if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2882 const struct rte_flow_item_vlan *vlan_m = items->mask;
2883 const struct rte_flow_item_vlan *vlan_v = items->spec;
2884
2885
2886 if (!vlan_v)
2887 return;
2888 if (!vlan_m)
2889 vlan_m = &nic_mask;
2890
2891 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2892 MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2893 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2894 vlan->vlan_tci |=
2895 rte_be_to_cpu_16(vlan_v->tci &
2896 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2897 }
2898 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2899 MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2900 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2901 vlan->vlan_tci |=
2902 rte_be_to_cpu_16(vlan_v->tci &
2903 MLX5DV_FLOW_VLAN_VID_MASK_BE);
2904 }
2905 if (vlan_m->inner_type == nic_mask.inner_type)
2906 vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2907 vlan_m->inner_type);
2908 }
2909}
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930static int
2931flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2932 uint64_t action_flags,
2933 const struct rte_flow_item_vlan *vlan_m,
2934 const struct rte_flow_action *action,
2935 const struct rte_flow_attr *attr,
2936 struct rte_flow_error *error)
2937{
2938 const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2939 const struct mlx5_priv *priv = dev->data->dev_private;
2940
2941 if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2942 push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2943 return rte_flow_error_set(error, EINVAL,
2944 RTE_FLOW_ERROR_TYPE_ACTION, action,
2945 "invalid vlan ethertype");
2946 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2947 return rte_flow_error_set(error, EINVAL,
2948 RTE_FLOW_ERROR_TYPE_ACTION, action,
2949 "wrong action order, port_id should "
2950 "be after push VLAN");
2951 if (!attr->transfer && priv->representor)
2952 return rte_flow_error_set(error, ENOTSUP,
2953 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2954 "push vlan action for VF representor "
2955 "not supported on NIC table");
2956 if (vlan_m &&
2957 (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2958 (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2959 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2960 !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2961 !(mlx5_flow_find_action
2962 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2963 return rte_flow_error_set(error, EINVAL,
2964 RTE_FLOW_ERROR_TYPE_ACTION, action,
2965 "not full match mask on VLAN PCP and "
2966 "there is no of_set_vlan_pcp action, "
2967 "push VLAN action cannot figure out "
2968 "PCP value");
2969 if (vlan_m &&
2970 (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2971 (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2972 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2973 !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2974 !(mlx5_flow_find_action
2975 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2976 return rte_flow_error_set(error, EINVAL,
2977 RTE_FLOW_ERROR_TYPE_ACTION, action,
2978 "not full match mask on VLAN VID and "
2979 "there is no of_set_vlan_vid action, "
2980 "push VLAN action cannot figure out "
2981 "VID value");
2982 (void)attr;
2983 return 0;
2984}
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999static int
3000flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
3001 const struct rte_flow_action actions[],
3002 struct rte_flow_error *error)
3003{
3004 const struct rte_flow_action *action = actions;
3005 const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
3006
3007 if (conf->vlan_pcp > 7)
3008 return rte_flow_error_set(error, EINVAL,
3009 RTE_FLOW_ERROR_TYPE_ACTION, action,
3010 "VLAN PCP value is too big");
3011 if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
3012 return rte_flow_error_set(error, ENOTSUP,
3013 RTE_FLOW_ERROR_TYPE_ACTION, action,
3014 "set VLAN PCP action must follow "
3015 "the push VLAN action");
3016 if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
3017 return rte_flow_error_set(error, ENOTSUP,
3018 RTE_FLOW_ERROR_TYPE_ACTION, action,
3019 "Multiple VLAN PCP modification are "
3020 "not supported");
3021 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3022 return rte_flow_error_set(error, EINVAL,
3023 RTE_FLOW_ERROR_TYPE_ACTION, action,
3024 "wrong action order, port_id should "
3025 "be after set VLAN PCP");
3026 return 0;
3027}
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044static int
3045flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
3046 uint64_t action_flags,
3047 const struct rte_flow_action actions[],
3048 struct rte_flow_error *error)
3049{
3050 const struct rte_flow_action *action = actions;
3051 const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
3052
3053 if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
3054 return rte_flow_error_set(error, EINVAL,
3055 RTE_FLOW_ERROR_TYPE_ACTION, action,
3056 "VLAN VID value is too big");
3057 if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
3058 !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
3059 return rte_flow_error_set(error, ENOTSUP,
3060 RTE_FLOW_ERROR_TYPE_ACTION, action,
3061 "set VLAN VID action must follow push"
3062 " VLAN action or match on VLAN item");
3063 if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3064 return rte_flow_error_set(error, ENOTSUP,
3065 RTE_FLOW_ERROR_TYPE_ACTION, action,
3066 "Multiple VLAN VID modifications are "
3067 "not supported");
3068 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3069 return rte_flow_error_set(error, EINVAL,
3070 RTE_FLOW_ERROR_TYPE_ACTION, action,
3071 "wrong action order, port_id should "
3072 "be after set VLAN VID");
3073 return 0;
3074}
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091static int
3092flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3093 uint64_t action_flags,
3094 const struct rte_flow_attr *attr,
3095 struct rte_flow_error *error)
3096{
3097 struct mlx5_priv *priv = dev->data->dev_private;
3098 struct mlx5_sh_config *config = &priv->sh->config;
3099 int ret;
3100
3101
3102 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3103 return mlx5_flow_validate_action_flag(action_flags, attr,
3104 error);
3105
3106 if (!mlx5_flow_ext_mreg_supported(dev))
3107 return rte_flow_error_set(error, ENOTSUP,
3108 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3109 "no metadata registers "
3110 "to support flag action");
3111 if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3112 return rte_flow_error_set(error, ENOTSUP,
3113 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3114 "extended metadata register"
3115 " isn't available");
3116 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3117 if (ret < 0)
3118 return ret;
3119 MLX5_ASSERT(ret > 0);
3120 if (action_flags & MLX5_FLOW_ACTION_MARK)
3121 return rte_flow_error_set(error, EINVAL,
3122 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3123 "can't mark and flag in same flow");
3124 if (action_flags & MLX5_FLOW_ACTION_FLAG)
3125 return rte_flow_error_set(error, EINVAL,
3126 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3127 "can't have 2 flag"
3128 " actions in same flow");
3129 return 0;
3130}
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
/**
 * Validate the mark action.
 *
 * Rejected when tunnel offload is active. In legacy metadata mode the
 * generic validator is used; otherwise the extended metadata (register)
 * path is checked, the mark id is bounded by the available mask, and the
 * flag/mark exclusivity rules are enforced.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action
 *   Pointer to the mark action.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_mark(struct rte_eth_dev *dev,
			     const struct rte_flow_action *action,
			     uint64_t action_flags,
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_sh_config *config = &priv->sh->config;
	const struct rte_flow_action_mark *mark = action->conf;
	int ret;

	/* Tunnel offload reserves the mark register for its own use. */
	if (is_tunnel_offload_active(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "no mark action "
					  "if tunnel offload active");
	/* Fall back to the common validation in legacy metadata mode. */
	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
		return mlx5_flow_validate_action_mark(action, action_flags,
						      attr, error);
	/* Extended metadata mode requires metadata registers. */
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "no metadata registers "
					  "to support mark action");
	if (!priv->sh->dv_mark_mask)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "extended metadata register"
					  " isn't available");
	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret > 0);
	if (!mark)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	/* The id must fit into the part of the register actually usable. */
	if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &mark->id,
					  "mark id exceeds the limit");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't flag and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 mark actions in same"
					  " flow");
	return 0;
}
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223static int
3224flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3225 const struct rte_flow_action *action,
3226 uint64_t action_flags __rte_unused,
3227 const struct rte_flow_attr *attr,
3228 struct rte_flow_error *error)
3229{
3230 struct mlx5_priv *priv = dev->data->dev_private;
3231 struct mlx5_sh_config *config = &priv->sh->config;
3232 const struct rte_flow_action_set_meta *conf;
3233 uint32_t nic_mask = UINT32_MAX;
3234 int reg;
3235
3236 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
3237 !mlx5_flow_ext_mreg_supported(dev))
3238 return rte_flow_error_set(error, ENOTSUP,
3239 RTE_FLOW_ERROR_TYPE_ACTION, action,
3240 "extended metadata register"
3241 " isn't supported");
3242 reg = flow_dv_get_metadata_reg(dev, attr, error);
3243 if (reg < 0)
3244 return reg;
3245 if (reg == REG_NON)
3246 return rte_flow_error_set(error, ENOTSUP,
3247 RTE_FLOW_ERROR_TYPE_ACTION, action,
3248 "unavailable extended metadata register");
3249 if (reg != REG_A && reg != REG_B) {
3250 struct mlx5_priv *priv = dev->data->dev_private;
3251
3252 nic_mask = priv->sh->dv_meta_mask;
3253 }
3254 if (!(action->conf))
3255 return rte_flow_error_set(error, EINVAL,
3256 RTE_FLOW_ERROR_TYPE_ACTION, action,
3257 "configuration cannot be null");
3258 conf = (const struct rte_flow_action_set_meta *)action->conf;
3259 if (!conf->mask)
3260 return rte_flow_error_set(error, EINVAL,
3261 RTE_FLOW_ERROR_TYPE_ACTION, action,
3262 "zero mask doesn't have any effect");
3263 if (conf->mask & ~nic_mask)
3264 return rte_flow_error_set(error, EINVAL,
3265 RTE_FLOW_ERROR_TYPE_ACTION, action,
3266 "meta data must be within reg C0");
3267 return 0;
3268}
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287static int
3288flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3289 const struct rte_flow_action *action,
3290 uint64_t action_flags,
3291 const struct rte_flow_attr *attr,
3292 struct rte_flow_error *error)
3293{
3294 const struct rte_flow_action_set_tag *conf;
3295 const uint64_t terminal_action_flags =
3296 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3297 MLX5_FLOW_ACTION_RSS;
3298 int ret;
3299
3300 if (!mlx5_flow_ext_mreg_supported(dev))
3301 return rte_flow_error_set(error, ENOTSUP,
3302 RTE_FLOW_ERROR_TYPE_ACTION, action,
3303 "extensive metadata register"
3304 " isn't supported");
3305 if (!(action->conf))
3306 return rte_flow_error_set(error, EINVAL,
3307 RTE_FLOW_ERROR_TYPE_ACTION, action,
3308 "configuration cannot be null");
3309 conf = (const struct rte_flow_action_set_tag *)action->conf;
3310 if (!conf->mask)
3311 return rte_flow_error_set(error, EINVAL,
3312 RTE_FLOW_ERROR_TYPE_ACTION, action,
3313 "zero mask doesn't have any effect");
3314 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3315 if (ret < 0)
3316 return ret;
3317 if (!attr->transfer && attr->ingress &&
3318 (action_flags & terminal_action_flags))
3319 return rte_flow_error_set(error, EINVAL,
3320 RTE_FLOW_ERROR_TYPE_ACTION, action,
3321 "set_tag has no effect"
3322 " with terminal actions");
3323 return 0;
3324}
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337static inline bool
3338flow_hit_aso_supported(const struct mlx5_dev_ctx_shared *sh,
3339 const struct rte_flow_attr *attr)
3340{
3341 MLX5_ASSERT(sh && attr);
3342 return (sh->flow_hit_aso_en && (attr->transfer || attr->group));
3343}
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
/**
 * Validate the count action.
 *
 * Requires DevX; rejects duplicate count actions and the combination of
 * an indirect (shared) counter with the legacy age action when ASO
 * flow-hit is not available for this flow. Counter support itself is
 * compiled in only with HAVE_IBV_FLOW_DEVX_COUNTERS.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] shared
 *   Indicator if action is shared (indirect).
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
			      uint64_t action_flags,
			      const struct rte_flow_attr *attr,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!priv->sh->cdev->config.devx)
		goto notsup_err;
	if (action_flags & MLX5_FLOW_ACTION_COUNT)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "duplicate count actions set");
	/*
	 * A shared counter cannot emulate the legacy age action unless the
	 * ASO flow-hit mechanism can track aging for this flow.
	 */
	if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
	    !flow_hit_aso_supported(priv->sh, attr))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "old age and indirect count combination is not supported");
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
	return 0;
#endif
	/* Without DevX counter support, fall through to "not supported". */
notsup_err:
	return rte_flow_error_set
		      (error, ENOTSUP,
		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
		       NULL,
		       "count action not supported");
}
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409static int
3410flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3411 uint64_t action_flags,
3412 const struct rte_flow_action *action,
3413 const struct rte_flow_attr *attr,
3414 struct rte_flow_error *error)
3415{
3416 const struct mlx5_priv *priv = dev->data->dev_private;
3417
3418 if (!(action->conf))
3419 return rte_flow_error_set(error, EINVAL,
3420 RTE_FLOW_ERROR_TYPE_ACTION, action,
3421 "configuration cannot be null");
3422 if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3423 return rte_flow_error_set(error, EINVAL,
3424 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3425 "can only have a single encap action "
3426 "in a flow");
3427 if (!attr->transfer && priv->representor)
3428 return rte_flow_error_set(error, ENOTSUP,
3429 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3430 "encap action for VF representor "
3431 "not supported on NIC table");
3432 return 0;
3433}
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454static int
3455flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3456 uint64_t action_flags,
3457 const struct rte_flow_action *action,
3458 const uint64_t item_flags,
3459 const struct rte_flow_attr *attr,
3460 struct rte_flow_error *error)
3461{
3462 const struct mlx5_priv *priv = dev->data->dev_private;
3463
3464 if (priv->sh->cdev->config.hca_attr.scatter_fcs_w_decap_disable &&
3465 !priv->sh->config.decap_en)
3466 return rte_flow_error_set(error, ENOTSUP,
3467 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3468 "decap is not enabled");
3469 if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3470 return rte_flow_error_set(error, ENOTSUP,
3471 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3472 action_flags &
3473 MLX5_FLOW_ACTION_DECAP ? "can only "
3474 "have a single decap action" : "decap "
3475 "after encap is not supported");
3476 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3477 return rte_flow_error_set(error, EINVAL,
3478 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3479 "can't have decap action after"
3480 " modify action");
3481 if (attr->egress)
3482 return rte_flow_error_set(error, ENOTSUP,
3483 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3484 NULL,
3485 "decap action not supported for "
3486 "egress");
3487 if (!attr->transfer && priv->representor)
3488 return rte_flow_error_set(error, ENOTSUP,
3489 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3490 "decap action for VF representor "
3491 "not supported on NIC table");
3492 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3493 !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3494 return rte_flow_error_set(error, ENOTSUP,
3495 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3496 "VXLAN item should be present for VXLAN decap");
3497 return 0;
3498}
3499
/* Shared zero-sized raw decap configuration (decap of the outer L2 only). */
const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526
/**
 * Validate the raw_encap and raw_decap actions, possibly combined.
 *
 * When both are present, the buffer sizes relative to
 * MLX5_ENCAPSULATION_DECISION_SIZE decide the hardware interpretation:
 * small decap + big encap is an L3 encap (the decap side is implicit),
 * big decap + small encap is an L3 decap, and two big buffers form two
 * separate L2 actions. On success the detected actions are accumulated
 * into *action_flags and *actions_n.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] decap
 *   Pointer to the decap action configuration, may be NULL.
 * @param[in] encap
 *   Pointer to the encap action configuration, may be NULL.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[in,out] action_flags
 *   Holds the actions detected until now.
 * @param[in,out] actions_n
 *   Number of actions counted so far.
 * @param[in] action
 *   Pointer to the action being validated.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_encap_decap
	(struct rte_eth_dev *dev,
	 const struct rte_flow_action_raw_decap *decap,
	 const struct rte_flow_action_raw_encap *encap,
	 const struct rte_flow_attr *attr, uint64_t *action_flags,
	 int *actions_n, const struct rte_flow_action *action,
	 uint64_t item_flags, struct rte_flow_error *error)
{
	const struct mlx5_priv *priv = dev->data->dev_private;
	int ret;

	if (encap && (!encap->size || !encap->data))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "raw encap data cannot be empty");
	if (decap && encap) {
		if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
		    encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
			/* L3 encap - the small decap is folded into it. */
			decap = NULL;
		else if (encap->size <=
			 MLX5_ENCAPSULATION_DECISION_SIZE &&
			 decap->size >
			 MLX5_ENCAPSULATION_DECISION_SIZE)
			/* L3 decap - the small encap is folded into it. */
			encap = NULL;
		else if (encap->size >
			 MLX5_ENCAPSULATION_DECISION_SIZE &&
			 decap->size >
			 MLX5_ENCAPSULATION_DECISION_SIZE)
			/* Two distinct L2 actions: decap then encap. */
			;
		else
			/* Both buffers below the decision size - invalid. */
			return rte_flow_error_set(error,
				ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "unsupported too small "
				"raw decap and too small raw "
				"encap combination");
	}
	if (decap) {
		ret = flow_dv_validate_action_decap(dev, *action_flags, action,
						    item_flags, attr, error);
		if (ret < 0)
			return ret;
		*action_flags |= MLX5_FLOW_ACTION_DECAP;
		++(*actions_n);
	}
	if (encap) {
		if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "small raw encap size");
		if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "more than one encap action");
		if (!attr->transfer && priv->representor)
			return rte_flow_error_set
					(error, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "encap action for VF representor "
					 "not supported on NIC table");
		*action_flags |= MLX5_FLOW_ACTION_ENCAP;
		++(*actions_n);
	}
	return 0;
}
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616static int
3617flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3618 uint64_t action_flags,
3619 uint64_t item_flags,
3620 const struct rte_flow_attr *attr,
3621 struct rte_flow_error *error)
3622{
3623 RTE_SET_USED(dev);
3624
3625 if (attr->group == 0 && !attr->transfer)
3626 return rte_flow_error_set(error, ENOTSUP,
3627 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3628 NULL,
3629 "Only support non-root table");
3630 if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3631 return rte_flow_error_set(error, ENOTSUP,
3632 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3633 "CT cannot follow a fate action");
3634 if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3635 (action_flags & MLX5_FLOW_ACTION_AGE))
3636 return rte_flow_error_set(error, EINVAL,
3637 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3638 "Only one ASO action is supported");
3639 if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3640 return rte_flow_error_set(error, EINVAL,
3641 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3642 "Encap cannot exist before CT");
3643 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3644 return rte_flow_error_set(error, EINVAL,
3645 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3646 "Not a outer TCP packet");
3647 return 0;
3648}
3649
3650int
3651flow_dv_encap_decap_match_cb(void *tool_ctx __rte_unused,
3652 struct mlx5_list_entry *entry, void *cb_ctx)
3653{
3654 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3655 struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3656 struct mlx5_flow_dv_encap_decap_resource *resource;
3657
3658 resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
3659 entry);
3660 if (resource->reformat_type == ctx_resource->reformat_type &&
3661 resource->ft_type == ctx_resource->ft_type &&
3662 resource->flags == ctx_resource->flags &&
3663 resource->size == ctx_resource->size &&
3664 !memcmp((const void *)resource->buf,
3665 (const void *)ctx_resource->buf,
3666 resource->size))
3667 return 0;
3668 return -1;
3669}
3670
/**
 * Cache list create callback for encap/decap resources.
 *
 * Allocates a resource from the indexed pool, copies the reference
 * resource from the callback context, and creates the packet-reformat
 * action in the DR domain selected by the resource's table type.
 *
 * @return
 *   The new list entry on success, NULL otherwise (ctx->error is set).
 */
struct mlx5_list_entry *
flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5dv_dr_domain *domain;
	struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
	struct mlx5_flow_dv_encap_decap_resource *resource;
	uint32_t idx;
	int ret;

	/* Pick the DR domain matching the flow table type. */
	if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
	/* Register new encap/decap resource. */
	resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
	if (!resource) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate resource memory");
		return NULL;
	}
	*resource = *ctx_resource;
	resource->idx = idx;
	ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->cdev->ctx,
							      domain, resource,
							      &resource->action);
	if (ret) {
		/* Creation failed - return the pool slot before bailing out. */
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create action");
		return NULL;
	}

	return &resource->entry;
}
3711
3712struct mlx5_list_entry *
3713flow_dv_encap_decap_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
3714 void *cb_ctx)
3715{
3716 struct mlx5_dev_ctx_shared *sh = tool_ctx;
3717 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3718 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3719 uint32_t idx;
3720
3721 cache_resource = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3722 &idx);
3723 if (!cache_resource) {
3724 rte_flow_error_set(ctx->error, ENOMEM,
3725 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3726 "cannot allocate resource memory");
3727 return NULL;
3728 }
3729 memcpy(cache_resource, oentry, sizeof(*cache_resource));
3730 cache_resource->idx = idx;
3731 return &cache_resource->entry;
3732}
3733
3734void
3735flow_dv_encap_decap_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3736{
3737 struct mlx5_dev_ctx_shared *sh = tool_ctx;
3738 struct mlx5_flow_dv_encap_decap_resource *res =
3739 container_of(entry, typeof(*res), entry);
3740
3741 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
3742}
3743
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756
3757
3758
/**
 * Find an existing encap/decap resource or register a new one.
 *
 * Prepares the per-device encap/decap hash list on first use, computes a
 * 64-bit hash key from the resource attributes (and the raw reformat
 * buffer where relevant) and registers the resource, reusing a matching
 * entry when one already exists.
 *
 * @param[in] dev
 *   Ethernet device.
 * @param[in, out] resource
 *   Reference resource to look up or create; on success the caller's
 *   dev_flow is pointed at the (possibly pre-existing) registered copy.
 * @param[in, out] dev_flow
 *   Device flow receiving the resolved resource and its pool index.
 * @param[out] error
 *   Flow error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise (rte_errno is set).
 */
static int
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_list_entry *entry;
	/*
	 * Compact hash key: table type, reformat type and root flag packed
	 * into one 32-bit word so a single checksum covers them all.
	 */
	union {
		struct {
			uint32_t ft_type:8;
			uint32_t refmt_type:8;
			uint32_t is_root:1;
			uint32_t reserve:15;
		};
		uint32_t v32;
	} encap_decap_key = {
		{
			.ft_type = resource->ft_type,
			.refmt_type = resource->reformat_type,
			.is_root = !!dev_flow->dv.group,
			.reserve = 0,
		}
	};
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = resource,
	};
	struct mlx5_hlist *encaps_decaps;
	uint64_t key64;

	/* Lazily create the shared hash list on first registration. */
	encaps_decaps = flow_dv_hlist_prepare(sh, &sh->encaps_decaps,
				"encaps_decaps",
				MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
				true, true, sh,
				flow_dv_encap_decap_create_cb,
				flow_dv_encap_decap_match_cb,
				flow_dv_encap_decap_remove_cb,
				flow_dv_encap_decap_clone_cb,
				flow_dv_encap_decap_clone_free_cb,
				error);
	if (unlikely(!encaps_decaps))
		return -rte_errno;
	/* Non-zero group means non-root table; flags = 1 marks root level. */
	resource->flags = dev_flow->dv.group ? 0 : 1;
	key64 =  __rte_raw_cksum(&encap_decap_key.v32,
				 sizeof(encap_decap_key.v32), 0);
	/*
	 * Mix the raw header bytes into the key, except for plain L2 decap
	 * which carries no meaningful buffer contents.
	 */
	if (resource->reformat_type !=
	    MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
	    resource->size)
		key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
	entry = mlx5_hlist_register(encaps_decaps, key64, &ctx);
	if (!entry)
		return -rte_errno;
	resource = container_of(entry, typeof(*resource), entry);
	dev_flow->dv.encap_decap = resource;
	dev_flow->handle->dvh.rix_encap_decap = resource->idx;
	return 0;
}
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836
3837
3838
3839
3840static int
3841flow_dv_jump_tbl_resource_register
3842 (struct rte_eth_dev *dev __rte_unused,
3843 struct mlx5_flow_tbl_resource *tbl,
3844 struct mlx5_flow *dev_flow,
3845 struct rte_flow_error *error __rte_unused)
3846{
3847 struct mlx5_flow_tbl_data_entry *tbl_data =
3848 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3849
3850 MLX5_ASSERT(tbl);
3851 MLX5_ASSERT(tbl_data->jump.action);
3852 dev_flow->handle->rix_jump = tbl_data->idx;
3853 dev_flow->dv.jump = &tbl_data->jump;
3854 return 0;
3855}
3856
3857int
3858flow_dv_port_id_match_cb(void *tool_ctx __rte_unused,
3859 struct mlx5_list_entry *entry, void *cb_ctx)
3860{
3861 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3862 struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3863 struct mlx5_flow_dv_port_id_action_resource *res =
3864 container_of(entry, typeof(*res), entry);
3865
3866 return ref->port_id != res->port_id;
3867}
3868
3869struct mlx5_list_entry *
3870flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx)
3871{
3872 struct mlx5_dev_ctx_shared *sh = tool_ctx;
3873 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3874 struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3875 struct mlx5_flow_dv_port_id_action_resource *resource;
3876 uint32_t idx;
3877 int ret;
3878
3879
3880 resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3881 if (!resource) {
3882 rte_flow_error_set(ctx->error, ENOMEM,
3883 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3884 "cannot allocate port_id action memory");
3885 return NULL;
3886 }
3887 *resource = *ref;
3888 ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3889 ref->port_id,
3890 &resource->action);
3891 if (ret) {
3892 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3893 rte_flow_error_set(ctx->error, ENOMEM,
3894 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3895 "cannot create action");
3896 return NULL;
3897 }
3898 resource->idx = idx;
3899 return &resource->entry;
3900}
3901
3902struct mlx5_list_entry *
3903flow_dv_port_id_clone_cb(void *tool_ctx,
3904 struct mlx5_list_entry *entry __rte_unused,
3905 void *cb_ctx)
3906{
3907 struct mlx5_dev_ctx_shared *sh = tool_ctx;
3908 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3909 struct mlx5_flow_dv_port_id_action_resource *resource;
3910 uint32_t idx;
3911
3912 resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3913 if (!resource) {
3914 rte_flow_error_set(ctx->error, ENOMEM,
3915 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3916 "cannot allocate port_id action memory");
3917 return NULL;
3918 }
3919 memcpy(resource, entry, sizeof(*resource));
3920 resource->idx = idx;
3921 return &resource->entry;
3922}
3923
3924void
3925flow_dv_port_id_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3926{
3927 struct mlx5_dev_ctx_shared *sh = tool_ctx;
3928 struct mlx5_flow_dv_port_id_action_resource *resource =
3929 container_of(entry, typeof(*resource), entry);
3930
3931 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
3932}
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943
3944
3945
3946
3947
3948
3949static int
3950flow_dv_port_id_action_resource_register
3951 (struct rte_eth_dev *dev,
3952 struct mlx5_flow_dv_port_id_action_resource *ref,
3953 struct mlx5_flow *dev_flow,
3954 struct rte_flow_error *error)
3955{
3956 struct mlx5_priv *priv = dev->data->dev_private;
3957 struct mlx5_list_entry *entry;
3958 struct mlx5_flow_dv_port_id_action_resource *resource;
3959 struct mlx5_flow_cb_ctx ctx = {
3960 .error = error,
3961 .data = ref,
3962 };
3963
3964 entry = mlx5_list_register(priv->sh->port_id_action_list, &ctx);
3965 if (!entry)
3966 return -rte_errno;
3967 resource = container_of(entry, typeof(*resource), entry);
3968 dev_flow->dv.port_id_action = resource;
3969 dev_flow->handle->rix_port_id_action = resource->idx;
3970 return 0;
3971}
3972
3973int
3974flow_dv_push_vlan_match_cb(void *tool_ctx __rte_unused,
3975 struct mlx5_list_entry *entry, void *cb_ctx)
3976{
3977 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3978 struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3979 struct mlx5_flow_dv_push_vlan_action_resource *res =
3980 container_of(entry, typeof(*res), entry);
3981
3982 return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3983}
3984
3985struct mlx5_list_entry *
3986flow_dv_push_vlan_create_cb(void *tool_ctx, void *cb_ctx)
3987{
3988 struct mlx5_dev_ctx_shared *sh = tool_ctx;
3989 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3990 struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3991 struct mlx5_flow_dv_push_vlan_action_resource *resource;
3992 struct mlx5dv_dr_domain *domain;
3993 uint32_t idx;
3994 int ret;
3995
3996
3997 resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3998 if (!resource) {
3999 rte_flow_error_set(ctx->error, ENOMEM,
4000 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4001 "cannot allocate push_vlan action memory");
4002 return NULL;
4003 }
4004 *resource = *ref;
4005 if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4006 domain = sh->fdb_domain;
4007 else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
4008 domain = sh->rx_domain;
4009 else
4010 domain = sh->tx_domain;
4011 ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
4012 &resource->action);
4013 if (ret) {
4014 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
4015 rte_flow_error_set(ctx->error, ENOMEM,
4016 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4017 "cannot create push vlan action");
4018 return NULL;
4019 }
4020 resource->idx = idx;
4021 return &resource->entry;
4022}
4023
4024struct mlx5_list_entry *
4025flow_dv_push_vlan_clone_cb(void *tool_ctx,
4026 struct mlx5_list_entry *entry __rte_unused,
4027 void *cb_ctx)
4028{
4029 struct mlx5_dev_ctx_shared *sh = tool_ctx;
4030 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4031 struct mlx5_flow_dv_push_vlan_action_resource *resource;
4032 uint32_t idx;
4033
4034 resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
4035 if (!resource) {
4036 rte_flow_error_set(ctx->error, ENOMEM,
4037 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4038 "cannot allocate push_vlan action memory");
4039 return NULL;
4040 }
4041 memcpy(resource, entry, sizeof(*resource));
4042 resource->idx = idx;
4043 return &resource->entry;
4044}
4045
4046void
4047flow_dv_push_vlan_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
4048{
4049 struct mlx5_dev_ctx_shared *sh = tool_ctx;
4050 struct mlx5_flow_dv_push_vlan_action_resource *resource =
4051 container_of(entry, typeof(*resource), entry);
4052
4053 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
4054}
4055
4056
4057
4058
4059
4060
4061
4062
4063
4064
4065
4066
4067
4068
4069
4070
4071static int
4072flow_dv_push_vlan_action_resource_register
4073 (struct rte_eth_dev *dev,
4074 struct mlx5_flow_dv_push_vlan_action_resource *ref,
4075 struct mlx5_flow *dev_flow,
4076 struct rte_flow_error *error)
4077{
4078 struct mlx5_priv *priv = dev->data->dev_private;
4079 struct mlx5_flow_dv_push_vlan_action_resource *resource;
4080 struct mlx5_list_entry *entry;
4081 struct mlx5_flow_cb_ctx ctx = {
4082 .error = error,
4083 .data = ref,
4084 };
4085
4086 entry = mlx5_list_register(priv->sh->push_vlan_action_list, &ctx);
4087 if (!entry)
4088 return -rte_errno;
4089 resource = container_of(entry, typeof(*resource), entry);
4090
4091 dev_flow->handle->dvh.rix_push_vlan = resource->idx;
4092 dev_flow->dv.push_vlan_res = resource;
4093 return 0;
4094}
4095
4096
4097
4098
4099
4100
4101
4102
4103
4104
4105size_t
4106flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
4107{
4108 size_t retval;
4109
4110 switch (item_type) {
4111 case RTE_FLOW_ITEM_TYPE_ETH:
4112 retval = sizeof(struct rte_ether_hdr);
4113 break;
4114 case RTE_FLOW_ITEM_TYPE_VLAN:
4115 retval = sizeof(struct rte_vlan_hdr);
4116 break;
4117 case RTE_FLOW_ITEM_TYPE_IPV4:
4118 retval = sizeof(struct rte_ipv4_hdr);
4119 break;
4120 case RTE_FLOW_ITEM_TYPE_IPV6:
4121 retval = sizeof(struct rte_ipv6_hdr);
4122 break;
4123 case RTE_FLOW_ITEM_TYPE_UDP:
4124 retval = sizeof(struct rte_udp_hdr);
4125 break;
4126 case RTE_FLOW_ITEM_TYPE_TCP:
4127 retval = sizeof(struct rte_tcp_hdr);
4128 break;
4129 case RTE_FLOW_ITEM_TYPE_VXLAN:
4130 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4131 retval = sizeof(struct rte_vxlan_hdr);
4132 break;
4133 case RTE_FLOW_ITEM_TYPE_GRE:
4134 case RTE_FLOW_ITEM_TYPE_NVGRE:
4135 retval = sizeof(struct rte_gre_hdr);
4136 break;
4137 case RTE_FLOW_ITEM_TYPE_MPLS:
4138 retval = sizeof(struct rte_mpls_hdr);
4139 break;
4140 case RTE_FLOW_ITEM_TYPE_VOID:
4141 default:
4142 retval = 0;
4143 break;
4144 }
4145 return retval;
4146}
4147
4148#define MLX5_ENCAP_IPV4_VERSION 0x40
4149#define MLX5_ENCAP_IPV4_IHL_MIN 0x05
4150#define MLX5_ENCAP_IPV4_TTL_DEF 0x40
4151#define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
4152#define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
4153#define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
4154#define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
4155
4156
4157
4158
4159
4160
4161
4162
4163
4164
4165
4166
4167
4168
4169
4170
/**
 * Convert a flow item list into raw encapsulation header data.
 *
 * Walks the items until RTE_FLOW_ITEM_TYPE_END, copies each item's spec
 * into @p buf and fills in mandatory protocol fields (ether type, IP
 * version/TTL/hop limit, next protocol, UDP destination port, VXLAN
 * flags) that the application left zeroed. Item ordering is validated:
 * each layer requires the expected lower layer to precede it.
 *
 * @param[in] items
 *   Item list terminated by RTE_FLOW_ITEM_TYPE_END.
 * @param[out] buf
 *   Output buffer; must be at least MLX5_ENCAP_MAX_LEN bytes.
 * @param[out] size
 *   Total size of the assembled header in bytes.
 * @param[out] error
 *   Flow error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise (rte_errno is set).
 */
int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
			   size_t *size, struct rte_flow_error *error)
{
	struct rte_ether_hdr *eth = NULL;
	struct rte_vlan_hdr *vlan = NULL;
	struct rte_ipv4_hdr *ipv4 = NULL;
	struct rte_ipv6_hdr *ipv6 = NULL;
	struct rte_udp_hdr *udp = NULL;
	struct rte_vxlan_hdr *vxlan = NULL;
	struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
	struct rte_gre_hdr *gre = NULL;
	size_t len;
	size_t temp_size = 0;

	if (!items)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "invalid empty data");
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		len = flow_dv_get_item_hdr_len(items->type);
		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "items total size is too big"
						  " for encap action");
		/*
		 * NOTE(review): items->spec is dereferenced without a NULL
		 * check - presumably guaranteed non-NULL by earlier
		 * validation of encap items; confirm against callers.
		 */
		rte_memcpy((void *)&buf[temp_size], items->spec, len);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = (struct rte_ether_hdr *)&buf[temp_size];
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan = (struct rte_vlan_hdr *)&buf[temp_size];
			if (!eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"eth header not found");
			/* Default the preceding ether type to 802.1Q. */
			if (!eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
			/* Fill default version/IHL and TTL when left zero. */
			if (!ipv4->version_ihl)
				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
						    MLX5_ENCAP_IPV4_IHL_MIN;
			if (!ipv4->time_to_live)
				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
			/* Default version field and hop limit when zero. */
			if (!ipv6->vtc_flow)
				ipv6->vtc_flow =
					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
			if (!ipv6->hop_limits)
				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = (struct rte_udp_hdr *)&buf[temp_size];
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			/* Default the IP next protocol to UDP when unset. */
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_UDP;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			if (!udp->dst_port)
				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
			/* Set the VNI-valid flag when flags are unset. */
			if (!vxlan->vx_flags)
				vxlan->vx_flags =
					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			/* GPE requires an explicit next protocol. */
			if (!vxlan_gpe->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!udp->dst_port)
				udp->dst_port =
					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
			if (!vxlan_gpe->vx_flags)
				vxlan_gpe->vx_flags =
						MLX5_ENCAP_VXLAN_GPE_FLAGS;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			gre = (struct rte_gre_hdr *)&buf[temp_size];
			/* GRE requires an explicit inner protocol. */
			if (!gre->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_GRE;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "unsupported item type");
			break;
		}
		temp_size += len;
	}
	*size = temp_size;
	return 0;
}
4325
/**
 * Clear the UDP checksum in an IPv6/UDP encapsulation header.
 *
 * Parses the encap data starting at the Ethernet header, skips any
 * VLAN/QinQ tags, and if the payload is IPv6 carrying UDP, zeroes the
 * UDP checksum field in place. IPv4 data is returned unmodified.
 *
 * @param[in, out] data
 *   Raw encapsulation header data, starting with an Ethernet header.
 * @param[out] error
 *   Flow error structure.
 *
 * @return
 *   0 on success, a negative errno value if the ether type is neither
 *   IPv4 nor IPv6.
 */
static int
flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
{
	struct rte_ether_hdr *eth = NULL;
	struct rte_vlan_hdr *vlan = NULL;
	struct rte_ipv6_hdr *ipv6 = NULL;
	struct rte_udp_hdr *udp = NULL;
	char *next_hdr;
	uint16_t proto;

	eth = (struct rte_ether_hdr *)data;
	next_hdr = (char *)(eth + 1);
	/* RTE_BE16() byte-swaps the big-endian field into host order here. */
	proto = RTE_BE16(eth->ether_type);

	/* VLAN skipping - consume all stacked 802.1Q/802.1ad tags. */
	while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
		vlan = (struct rte_vlan_hdr *)next_hdr;
		proto = RTE_BE16(vlan->eth_proto);
		next_hdr += sizeof(struct rte_vlan_hdr);
	}

	/* HW calculates IPv4 csum. no need to proceed */
	if (proto == RTE_ETHER_TYPE_IPV4)
		return 0;

	/* non IPv4/IPv6 header. not supported */
	if (proto != RTE_ETHER_TYPE_IPV6) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "Cannot offload non IPv4/IPv6");
	}

	ipv6 = (struct rte_ipv6_hdr *)next_hdr;

	/* ignore non UDP */
	if (ipv6->proto != IPPROTO_UDP)
		return 0;

	/* Zero the checksum so the datapath need not compute it. */
	udp = (struct rte_udp_hdr *)(ipv6 + 1);
	udp->dgram_cksum = 0;

	return 0;
}
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385
4386
/**
 * Create an L2-to-L2-tunnel encap action resource for a device flow.
 *
 * Builds the encapsulation header either from a raw encap action's data
 * buffer or by converting the item list of a VXLAN/NVGRE encap action,
 * zeroes the IPv6/UDP checksum, and registers the resulting resource.
 *
 * @param[in] dev
 *   Ethernet device.
 * @param[in] action
 *   RAW_ENCAP, VXLAN_ENCAP or NVGRE_ENCAP flow action.
 * @param[in, out] dev_flow
 *   Device flow receiving the encap/decap resource.
 * @param[in] transfer
 *   Non-zero to target the FDB domain instead of NIC TX.
 * @param[out] error
 *   Flow error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise (rte_errno is set).
 */
static int
flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
			       const struct rte_flow_action *action,
			       struct mlx5_flow *dev_flow,
			       uint8_t transfer,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *encap_data;
	const struct rte_flow_action_raw_encap *raw_encap_data;
	struct mlx5_flow_dv_encap_decap_resource res = {
		.reformat_type =
			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
		.ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
				      MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
	};

	if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
		raw_encap_data =
			(const struct rte_flow_action_raw_encap *)action->conf;
		/*
		 * NOTE(review): raw size is copied unchecked into the
		 * fixed-size res.buf - presumably bounded by earlier raw
		 * encap validation; confirm size <= MLX5_ENCAP_MAX_LEN.
		 */
		res.size = raw_encap_data->size;
		memcpy(res.buf, raw_encap_data->data, res.size);
	} else {
		if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
			encap_data =
				((const struct rte_flow_action_vxlan_encap *)
						action->conf)->definition;
		else
			encap_data =
				((const struct rte_flow_action_nvgre_encap *)
						action->conf)->definition;
		if (flow_dv_convert_encap_data(encap_data, res.buf,
					       &res.size, error))
			return -rte_errno;
	}
	if (flow_dv_zero_encap_udp_csum(res.buf, error))
		return -rte_errno;
	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "can't create L2 encap action");
	return 0;
}
4429
4430
4431
4432
4433
4434
4435
4436
4437
4438
4439
4440
4441
4442
4443
4444
4445static int
4446flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4447 struct mlx5_flow *dev_flow,
4448 uint8_t transfer,
4449 struct rte_flow_error *error)
4450{
4451 struct mlx5_flow_dv_encap_decap_resource res = {
4452 .size = 0,
4453 .reformat_type =
4454 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4455 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4456 MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4457 };
4458
4459 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4460 return rte_flow_error_set(error, EINVAL,
4461 RTE_FLOW_ERROR_TYPE_ACTION,
4462 NULL, "can't create L2 decap action");
4463 return 0;
4464}
4465
4466
4467
4468
4469
4470
4471
4472
4473
4474
4475
4476
4477
4478
4479
4480
4481
4482
4483static int
4484flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4485 const struct rte_flow_action *action,
4486 struct mlx5_flow *dev_flow,
4487 const struct rte_flow_attr *attr,
4488 struct rte_flow_error *error)
4489{
4490 const struct rte_flow_action_raw_encap *encap_data;
4491 struct mlx5_flow_dv_encap_decap_resource res;
4492
4493 memset(&res, 0, sizeof(res));
4494 encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4495 res.size = encap_data->size;
4496 memcpy(res.buf, encap_data->data, res.size);
4497 res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4498 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4499 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4500 if (attr->transfer)
4501 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4502 else
4503 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4504 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4505 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4506 return rte_flow_error_set(error, EINVAL,
4507 RTE_FLOW_ERROR_TYPE_ACTION,
4508 NULL, "can't create encap action");
4509 return 0;
4510}
4511
4512
4513
4514
4515
4516
4517
4518
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528
4529static int
4530flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4531 const struct rte_flow_attr *attr,
4532 const struct rte_vlan_hdr *vlan,
4533 struct mlx5_flow *dev_flow,
4534 struct rte_flow_error *error)
4535{
4536 struct mlx5_flow_dv_push_vlan_action_resource res;
4537
4538 memset(&res, 0, sizeof(res));
4539 res.vlan_tag =
4540 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4541 vlan->vlan_tci);
4542 if (attr->transfer)
4543 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4544 else
4545 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4546 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4547 return flow_dv_push_vlan_action_resource_register
4548 (dev, &res, dev_flow, error);
4549}
4550
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563
4564static int
4565flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4566 const struct rte_flow_action *action,
4567 struct rte_flow_error *error)
4568{
4569 if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4570 return rte_flow_error_set(error, EINVAL,
4571 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4572 NULL, "action configuration not set");
4573 if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4574 return rte_flow_error_set(error, EINVAL,
4575 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4576 "can't have encap action before"
4577 " modify action");
4578 return 0;
4579}
4580
4581
4582
4583
4584
4585
4586
4587
4588
4589
4590
4591
4592
4593
4594
4595
4596static int
4597flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4598 const struct rte_flow_action *action,
4599 const uint64_t item_flags,
4600 struct rte_flow_error *error)
4601{
4602 int ret = 0;
4603
4604 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4605 if (!ret) {
4606 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4607 return rte_flow_error_set(error, EINVAL,
4608 RTE_FLOW_ERROR_TYPE_ACTION,
4609 NULL,
4610 "no L2 item in pattern");
4611 }
4612 return ret;
4613}
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630static int
4631flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4632 const struct rte_flow_action *action,
4633 const uint64_t item_flags,
4634 struct rte_flow_error *error)
4635{
4636 int ret = 0;
4637 uint64_t layer;
4638
4639 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4640 if (!ret) {
4641 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4642 MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4643 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4644 if (!(item_flags & layer))
4645 return rte_flow_error_set(error, EINVAL,
4646 RTE_FLOW_ERROR_TYPE_ACTION,
4647 NULL,
4648 "no ipv4 item in pattern");
4649 }
4650 return ret;
4651}
4652
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662
4663
4664
4665
4666
4667
4668static int
4669flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4670 const struct rte_flow_action *action,
4671 const uint64_t item_flags,
4672 struct rte_flow_error *error)
4673{
4674 int ret = 0;
4675 uint64_t layer;
4676
4677 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4678 if (!ret) {
4679 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4680 MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4681 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4682 if (!(item_flags & layer))
4683 return rte_flow_error_set(error, EINVAL,
4684 RTE_FLOW_ERROR_TYPE_ACTION,
4685 NULL,
4686 "no ipv6 item in pattern");
4687 }
4688 return ret;
4689}
4690
4691
4692
4693
4694
4695
4696
4697
4698
4699
4700
4701
4702
4703
4704
4705
4706static int
4707flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4708 const struct rte_flow_action *action,
4709 const uint64_t item_flags,
4710 struct rte_flow_error *error)
4711{
4712 int ret = 0;
4713 uint64_t layer;
4714
4715 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4716 if (!ret) {
4717 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4718 MLX5_FLOW_LAYER_INNER_L4 :
4719 MLX5_FLOW_LAYER_OUTER_L4;
4720 if (!(item_flags & layer))
4721 return rte_flow_error_set(error, EINVAL,
4722 RTE_FLOW_ERROR_TYPE_ACTION,
4723 NULL, "no transport layer "
4724 "in pattern");
4725 }
4726 return ret;
4727}
4728
4729
4730
4731
4732
4733
4734
4735
4736
4737
4738
4739
4740
4741
4742
4743
4744
4745static int
4746flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4747 const struct rte_flow_action *action,
4748 const uint64_t item_flags,
4749 struct rte_flow_error *error)
4750{
4751 int ret = 0;
4752 uint64_t layer;
4753
4754 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4755 if (!ret) {
4756 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4757 MLX5_FLOW_LAYER_INNER_L4_TCP :
4758 MLX5_FLOW_LAYER_OUTER_L4_TCP;
4759 if (!(item_flags & layer))
4760 return rte_flow_error_set(error, EINVAL,
4761 RTE_FLOW_ERROR_TYPE_ACTION,
4762 NULL, "no TCP item in"
4763 " pattern");
4764 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4765 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4766 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4767 (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4768 return rte_flow_error_set(error, EINVAL,
4769 RTE_FLOW_ERROR_TYPE_ACTION,
4770 NULL,
4771 "cannot decrease and increase"
4772 " TCP sequence number"
4773 " at the same time");
4774 }
4775 return ret;
4776}
4777
4778
4779
4780
4781
4782
4783
4784
4785
4786
4787
4788
4789
4790
4791
4792
4793
4794static int
4795flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4796 const struct rte_flow_action *action,
4797 const uint64_t item_flags,
4798 struct rte_flow_error *error)
4799{
4800 int ret = 0;
4801 uint64_t layer;
4802
4803 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4804 if (!ret) {
4805 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4806 MLX5_FLOW_LAYER_INNER_L4_TCP :
4807 MLX5_FLOW_LAYER_OUTER_L4_TCP;
4808 if (!(item_flags & layer))
4809 return rte_flow_error_set(error, EINVAL,
4810 RTE_FLOW_ERROR_TYPE_ACTION,
4811 NULL, "no TCP item in"
4812 " pattern");
4813 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4814 (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4815 (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4816 (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4817 return rte_flow_error_set(error, EINVAL,
4818 RTE_FLOW_ERROR_TYPE_ACTION,
4819 NULL,
4820 "cannot decrease and increase"
4821 " TCP acknowledgment number"
4822 " at the same time");
4823 }
4824 return ret;
4825}
4826
4827
4828
4829
4830
4831
4832
4833
4834
4835
4836
4837
4838
4839
4840
4841
4842static int
4843flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4844 const struct rte_flow_action *action,
4845 const uint64_t item_flags,
4846 struct rte_flow_error *error)
4847{
4848 int ret = 0;
4849 uint64_t layer;
4850
4851 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4852 if (!ret) {
4853 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4854 MLX5_FLOW_LAYER_INNER_L3 :
4855 MLX5_FLOW_LAYER_OUTER_L3;
4856 if (!(item_flags & layer))
4857 return rte_flow_error_set(error, EINVAL,
4858 RTE_FLOW_ERROR_TYPE_ACTION,
4859 NULL,
4860 "no IP protocol in pattern");
4861 }
4862 return ret;
4863}
4864
4865
4866
4867
4868
4869
4870
4871
4872
4873
4874
4875
4876
4877
4878
4879
4880
4881
4882static int
4883flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4884 const uint64_t action_flags,
4885 const struct rte_flow_action *action,
4886 const struct rte_flow_attr *attr,
4887 struct rte_flow_error *error)
4888{
4889 int ret = 0;
4890 struct mlx5_priv *priv = dev->data->dev_private;
4891 struct mlx5_sh_config *config = &priv->sh->config;
4892 struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
4893 const struct rte_flow_action_modify_field *action_modify_field =
4894 action->conf;
4895 uint32_t dst_width = mlx5_flow_item_field_width(dev,
4896 action_modify_field->dst.field,
4897 -1, attr, error);
4898 uint32_t src_width = mlx5_flow_item_field_width(dev,
4899 action_modify_field->src.field,
4900 dst_width, attr, error);
4901
4902 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4903 if (ret)
4904 return ret;
4905
4906 if (action_modify_field->width == 0)
4907 return rte_flow_error_set(error, EINVAL,
4908 RTE_FLOW_ERROR_TYPE_ACTION, action,
4909 "no bits are requested to be modified");
4910 else if (action_modify_field->width > dst_width ||
4911 action_modify_field->width > src_width)
4912 return rte_flow_error_set(error, EINVAL,
4913 RTE_FLOW_ERROR_TYPE_ACTION, action,
4914 "cannot modify more bits than"
4915 " the width of a field");
4916 if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4917 action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4918 if ((action_modify_field->dst.offset +
4919 action_modify_field->width > dst_width) ||
4920 (action_modify_field->dst.offset % 32))
4921 return rte_flow_error_set(error, EINVAL,
4922 RTE_FLOW_ERROR_TYPE_ACTION, action,
4923 "destination offset is too big"
4924 " or not aligned to 4 bytes");
4925 if (action_modify_field->dst.level &&
4926 action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4927 return rte_flow_error_set(error, ENOTSUP,
4928 RTE_FLOW_ERROR_TYPE_ACTION, action,
4929 "inner header fields modification"
4930 " is not supported");
4931 }
4932 if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4933 action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4934 if (!attr->transfer && !attr->group)
4935 return rte_flow_error_set(error, ENOTSUP,
4936 RTE_FLOW_ERROR_TYPE_ACTION, action,
4937 "modify field action is not"
4938 " supported for group 0");
4939 if ((action_modify_field->src.offset +
4940 action_modify_field->width > src_width) ||
4941 (action_modify_field->src.offset % 32))
4942 return rte_flow_error_set(error, EINVAL,
4943 RTE_FLOW_ERROR_TYPE_ACTION, action,
4944 "source offset is too big"
4945 " or not aligned to 4 bytes");
4946 if (action_modify_field->src.level &&
4947 action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4948 return rte_flow_error_set(error, ENOTSUP,
4949 RTE_FLOW_ERROR_TYPE_ACTION, action,
4950 "inner header fields modification"
4951 " is not supported");
4952 }
4953 if ((action_modify_field->dst.field ==
4954 action_modify_field->src.field) &&
4955 (action_modify_field->dst.level ==
4956 action_modify_field->src.level))
4957 return rte_flow_error_set(error, EINVAL,
4958 RTE_FLOW_ERROR_TYPE_ACTION, action,
4959 "source and destination fields"
4960 " cannot be the same");
4961 if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4962 action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER ||
4963 action_modify_field->dst.field == RTE_FLOW_FIELD_MARK)
4964 return rte_flow_error_set(error, EINVAL,
4965 RTE_FLOW_ERROR_TYPE_ACTION, action,
4966 "mark, immediate value or a pointer to it"
4967 " cannot be used as a destination");
4968 if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4969 action_modify_field->src.field == RTE_FLOW_FIELD_START)
4970 return rte_flow_error_set(error, ENOTSUP,
4971 RTE_FLOW_ERROR_TYPE_ACTION, action,
4972 "modifications of an arbitrary"
4973 " place in a packet is not supported");
4974 if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4975 action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4976 return rte_flow_error_set(error, ENOTSUP,
4977 RTE_FLOW_ERROR_TYPE_ACTION, action,
4978 "modifications of the 802.1Q Tag"
4979 " Identifier is not supported");
4980 if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4981 action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4982 return rte_flow_error_set(error, ENOTSUP,
4983 RTE_FLOW_ERROR_TYPE_ACTION, action,
4984 "modifications of the VXLAN Network"
4985 " Identifier is not supported");
4986 if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4987 action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4988 return rte_flow_error_set(error, ENOTSUP,
4989 RTE_FLOW_ERROR_TYPE_ACTION, action,
4990 "modifications of the GENEVE Network"
4991 " Identifier is not supported");
4992 if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4993 action_modify_field->src.field == RTE_FLOW_FIELD_MARK)
4994 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4995 !mlx5_flow_ext_mreg_supported(dev))
4996 return rte_flow_error_set(error, ENOTSUP,
4997 RTE_FLOW_ERROR_TYPE_ACTION, action,
4998 "cannot modify mark in legacy mode"
4999 " or without extensive registers");
5000 if (action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
5001 action_modify_field->src.field == RTE_FLOW_FIELD_META) {
5002 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
5003 !mlx5_flow_ext_mreg_supported(dev))
5004 return rte_flow_error_set(error, ENOTSUP,
5005 RTE_FLOW_ERROR_TYPE_ACTION, action,
5006 "cannot modify meta without"
5007 " extensive registers support");
5008 ret = flow_dv_get_metadata_reg(dev, attr, error);
5009 if (ret < 0 || ret == REG_NON)
5010 return rte_flow_error_set(error, ENOTSUP,
5011 RTE_FLOW_ERROR_TYPE_ACTION, action,
5012 "cannot modify meta without"
5013 " extensive registers available");
5014 }
5015 if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
5016 return rte_flow_error_set(error, ENOTSUP,
5017 RTE_FLOW_ERROR_TYPE_ACTION, action,
5018 "add and sub operations"
5019 " are not supported");
5020 if (action_modify_field->dst.field == RTE_FLOW_FIELD_IPV4_ECN ||
5021 action_modify_field->src.field == RTE_FLOW_FIELD_IPV4_ECN ||
5022 action_modify_field->dst.field == RTE_FLOW_FIELD_IPV6_ECN ||
5023 action_modify_field->src.field == RTE_FLOW_FIELD_IPV6_ECN)
5024 if (!hca_attr->modify_outer_ip_ecn &&
5025 !attr->transfer && !attr->group)
5026 return rte_flow_error_set(error, ENOTSUP,
5027 RTE_FLOW_ERROR_TYPE_ACTION, action,
5028 "modifications of the ECN for current firmware is not supported");
5029 return (action_modify_field->width / 32) +
5030 !!(action_modify_field->width % 32);
5031}
5032
5033
5034
5035
5036
5037
5038
5039
5040
5041
5042
5043
5044
5045
5046
5047
5048
5049
/**
 * Validate the jump action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] tunnel
 *   Tunnel offload context used for group translation, may be NULL.
 * @param[in] action
 *   Pointer to the jump action.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[in] external
 *   Action belongs to a flow rule created by a request external to the PMD.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_jump(struct rte_eth_dev *dev,
			     const struct mlx5_flow_tunnel *tunnel,
			     const struct rte_flow_action *action,
			     uint64_t action_flags,
			     const struct rte_flow_attr *attributes,
			     bool external, struct rte_flow_error *error)
{
	uint32_t target_group, table = 0;
	int ret = 0;
	struct flow_grp_info grp_info = {
		.external = !!external,
		.transfer = !!attributes->transfer,
		.fdb_def_rule = 1,
		.std_tbl_fix = 0
	};
	/* Only a single fate action is allowed in a flow. */
	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (!action->conf)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "action configuration not set");
	target_group =
		((const struct rte_flow_action_jump *)action->conf)->group;
	/* Translate the user-visible group to the device table index. */
	ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
				       &grp_info, error);
	if (ret)
		return ret;
	/*
	 * A jump to the flow's own group is rejected, except for tunnel
	 * offload rules (tunnel set/match), which are allowed to loop back.
	 */
	if (attributes->group == target_group &&
	    !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
			      MLX5_FLOW_ACTION_TUNNEL_MATCH)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "target group must be other than"
					  " the current flow group");
	/* The root table cannot be a jump destination. */
	if (table == 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "root table shouldn't be destination");
	return 0;
}
5095
5096
5097
5098
5099
5100
5101
5102
5103
5104
5105
5106
5107
5108
5109
5110
5111
5112
/**
 * Validate a port_id/represented_port action.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] action
 *   Port_id or represented_port action structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
				uint64_t action_flags,
				const struct rte_flow_action *action,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	const struct rte_flow_action_port_id *port_id;
	const struct rte_flow_action_ethdev *ethdev;
	struct mlx5_priv *act_priv;
	struct mlx5_priv *dev_priv;
	uint16_t port;

	/* Directing traffic to a port only makes sense in E-Switch mode. */
	if (!attr->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "port action is valid in transfer"
					  " mode only");
	if (!action || !action->conf)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL,
					  "port action parameters must be"
					  " specified");
	/* Only a single fate action is allowed in a flow. */
	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can have only one fate actions in"
					  " a flow");
	dev_priv = mlx5_dev_to_eswitch_info(dev);
	if (!dev_priv)
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "failed to obtain E-Switch info");
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_PORT_ID:
		port_id = action->conf;
		/* With 'original' set, the target is this device itself. */
		port = port_id->original ? dev->data->port_id : port_id->id;
		break;
	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
		ethdev = action->conf;
		port = ethdev->port_id;
		break;
	default:
		MLX5_ASSERT(false);
		return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, action,
				 "unknown E-Switch action");
	}
	act_priv = mlx5_port_to_eswitch_info(port, false);
	if (!act_priv)
		return rte_flow_error_set
				(error, rte_errno,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF, action->conf,
				 "failed to obtain E-Switch port id for port");
	/* Destination port must share the E-Switch domain with this device. */
	if (act_priv->domain_id != dev_priv->domain_id)
		return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "port does not belong to"
				 " E-Switch being configured");
	return 0;
}
5180
5181
5182
5183
5184
5185
5186
5187
5188
5189
5190
5191
5192static inline unsigned int
5193flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5194 bool root)
5195{
5196
5197
5198
5199
5200 if (!root)
5201 return MLX5_MAX_MODIFY_NUM;
5202 else
5203 return MLX5_ROOT_TBL_MODIFY_NUM;
5204}
5205
5206
5207
5208
5209
5210
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220
5221
5222
5223
5224
5225
5226
5227static int
5228mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5229 uint64_t action_flags, uint64_t item_flags,
5230 const struct rte_flow_action *action,
5231 const struct rte_flow_attr *attr,
5232 const struct rte_flow_item *port_id_item,
5233 bool *def_policy,
5234 struct rte_flow_error *error)
5235{
5236 struct mlx5_priv *priv = dev->data->dev_private;
5237 const struct rte_flow_action_meter *am = action->conf;
5238 struct mlx5_flow_meter_info *fm;
5239 struct mlx5_flow_meter_policy *mtr_policy;
5240 struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5241
5242 if (!am)
5243 return rte_flow_error_set(error, EINVAL,
5244 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5245 "meter action conf is NULL");
5246
5247 if (action_flags & MLX5_FLOW_ACTION_METER)
5248 return rte_flow_error_set(error, ENOTSUP,
5249 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5250 "meter chaining not support");
5251 if (action_flags & MLX5_FLOW_ACTION_JUMP)
5252 return rte_flow_error_set(error, ENOTSUP,
5253 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5254 "meter with jump not support");
5255 if (!priv->mtr_en)
5256 return rte_flow_error_set(error, ENOTSUP,
5257 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5258 NULL,
5259 "meter action not supported");
5260 fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5261 if (!fm)
5262 return rte_flow_error_set(error, EINVAL,
5263 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5264 "Meter not found");
5265
5266 if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5267 !(fm->transfer == attr->transfer ||
5268 (!fm->ingress && !attr->ingress && attr->egress) ||
5269 (!fm->egress && !attr->egress && attr->ingress)))
5270 return rte_flow_error_set(error, EINVAL,
5271 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5272 "Flow attributes domain are either invalid "
5273 "or have a domain conflict with current "
5274 "meter attributes");
5275 if (fm->def_policy) {
5276 if (!((attr->transfer &&
5277 mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5278 (attr->egress &&
5279 mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5280 (attr->ingress &&
5281 mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5282 return rte_flow_error_set(error, EINVAL,
5283 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5284 "Flow attributes domain "
5285 "have a conflict with current "
5286 "meter domain attributes");
5287 *def_policy = true;
5288 } else {
5289 mtr_policy = mlx5_flow_meter_policy_find(dev,
5290 fm->policy_id, NULL);
5291 if (!mtr_policy)
5292 return rte_flow_error_set(error, EINVAL,
5293 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5294 "Invalid policy id for meter ");
5295 if (!((attr->transfer && mtr_policy->transfer) ||
5296 (attr->egress && mtr_policy->egress) ||
5297 (attr->ingress && mtr_policy->ingress)))
5298 return rte_flow_error_set(error, EINVAL,
5299 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5300 "Flow attributes domain "
5301 "have a conflict with current "
5302 "meter domain attributes");
5303 if (attr->transfer && mtr_policy->dev) {
5304
5305
5306
5307
5308 struct mlx5_priv *policy_port_priv =
5309 mtr_policy->dev->data->dev_private;
5310 uint16_t flow_src_port = priv->representor_id;
5311
5312 if (port_id_item) {
5313 if (mlx5_flow_get_item_vport_id(dev, port_id_item,
5314 &flow_src_port, error))
5315 return -rte_errno;
5316 }
5317 if (flow_src_port != policy_port_priv->representor_id)
5318 return rte_flow_error_set(error,
5319 rte_errno,
5320 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5321 NULL,
5322 "Flow and meter policy "
5323 "have different src port.");
5324 } else if (mtr_policy->is_rss) {
5325 struct mlx5_flow_meter_policy *fp;
5326 struct mlx5_meter_policy_action_container *acg;
5327 struct mlx5_meter_policy_action_container *acy;
5328 const struct rte_flow_action *rss_act;
5329 int ret;
5330
5331 fp = mlx5_flow_meter_hierarchy_get_final_policy(dev,
5332 mtr_policy);
5333 if (fp == NULL)
5334 return rte_flow_error_set(error, EINVAL,
5335 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5336 "Unable to get the final "
5337 "policy in the hierarchy");
5338 acg = &fp->act_cnt[RTE_COLOR_GREEN];
5339 acy = &fp->act_cnt[RTE_COLOR_YELLOW];
5340 MLX5_ASSERT(acg->fate_action ==
5341 MLX5_FLOW_FATE_SHARED_RSS ||
5342 acy->fate_action ==
5343 MLX5_FLOW_FATE_SHARED_RSS);
5344 if (acg->fate_action == MLX5_FLOW_FATE_SHARED_RSS)
5345 rss_act = acg->rss;
5346 else
5347 rss_act = acy->rss;
5348 ret = mlx5_flow_validate_action_rss(rss_act,
5349 action_flags, dev, attr,
5350 item_flags, error);
5351 if (ret)
5352 return ret;
5353 }
5354 *def_policy = false;
5355 }
5356 return 0;
5357}
5358
5359
5360
5361
5362
5363
5364
5365
5366
5367
5368
5369
5370
5371
5372
5373
/**
 * Validate the AGE action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the age action.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_age(uint64_t action_flags,
			    const struct rte_flow_action *action,
			    struct rte_eth_dev *dev,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_age *age = action->conf;

	/*
	 * Aging requires DevX; when the counter fallback path is active it
	 * additionally needs the ASO age management to be available.
	 */
	if (!priv->sh->cdev->config.devx ||
	    (priv->sh->cmng.counter_fallback && !priv->sh->aso_age_mng))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "age action not supported");
	if (!(action->conf))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	if (!(age->timeout))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "invalid timeout value 0");
	/* Only a single AGE action per flow is supported. */
	if (action_flags & MLX5_FLOW_ACTION_AGE)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "duplicate age actions set");
	return 0;
}
5403
5404
5405
5406
5407
5408
5409
5410
5411
5412
5413
5414
5415
5416
5417
5418
5419static int
5420flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5421 const struct rte_flow_action *action,
5422 const uint64_t item_flags,
5423 struct rte_flow_error *error)
5424{
5425 int ret = 0;
5426
5427 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5428 if (!ret) {
5429 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5430 return rte_flow_error_set(error, EINVAL,
5431 RTE_FLOW_ERROR_TYPE_ACTION,
5432 NULL,
5433 "no ipv4 item in pattern");
5434 }
5435 return ret;
5436}
5437
5438
5439
5440
5441
5442
5443
5444
5445
5446
5447
5448
5449
5450
5451
5452
5453static int
5454flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5455 const struct rte_flow_action *action,
5456 const uint64_t item_flags,
5457 struct rte_flow_error *error)
5458{
5459 int ret = 0;
5460
5461 ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5462 if (!ret) {
5463 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5464 return rte_flow_error_set(error, EINVAL,
5465 RTE_FLOW_ERROR_TYPE_ACTION,
5466 NULL,
5467 "no ipv6 item in pattern");
5468 }
5469 return ret;
5470}
5471
/**
 * Match callback for the modify-header resource cache.
 *
 * The comparison key is the tail of the resource structure starting at the
 * @c ft_type member, followed by the variable-length array of modification
 * commands (@c actions_num entries of @c actions[]).
 *
 * @return
 *   0 when @p entry matches the reference resource carried in @p cb_ctx,
 *   non-zero otherwise.
 */
int
flow_dv_modify_match_cb(void *tool_ctx __rte_unused,
			struct mlx5_list_entry *entry, void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
	struct mlx5_flow_dv_modify_hdr_resource *resource =
			container_of(entry, typeof(*resource), entry);
	/* Fixed-size part of the key: everything from ft_type onward. */
	uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);

	/* Append the variable-length command array to the key. */
	key_len += ref->actions_num * sizeof(ref->actions[0]);
	/* Compare the counts first so memcmp never over-reads commands. */
	return ref->actions_num != resource->actions_num ||
	       memcmp(&ref->ft_type, &resource->ft_type, key_len);
}
5486
/**
 * Lazily get (and create on first use) the indexed pool for modify-header
 * resources holding (index + 1) modification commands.
 *
 * Creation is lock-free: the pool is published with a compare-and-swap;
 * a losing racer destroys its own pool and reloads the winner's.
 *
 * @param[in] sh
 *   Pointer to the shared device context.
 * @param[in] index
 *   Pool slot, i.e. number of modification commands minus one.
 *
 * @return
 *   Pointer to the indexed pool, NULL on allocation failure.
 */
static struct mlx5_indexed_pool *
flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
{
	struct mlx5_indexed_pool *ipool = __atomic_load_n
				     (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);

	if (!ipool) {
		struct mlx5_indexed_pool *expected = NULL;
		/* Entry size covers the resource plus its command array. */
		struct mlx5_indexed_pool_config cfg =
		    (struct mlx5_indexed_pool_config) {
		       .size = sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
								   (index + 1) *
					   sizeof(struct mlx5_modification_cmd),
		       .trunk_size = 64,
		       .grow_trunk = 3,
		       .grow_shift = 2,
		       .need_lock = 1,
		       .release_mem_en = !!sh->config.reclaim_mode,
		       .per_core_cache =
				       sh->config.reclaim_mode ? 0 : (1 << 16),
		       .malloc = mlx5_malloc,
		       .free = mlx5_free,
		       .type = "mlx5_modify_action_resource",
		};

		/* Round up so entries stay pointer-aligned. */
		cfg.size = RTE_ALIGN(cfg.size, sizeof(ipool));
		ipool = mlx5_ipool_create(&cfg);
		if (!ipool)
			return NULL;
		if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
						 &expected, ipool, false,
						 __ATOMIC_SEQ_CST,
						 __ATOMIC_SEQ_CST)) {
			/* Lost the race: use the pool published first. */
			mlx5_ipool_destroy(ipool);
			ipool = __atomic_load_n(&sh->mdh_ipools[index],
						__ATOMIC_SEQ_CST);
		}
	}
	return ipool;
}
5527
/**
 * Create callback for the modify-header resource cache.
 *
 * Allocates a resource entry from the per-size indexed pool, copies the
 * lookup key plus command array from the reference, and creates the DR
 * modify-header action in the domain selected by the table type.
 *
 * @return
 *   Pointer to the new list entry, NULL on failure (ctx->error is set).
 */
struct mlx5_list_entry *
flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5dv_dr_domain *ns;
	struct mlx5_flow_dv_modify_hdr_resource *entry;
	struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
	struct mlx5_indexed_pool *ipool = flow_dv_modify_ipool_get(sh,
							  ref->actions_num - 1);
	int ret;
	uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
	uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
	uint32_t idx;

	if (unlikely(!ipool)) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot allocate modify ipool");
		return NULL;
	}
	entry = mlx5_ipool_zmalloc(ipool, &idx);
	if (!entry) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate resource memory");
		return NULL;
	}
	/* Copy key (from ft_type) and the command array in one shot. */
	rte_memcpy(&entry->ft_type,
		   RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
		   key_len + data_len);
	/* Pick the DR domain matching the flow table type. */
	if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		ns = sh->fdb_domain;
	else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		ns = sh->tx_domain;
	else
		ns = sh->rx_domain;
	ret = mlx5_flow_os_create_flow_action_modify_header
					(sh->cdev->ctx, ns, entry,
					 data_len, &entry->action);
	if (ret) {
		/* Same pool as 'ipool'; release the entry on failure. */
		mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create modification action");
		return NULL;
	}
	entry->idx = idx;
	return &entry->entry;
}
5578
5579struct mlx5_list_entry *
5580flow_dv_modify_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
5581 void *cb_ctx)
5582{
5583 struct mlx5_dev_ctx_shared *sh = tool_ctx;
5584 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5585 struct mlx5_flow_dv_modify_hdr_resource *entry;
5586 struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5587 uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5588 uint32_t idx;
5589
5590 entry = mlx5_ipool_malloc(sh->mdh_ipools[ref->actions_num - 1],
5591 &idx);
5592 if (!entry) {
5593 rte_flow_error_set(ctx->error, ENOMEM,
5594 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5595 "cannot allocate resource memory");
5596 return NULL;
5597 }
5598 memcpy(entry, oentry, sizeof(*entry) + data_len);
5599 entry->idx = idx;
5600 return &entry->entry;
5601}
5602
5603void
5604flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5605{
5606 struct mlx5_dev_ctx_shared *sh = tool_ctx;
5607 struct mlx5_flow_dv_modify_hdr_resource *res =
5608 container_of(entry, typeof(*res), entry);
5609
5610 mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
5611}
5612
5613
5614
5615
5616
5617
5618
5619
5620
5621
5622
5623
5624
5625
5626
5627
5628
5629
5630
5631
5632
5633
5634
5635
5636
5637
5638
5639
5640static int
5641flow_dv_validate_action_sample(uint64_t *action_flags,
5642 const struct rte_flow_action *action,
5643 struct rte_eth_dev *dev,
5644 const struct rte_flow_attr *attr,
5645 uint64_t item_flags,
5646 const struct rte_flow_action_rss *rss,
5647 const struct rte_flow_action_rss **sample_rss,
5648 const struct rte_flow_action_count **count,
5649 int *fdb_mirror_limit,
5650 struct rte_flow_error *error)
5651{
5652 struct mlx5_priv *priv = dev->data->dev_private;
5653 struct mlx5_sh_config *dev_conf = &priv->sh->config;
5654 const struct rte_flow_action_sample *sample = action->conf;
5655 const struct rte_flow_action *act;
5656 uint64_t sub_action_flags = 0;
5657 uint16_t queue_index = 0xFFFF;
5658 int actions_n = 0;
5659 int ret;
5660
5661 if (!sample)
5662 return rte_flow_error_set(error, EINVAL,
5663 RTE_FLOW_ERROR_TYPE_ACTION, action,
5664 "configuration cannot be NULL");
5665 if (sample->ratio == 0)
5666 return rte_flow_error_set(error, EINVAL,
5667 RTE_FLOW_ERROR_TYPE_ACTION, action,
5668 "ratio value starts from 1");
5669 if (!priv->sh->cdev->config.devx ||
5670 (sample->ratio > 0 && !priv->sampler_en))
5671 return rte_flow_error_set(error, ENOTSUP,
5672 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5673 NULL,
5674 "sample action not supported");
5675 if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5676 return rte_flow_error_set(error, EINVAL,
5677 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5678 "Multiple sample actions not "
5679 "supported");
5680 if (*action_flags & MLX5_FLOW_ACTION_METER)
5681 return rte_flow_error_set(error, EINVAL,
5682 RTE_FLOW_ERROR_TYPE_ACTION, action,
5683 "wrong action order, meter should "
5684 "be after sample action");
5685 if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5686 return rte_flow_error_set(error, EINVAL,
5687 RTE_FLOW_ERROR_TYPE_ACTION, action,
5688 "wrong action order, jump should "
5689 "be after sample action");
5690 if (*action_flags & MLX5_FLOW_ACTION_CT)
5691 return rte_flow_error_set(error, EINVAL,
5692 RTE_FLOW_ERROR_TYPE_ACTION, action,
5693 "Sample after CT not supported");
5694 act = sample->actions;
5695 for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5696 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5697 return rte_flow_error_set(error, ENOTSUP,
5698 RTE_FLOW_ERROR_TYPE_ACTION,
5699 act, "too many actions");
5700 switch (act->type) {
5701 case RTE_FLOW_ACTION_TYPE_QUEUE:
5702 ret = mlx5_flow_validate_action_queue(act,
5703 sub_action_flags,
5704 dev,
5705 attr, error);
5706 if (ret < 0)
5707 return ret;
5708 queue_index = ((const struct rte_flow_action_queue *)
5709 (act->conf))->index;
5710 sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5711 ++actions_n;
5712 break;
5713 case RTE_FLOW_ACTION_TYPE_RSS:
5714 *sample_rss = act->conf;
5715 ret = mlx5_flow_validate_action_rss(act,
5716 sub_action_flags,
5717 dev, attr,
5718 item_flags,
5719 error);
5720 if (ret < 0)
5721 return ret;
5722 if (rss && *sample_rss &&
5723 ((*sample_rss)->level != rss->level ||
5724 (*sample_rss)->types != rss->types))
5725 return rte_flow_error_set(error, ENOTSUP,
5726 RTE_FLOW_ERROR_TYPE_ACTION,
5727 NULL,
5728 "Can't use the different RSS types "
5729 "or level in the same flow");
5730 if (*sample_rss != NULL && (*sample_rss)->queue_num)
5731 queue_index = (*sample_rss)->queue[0];
5732 sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5733 ++actions_n;
5734 break;
5735 case RTE_FLOW_ACTION_TYPE_MARK:
5736 ret = flow_dv_validate_action_mark(dev, act,
5737 sub_action_flags,
5738 attr, error);
5739 if (ret < 0)
5740 return ret;
5741 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5742 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5743 MLX5_FLOW_ACTION_MARK_EXT;
5744 else
5745 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5746 ++actions_n;
5747 break;
5748 case RTE_FLOW_ACTION_TYPE_COUNT:
5749 ret = flow_dv_validate_action_count
5750 (dev, false, *action_flags | sub_action_flags,
5751 attr, error);
5752 if (ret < 0)
5753 return ret;
5754 *count = act->conf;
5755 sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5756 *action_flags |= MLX5_FLOW_ACTION_COUNT;
5757 ++actions_n;
5758 break;
5759 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5760 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5761 ret = flow_dv_validate_action_port_id(dev,
5762 sub_action_flags,
5763 act,
5764 attr,
5765 error);
5766 if (ret)
5767 return ret;
5768 sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5769 ++actions_n;
5770 break;
5771 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5772 ret = flow_dv_validate_action_raw_encap_decap
5773 (dev, NULL, act->conf, attr, &sub_action_flags,
5774 &actions_n, action, item_flags, error);
5775 if (ret < 0)
5776 return ret;
5777 ++actions_n;
5778 break;
5779 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5780 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5781 ret = flow_dv_validate_action_l2_encap(dev,
5782 sub_action_flags,
5783 act, attr,
5784 error);
5785 if (ret < 0)
5786 return ret;
5787 sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5788 ++actions_n;
5789 break;
5790 default:
5791 return rte_flow_error_set(error, ENOTSUP,
5792 RTE_FLOW_ERROR_TYPE_ACTION,
5793 NULL,
5794 "Doesn't support optional "
5795 "action");
5796 }
5797 }
5798 if (attr->ingress && !attr->transfer) {
5799 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5800 MLX5_FLOW_ACTION_RSS)))
5801 return rte_flow_error_set(error, EINVAL,
5802 RTE_FLOW_ERROR_TYPE_ACTION,
5803 NULL,
5804 "Ingress must has a dest "
5805 "QUEUE for Sample");
5806 } else if (attr->egress && !attr->transfer) {
5807 return rte_flow_error_set(error, ENOTSUP,
5808 RTE_FLOW_ERROR_TYPE_ACTION,
5809 NULL,
5810 "Sample Only support Ingress "
5811 "or E-Switch");
5812 } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5813 MLX5_ASSERT(attr->transfer);
5814 if (sample->ratio > 1)
5815 return rte_flow_error_set(error, ENOTSUP,
5816 RTE_FLOW_ERROR_TYPE_ACTION,
5817 NULL,
5818 "E-Switch doesn't support "
5819 "any optional action "
5820 "for sampling");
5821 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5822 return rte_flow_error_set(error, ENOTSUP,
5823 RTE_FLOW_ERROR_TYPE_ACTION,
5824 NULL,
5825 "unsupported action QUEUE");
5826 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5827 return rte_flow_error_set(error, ENOTSUP,
5828 RTE_FLOW_ERROR_TYPE_ACTION,
5829 NULL,
5830 "unsupported action QUEUE");
5831 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5832 return rte_flow_error_set(error, EINVAL,
5833 RTE_FLOW_ERROR_TYPE_ACTION,
5834 NULL,
5835 "E-Switch must has a dest "
5836 "port for mirroring");
5837 if (!priv->sh->cdev->config.hca_attr.reg_c_preserve &&
5838 priv->representor_id != UINT16_MAX)
5839 *fdb_mirror_limit = 1;
5840 }
5841
5842 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5843 (queue_index == 0xFFFF || !mlx5_rxq_is_hairpin(dev, queue_index))) {
5844 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5845 MLX5_FLOW_XCAP_ACTIONS)
5846 return rte_flow_error_set(error, ENOTSUP,
5847 RTE_FLOW_ERROR_TYPE_ACTION,
5848 NULL, "encap and decap "
5849 "combination aren't "
5850 "supported");
5851 if (!attr->transfer && attr->ingress && (sub_action_flags &
5852 MLX5_FLOW_ACTION_ENCAP))
5853 return rte_flow_error_set(error, ENOTSUP,
5854 RTE_FLOW_ERROR_TYPE_ACTION,
5855 NULL, "encap is not supported"
5856 " for ingress traffic");
5857 }
5858 return 0;
5859}
5860
5861
5862
5863
5864
5865
5866
5867
5868
5869
5870
5871
5872
5873
5874
5875
/**
 * Find an existing modify-header resource or create and register a new one.
 *
 * @param dev[in, out]
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to the modify-header resource (lookup reference).
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow; its handle gets the registered resource.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_modify_hdr_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	/* Key: fixed tail from ft_type plus the command array. */
	uint32_t key_len = sizeof(*resource) -
			   offsetof(typeof(*resource), ft_type) +
			   resource->actions_num * sizeof(resource->actions[0]);
	struct mlx5_list_entry *entry;
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = resource,
	};
	struct mlx5_hlist *modify_cmds;
	uint64_t key64;

	modify_cmds = flow_dv_hlist_prepare(sh, &sh->modify_cmds,
				"hdr_modify",
				MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
				true, false, sh,
				flow_dv_modify_create_cb,
				flow_dv_modify_match_cb,
				flow_dv_modify_remove_cb,
				flow_dv_modify_clone_cb,
				flow_dv_modify_clone_free_cb,
				error);
	if (unlikely(!modify_cmds))
		return -rte_errno;
	/* Group 0 tables are root tables with a lower command limit. */
	resource->root = !dev_flow->dv.group;
	if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
								resource->root))
		return rte_flow_error_set(error, EOVERFLOW,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "too many modify header items");
	/* Hash the key for the cache lookup. */
	key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
	entry = mlx5_hlist_register(modify_cmds, key64, &ctx);
	if (!entry)
		return -rte_errno;
	/* Use the cached (possibly pre-existing) resource from here on. */
	resource = container_of(entry, typeof(*resource), entry);
	dev_flow->handle->dvh.modify_hdr = resource;
	return 0;
}
5922
5923
5924
5925
5926
5927
5928
5929
5930
5931
5932
5933
5934
5935
/**
 * Get a flow counter (and optionally its pool) by the external counter index.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   External counter index (1-based; NOTE(review): bits at and above
 *   MLX5_CNT_SHARED_OFFSET appear to encode a shared-counter marker and
 *   are masked off here -- confirm against the index encoding).
 * @param[out] ppool
 *   Optional output for the owning counter pool.
 *
 * @return
 *   Pointer to the counter.
 */
static struct mlx5_flow_counter *
flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
			   uint32_t idx,
			   struct mlx5_flow_counter_pool **ppool)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;

	/* Convert to a 0-based index and strip the high marker bits. */
	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
	MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
	pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
	MLX5_ASSERT(pool);
	if (ppool)
		*ppool = pool;
	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
}
5954
5955
5956
5957
5958
5959
5960
5961
5962
5963
5964
5965
5966static bool
5967flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5968{
5969 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5970 MLX5_COUNTERS_PER_POOL;
5971
5972 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5973 return true;
5974 return false;
5975}
5976
5977
5978
5979
5980
5981
5982
5983
5984
5985
5986
5987
/**
 * Find the counter pool containing the given DevX counter object id.
 *
 * @param[in] cmng
 *   Pointer to the counter management.
 * @param[in] id
 *   The DevX counter object id.
 *
 * @return
 *   Pointer to the owning pool, NULL if not found.
 */
static struct mlx5_flow_counter_pool *
flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
{
	uint32_t i;
	struct mlx5_flow_counter_pool *pool = NULL;

	rte_spinlock_lock(&cmng->pool_update_sl);
	/* Fast path: check the most recently matched pool first. */
	if (cmng->last_pool_idx != POOL_IDX_INVALID &&
	    flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
		pool = cmng->pools[cmng->last_pool_idx];
		goto out;
	}
	/* Reject ids outside the known overall range. */
	if (id > cmng->max_id || id < cmng->min_id)
		goto out;
	/*
	 * Scan the valid pools from the newest (highest index) backwards;
	 * recently allocated counters are more likely near the tail.
	 */
	i = cmng->n_valid;
	while (i--) {
		struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];

		if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
			pool = pool_tmp;
			break;
		}
	}
out:
	rte_spinlock_unlock(&cmng->pool_update_sl);
	return pool;
}
6022
6023
6024
6025
6026
6027
6028
6029
6030
6031
/**
 * Resize the counter pools array by MLX5_CNT_CONTAINER_RESIZE slots.
 *
 * NOTE(review): the pools array is swapped without internal locking; the
 * visible caller (flow_dv_pool_create) invokes this while holding
 * cmng->pool_update_sl -- confirm all callers do the same.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   0 on success, otherwise negative errno value and rte_errno is set.
 */
static int
flow_dv_container_resize(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	void *old_pools = cmng->pools;
	uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
	uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
	void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);

	if (!pools) {
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	/* Carry over the existing pool pointers into the larger array. */
	if (old_pools)
		memcpy(pools, old_pools, cmng->n *
		       sizeof(struct mlx5_flow_counter_pool *));
	cmng->n = resize;
	cmng->pools = pools;
	if (old_pools)
		mlx5_free(old_pools);
	return 0;
}
6055
6056
6057
6058
6059
6060
6061
6062
6063
6064
6065
6066
6067
6068
6069
6070
/**
 * Query a flow counter's hit packet and byte statistics.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the flow counter.
 * @param[out] pkts
 *   The statistics value of packets.
 * @param[out] bytes
 *   The statistics value of bytes.
 *
 * @return
 *   0 on success, otherwise a negative errno value and rte_errno is set.
 */
static inline int
_flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
		     uint64_t *bytes)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt;
	int offset;

	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
	MLX5_ASSERT(pool);
	/* Fallback mode queries the DevX counter object directly. */
	if (priv->sh->cmng.counter_fallback)
		return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
					0, pkts, bytes, 0, NULL, NULL, 0);
	rte_spinlock_lock(&pool->sl);
	/* No raw batch data yet: report zeroed statistics. */
	if (!pool->raw) {
		*pkts = 0;
		*bytes = 0;
	} else {
		offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
		*pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
		*bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
	}
	rte_spinlock_unlock(&pool->sl);
	return 0;
}
6097
6098
6099
6100
6101
6102
6103
6104
6105
6106
6107
6108
6109
6110
6111
6112
/**
 * Create and register a new flow counter pool.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dcs
 *   The DevX counter handle (becomes the pool's min_dcs).
 * @param[in] age
 *   Whether the pool is for counters that are also used for aging.
 *
 * @return
 *   The new pool pointer, NULL on failure and rte_errno is set.
 */
static struct mlx5_flow_counter_pool *
flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
		    uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t size = sizeof(*pool);

	/* Counter slots, plus age slots when the pool supports aging. */
	size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
	size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
	pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
	if (!pool) {
		rte_errno = ENOMEM;
		return NULL;
	}
	pool->raw = NULL;
	pool->is_aged = !!age;
	pool->query_gen = 0;
	pool->min_dcs = dcs;
	rte_spinlock_init(&pool->sl);
	rte_spinlock_init(&pool->csl);
	TAILQ_INIT(&pool->counters[0]);
	TAILQ_INIT(&pool->counters[1]);
	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
	rte_spinlock_lock(&cmng->pool_update_sl);
	pool->index = cmng->n_valid;
	/* Grow the pools array if it is full (lock is held here). */
	if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
		mlx5_free(pool);
		rte_spinlock_unlock(&cmng->pool_update_sl);
		return NULL;
	}
	cmng->pools[pool->index] = pool;
	cmng->n_valid++;
	if (unlikely(fallback)) {
		/* Track the overall id range and cache this pool for lookup. */
		int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);

		if (base < cmng->min_id)
			cmng->min_id = base;
		if (base > cmng->max_id)
			cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
		cmng->last_pool_idx = pool->index;
	}
	rte_spinlock_unlock(&cmng->pool_update_sl);
	return pool;
}
6160
6161
6162
6163
6164
6165
6166
6167
6168
6169
6170
6171
6172
6173
6174
/**
 * Prepare a counter pool able to supply a free counter, and hand one out.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] cnt_free
 *   Where the pointer of the new free counter is stored.
 * @param[in] age
 *   Nonzero when the counter is requested for aging flows.
 *
 * @return
 *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
 */
static struct mlx5_flow_counter_pool *
flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
			     struct mlx5_flow_counter **cnt_free,
			     uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_counters tmp_tq;
	struct mlx5_devx_obj *dcs = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t i;

	if (fallback) {
		/* Fallback: single counter allocation (bulk bitmap 0). */
		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0);
		if (!dcs)
			return NULL;
		/* The counter ID may fall into an already existing pool. */
		pool = flow_dv_find_pool_by_id(cmng, dcs->id);
		if (!pool) {
			pool = flow_dv_pool_create(dev, dcs, age);
			if (!pool) {
				mlx5_devx_cmd_destroy(dcs);
				return NULL;
			}
		}
		/* Slot inside the pool is derived from the DevX counter ID. */
		i = dcs->id % MLX5_COUNTERS_PER_POOL;
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		/* The counter owns its dedicated DevX object while free. */
		cnt->dcs_when_free = dcs;
		*cnt_free = cnt;
		return pool;
	}
	/* Bulk allocation (bitmap 0x4) - assumed to back a whole pool. */
	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
	if (!dcs) {
		rte_errno = ENODATA;
		return NULL;
	}
	pool = flow_dv_pool_create(dev, dcs, age);
	if (!pool) {
		mlx5_devx_cmd_destroy(dcs);
		return NULL;
	}
	/* Push all counters but the first one to the per-type free list. */
	TAILQ_INIT(&tmp_tq);
	for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
	}
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	/* The first counter goes straight to the caller. */
	*cnt_free = MLX5_POOL_GET_CNT(pool, 0);
	(*cnt_free)->pool = pool;
	return pool;
}
6234
6235
6236
6237
6238
6239
6240
6241
6242
6243
6244
6245
/**
 * Allocate a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] age
 *   Nonzero when the counter is requested for an aging flow.
 *
 * @return
 *   Index to the flow counter on success, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt_free = NULL;
	bool fallback = priv->sh->cmng.counter_fallback;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	uint32_t cnt_idx;

	/* Counters are backed by DevX objects - nothing to do without DevX. */
	if (!priv->sh->cdev->config.devx) {
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Try the per-type free list first. */
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
	if (cnt_free)
		TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	/* Free list empty - preparing a pool also yields a free counter. */
	if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
		goto err;
	pool = cnt_free->pool;
	if (fallback)
		/* In fallback mode the DevX object follows the counter. */
		cnt_free->dcs_when_active = cnt_free->dcs_when_free;
	/* Create the flow action lazily, on the counter's first use. */
	if (!cnt_free->action) {
		uint16_t offset;
		struct mlx5_devx_obj *dcs;
		int ret;

		if (!fallback) {
			offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
			dcs = pool->min_dcs;
		} else {
			/* Dedicated single-counter object: offset is 0. */
			offset = 0;
			dcs = cnt_free->dcs_when_free;
		}
		ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
							    &cnt_free->action);
		if (ret) {
			rte_errno = errno;
			goto err;
		}
	}
	cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
				MLX5_CNT_ARRAY_IDX(pool, cnt_free));
	/* Snapshot the current hardware values as the counter baseline. */
	if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
				 &cnt_free->bytes))
		goto err;
	if (!fallback && !priv->sh->cmng.query_thread_on)
		/* Kick the periodic batch-query alarm if not running yet. */
		mlx5_set_query_alarm(priv->sh);
	/*
	 * Fresh counter: exactly one reference, held by the allocating
	 * flow. The refcnt grows when shared through the indirect action
	 * API (see flow_dv_counter_free(), which drops a reference).
	 */
	cnt_free->shared_info.refcnt = 1;
	return cnt_idx;
err:
	/* Undo: put the counter back on the per-type free list. */
	if (cnt_free) {
		cnt_free->pool = pool;
		if (fallback)
			cnt_free->dcs_when_free = cnt_free->dcs_when_active;
		rte_spinlock_lock(&cmng->csl[cnt_type]);
		TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
		rte_spinlock_unlock(&cmng->csl[cnt_type]);
	}
	return 0;
}
6321
6322
6323
6324
6325
6326
6327
6328
6329
6330
6331
6332
6333static struct mlx5_age_param*
6334flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6335 uint32_t counter)
6336{
6337 struct mlx5_flow_counter *cnt;
6338 struct mlx5_flow_counter_pool *pool = NULL;
6339
6340 flow_dv_counter_get_by_idx(dev, counter, &pool);
6341 counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6342 cnt = MLX5_POOL_GET_CNT(pool, counter);
6343 return MLX5_CNT_TO_AGE(cnt);
6344}
6345
6346
6347
6348
6349
6350
6351
6352
6353
6354
6355
/**
 * Release the aging state of a counter, pulling it off the aged list if the
 * aging mechanism already reported it.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index of the counter handler.
 * @param[in] cnt
 *   Pointer to the counter handler.
 */
static void
flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
				uint32_t counter, struct mlx5_flow_counter *cnt)
{
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t expected = AGE_CANDIDATE;

	age_info = GET_PORT_AGE_INFO(priv);
	age_param = flow_dv_counter_idx_get_age(dev, counter);
	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
					 AGE_FREE, false, __ATOMIC_RELAXED,
					 __ATOMIC_RELAXED)) {
		/*
		 * The state was not AGE_CANDIDATE any more, so the counter
		 * is presumed to sit on the aged list already (timed out):
		 * remove it under the list lock, then mark the age
		 * parameter free.
		 */
		rte_spinlock_lock(&age_info->aged_sl);
		TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
		rte_spinlock_unlock(&age_info->aged_sl);
		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
	}
}
6380
6381
6382
6383
6384
6385
6386
6387
6388
/**
 * Release a flow counter back to its free list.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index of the counter handler; 0 means no counter and is a no-op.
 */
static void
flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type;

	if (!counter)
		return;
	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
	MLX5_ASSERT(pool);
	if (pool->is_aged) {
		/* Aged counters first detach from the aging machinery. */
		flow_dv_counter_remove_from_age(dev, counter, cnt);
	} else {
		/*
		 * Non-aged counters may be shared (indirect action API):
		 * drop one reference and keep the counter allocated while
		 * any reference remains.
		 */
		if (__atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
				       __ATOMIC_RELAXED))
			return;
	}
	cnt->pool = pool;
	/*
	 * Normal mode: return the counter to its pool's list for the
	 * current query generation. Fallback mode: return it (together
	 * with its dedicated DevX object) to the global per-type list.
	 */
	if (!priv->sh->cmng.counter_fallback) {
		rte_spinlock_lock(&pool->csl);
		TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
		rte_spinlock_unlock(&pool->csl);
	} else {
		cnt->dcs_when_free = cnt->dcs_when_active;
		cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
					   MLX5_COUNTER_TYPE_ORIGIN;
		rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
		TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
				  cnt, next);
		rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
	}
}
6441
6442
6443
6444
6445
6446
6447
6448
6449
6450
6451static int
6452flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6453{
6454 struct mlx5_priv *priv = dev->data->dev_private;
6455 struct mlx5_aso_mtr_pools_mng *pools_mng =
6456 &priv->sh->mtrmng->pools_mng;
6457 void *old_pools = pools_mng->pools;
6458 uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6459 uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6460 void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6461
6462 if (!pools) {
6463 rte_errno = ENOMEM;
6464 return -ENOMEM;
6465 }
6466 if (!pools_mng->n)
6467 if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6468 mlx5_free(pools);
6469 return -ENOMEM;
6470 }
6471 if (old_pools)
6472 memcpy(pools, old_pools, pools_mng->n *
6473 sizeof(struct mlx5_aso_mtr_pool *));
6474 pools_mng->n = resize;
6475 pools_mng->pools = pools;
6476 if (old_pools)
6477 mlx5_free(old_pools);
6478 return 0;
6479}
6480
6481
6482
6483
6484
6485
6486
6487
6488
6489
6490
6491
6492
/**
 * Create an ASO meter pool, register it in the pools container and hand
 * back its first meter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] mtr_free
 *   Where the pointer of the first free ASO meter is stored.
 *
 * @return
 *   The meter pool pointer on success, NULL otherwise and rte_errno is set.
 */
static struct mlx5_aso_mtr_pool *
flow_dv_mtr_pool_create(struct rte_eth_dev *dev, struct mlx5_aso_mtr **mtr_free)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_mtr_pools_mng *pools_mng = &priv->sh->mtrmng->pools_mng;
	struct mlx5_aso_mtr_pool *pool = NULL;
	struct mlx5_devx_obj *dcs = NULL;
	uint32_t i;
	uint32_t log_obj_size;

	/* Object count is half the pool size - presumably two meters per
	 * ASO object; confirm against the DevX object layout. */
	log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
	dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->cdev->ctx,
						      priv->sh->cdev->pdn,
						      log_obj_size);
	if (!dcs) {
		rte_errno = ENODATA;
		return NULL;
	}
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
	if (!pool) {
		rte_errno = ENOMEM;
		claim_zero(mlx5_devx_cmd_destroy(dcs));
		return NULL;
	}
	pool->devx_obj = dcs;
	rte_rwlock_write_lock(&pools_mng->resize_mtrwl);
	pool->index = pools_mng->n_valid;
	/* Container full - grow the pool array before registering. */
	if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
		mlx5_free(pool);
		claim_zero(mlx5_devx_cmd_destroy(dcs));
		rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
		return NULL;
	}
	pools_mng->pools[pool->index] = pool;
	pools_mng->n_valid++;
	rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
	/* Push all meters but the first one to the global free list. */
	for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
		pool->mtrs[i].offset = i;
		LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
	}
	/* The first meter goes straight to the caller. */
	pool->mtrs[0].offset = 0;
	*mtr_free = &pool->mtrs[0];
	return pool;
}
6537
6538
6539
6540
6541
6542
6543
6544
6545
6546static void
6547flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6548{
6549 struct mlx5_priv *priv = dev->data->dev_private;
6550 struct mlx5_aso_mtr_pools_mng *pools_mng =
6551 &priv->sh->mtrmng->pools_mng;
6552 struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6553
6554 MLX5_ASSERT(aso_mtr);
6555 rte_spinlock_lock(&pools_mng->mtrsl);
6556 memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6557 aso_mtr->state = ASO_METER_FREE;
6558 LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6559 rte_spinlock_unlock(&pools_mng->mtrsl);
6560}
6561
6562
6563
6564
6565
6566
6567
6568
6569
6570
/**
 * Allocate an ASO flow meter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Index to the ASO flow meter on success, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_dv_mtr_alloc(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_mtr *mtr_free = NULL;
	struct mlx5_aso_mtr_pools_mng *pools_mng =
				&priv->sh->mtrmng->pools_mng;
	struct mlx5_aso_mtr_pool *pool;
	uint32_t mtr_idx = 0;

	/* ASO meters are DevX objects - nothing to do without DevX. */
	if (!priv->sh->cdev->config.devx) {
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Take the first free meter, or create a new pool if none left. */
	rte_spinlock_lock(&pools_mng->mtrsl);
	mtr_free = LIST_FIRST(&pools_mng->meters);
	if (mtr_free)
		LIST_REMOVE(mtr_free, next);
	if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
		rte_spinlock_unlock(&pools_mng->mtrsl);
		return 0;
	}
	mtr_free->state = ASO_METER_WAIT;
	rte_spinlock_unlock(&pools_mng->mtrsl);
	pool = container_of(mtr_free,
			    struct mlx5_aso_mtr_pool,
			    mtrs[mtr_free->offset]);
	mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
	/* Create the DR ASO action lazily on the meter's first allocation. */
	if (!mtr_free->fm.meter_action_g) {
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
		struct rte_flow_error error;
		uint8_t reg_id;

		reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
		mtr_free->fm.meter_action_g =
			mlx5_glue->dv_create_flow_action_aso
					      (priv->sh->rx_domain,
					       pool->devx_obj->obj,
					       mtr_free->offset,
					       (1 << MLX5_FLOW_COLOR_GREEN),
					       reg_id - REG_C_0);
#endif
		/* Without ASO support the action stays NULL - fail here. */
		if (!mtr_free->fm.meter_action_g) {
			flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
			return 0;
		}
	}
	return mtr_idx;
}
6622
6623
6624
6625
6626
6627
6628
6629
6630
6631
6632
6633
6634
6635
6636
6637
6638
6639
6640
/**
 * Validate the flow rule attributes.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] tunnel
 *   Tunnel offload context of the rule, or NULL.
 * @param[in] attributes
 *   Pointer to the flow attributes.
 * @param[in] grp_info
 *   Group translation info (standard/tunnel table mapping).
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success for a non-root table, a positive value
 *   (MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL) for the root table,
 *   a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
			    const struct mlx5_flow_tunnel *tunnel,
			    const struct rte_flow_attr *attributes,
			    const struct flow_grp_info *grp_info,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
	int ret = 0;

#ifndef HAVE_MLX5DV_DR
	RTE_SET_USED(tunnel);
	RTE_SET_USED(grp_info);
	/* Without DR only the root table (group 0) is available. */
	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL,
					  "groups are not supported");
#else
	uint32_t table = 0;

	ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
				       grp_info, error);
	if (ret)
		return ret;
	/* Table 0 is the root table - signal it via the return value. */
	if (!table)
		ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
#endif
	if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
	    attributes->priority > lowest_priority)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL,
					  "priority out of range");
	if (attributes->transfer) {
		if (!priv->sh->config.dv_esw_en)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "E-Switch dr is not supported");
		if (attributes->egress)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
				 "egress is not supported");
	}
	/* A rule is either ingress or egress, never both or neither. */
	if (!(attributes->egress ^ attributes->ingress))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
					  "must specify exactly one of "
					  "ingress or egress");
	return ret;
}
6695
6696static int
6697validate_integrity_bits(const struct rte_flow_item_integrity *mask,
6698 int64_t pattern_flags, uint64_t l3_flags,
6699 uint64_t l4_flags, uint64_t ip4_flag,
6700 struct rte_flow_error *error)
6701{
6702 if (mask->l3_ok && !(pattern_flags & l3_flags))
6703 return rte_flow_error_set(error, EINVAL,
6704 RTE_FLOW_ERROR_TYPE_ITEM,
6705 NULL, "missing L3 protocol");
6706
6707 if (mask->ipv4_csum_ok && !(pattern_flags & ip4_flag))
6708 return rte_flow_error_set(error, EINVAL,
6709 RTE_FLOW_ERROR_TYPE_ITEM,
6710 NULL, "missing IPv4 protocol");
6711
6712 if ((mask->l4_ok || mask->l4_csum_ok) && !(pattern_flags & l4_flags))
6713 return rte_flow_error_set(error, EINVAL,
6714 RTE_FLOW_ERROR_TYPE_ITEM,
6715 NULL, "missing L4 protocol");
6716
6717 return 0;
6718}
6719
6720static int
6721flow_dv_validate_item_integrity_post(const struct
6722 rte_flow_item *integrity_items[2],
6723 int64_t pattern_flags,
6724 struct rte_flow_error *error)
6725{
6726 const struct rte_flow_item_integrity *mask;
6727 int ret;
6728
6729 if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
6730 mask = (typeof(mask))integrity_items[0]->mask;
6731 ret = validate_integrity_bits(mask, pattern_flags,
6732 MLX5_FLOW_LAYER_OUTER_L3,
6733 MLX5_FLOW_LAYER_OUTER_L4,
6734 MLX5_FLOW_LAYER_OUTER_L3_IPV4,
6735 error);
6736 if (ret)
6737 return ret;
6738 }
6739 if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
6740 mask = (typeof(mask))integrity_items[1]->mask;
6741 ret = validate_integrity_bits(mask, pattern_flags,
6742 MLX5_FLOW_LAYER_INNER_L3,
6743 MLX5_FLOW_LAYER_INNER_L4,
6744 MLX5_FLOW_LAYER_INNER_L3_IPV4,
6745 error);
6746 if (ret)
6747 return ret;
6748 }
6749 return 0;
6750}
6751
/**
 * Validate a packet integrity item and record it for the post-scan check.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] integrity_item
 *   Pointer to the integrity flow item.
 * @param[in] pattern_flags
 *   Item flags detected in the pattern so far.
 * @param[in,out] last_item
 *   Updated with the inner/outer integrity item flag.
 * @param[in,out] integrity_items
 *   Slot 0 receives the outer item, slot 1 the inner item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
				const struct rte_flow_item *integrity_item,
				uint64_t pattern_flags, uint64_t *last_item,
				const struct rte_flow_item *integrity_items[2],
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_integrity *mask = (typeof(mask))
						     integrity_item->mask;
	const struct rte_flow_item_integrity *spec = (typeof(spec))
						     integrity_item->spec;

	if (!priv->sh->cdev->config.hca_attr.pkt_integrity_match)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  integrity_item,
					  "packet integrity integrity_item not supported");
	if (!spec)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  integrity_item,
					  "no spec for integrity item");
	if (!mask)
		mask = &rte_flow_item_integrity_mask;
	if (!mlx5_validate_integrity_item(mask))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  integrity_item,
					  "unsupported integrity filter");
	/*
	 * Only "positive" matching is supported: each masked bit must also
	 * be set in the spec (the fields are single-bit, so bitwise '&'
	 * with the logical negation is exact here).
	 */
	if ((mask->l3_ok & !spec->l3_ok) || (mask->l4_ok & !spec->l4_ok) ||
	    (mask->ipv4_csum_ok & !spec->ipv4_csum_ok) ||
	    (mask->l4_csum_ok & !spec->l4_csum_ok))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL, "negative integrity flow is not supported");
	if (spec->level > 1) {
		/* Level above 1 addresses the inner packet headers. */
		if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ITEM,
				 NULL, "multiple inner integrity items not supported");
		integrity_items[1] = integrity_item;
		*last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
	} else {
		if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ITEM,
				 NULL, "multiple outer integrity items not supported");
		integrity_items[0] = integrity_item;
		*last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
	}
	return 0;
}
6807
6808static int
6809flow_dv_validate_item_flex(struct rte_eth_dev *dev,
6810 const struct rte_flow_item *item,
6811 uint64_t item_flags,
6812 uint64_t *last_item,
6813 bool is_inner,
6814 struct rte_flow_error *error)
6815{
6816 const struct rte_flow_item_flex *flow_spec = item->spec;
6817 const struct rte_flow_item_flex *flow_mask = item->mask;
6818 struct mlx5_flex_item *flex;
6819
6820 if (!flow_spec)
6821 return rte_flow_error_set(error, EINVAL,
6822 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6823 "flex flow item spec cannot be NULL");
6824 if (!flow_mask)
6825 return rte_flow_error_set(error, EINVAL,
6826 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6827 "flex flow item mask cannot be NULL");
6828 if (item->last)
6829 return rte_flow_error_set(error, ENOTSUP,
6830 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6831 "flex flow item last not supported");
6832 if (mlx5_flex_acquire_index(dev, flow_spec->handle, false) < 0)
6833 return rte_flow_error_set(error, EINVAL,
6834 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6835 "invalid flex flow item handle");
6836 flex = (struct mlx5_flex_item *)flow_spec->handle;
6837 switch (flex->tunnel_mode) {
6838 case FLEX_TUNNEL_MODE_SINGLE:
6839 if (item_flags &
6840 (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX))
6841 rte_flow_error_set(error, EINVAL,
6842 RTE_FLOW_ERROR_TYPE_ITEM,
6843 NULL, "multiple flex items not supported");
6844 break;
6845 case FLEX_TUNNEL_MODE_OUTER:
6846 if (is_inner)
6847 rte_flow_error_set(error, EINVAL,
6848 RTE_FLOW_ERROR_TYPE_ITEM,
6849 NULL, "inner flex item was not configured");
6850 if (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)
6851 rte_flow_error_set(error, ENOTSUP,
6852 RTE_FLOW_ERROR_TYPE_ITEM,
6853 NULL, "multiple flex items not supported");
6854 break;
6855 case FLEX_TUNNEL_MODE_INNER:
6856 if (!is_inner)
6857 rte_flow_error_set(error, EINVAL,
6858 RTE_FLOW_ERROR_TYPE_ITEM,
6859 NULL, "outer flex item was not configured");
6860 if (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)
6861 rte_flow_error_set(error, EINVAL,
6862 RTE_FLOW_ERROR_TYPE_ITEM,
6863 NULL, "multiple flex items not supported");
6864 break;
6865 case FLEX_TUNNEL_MODE_MULTI:
6866 if ((is_inner && (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)) ||
6867 (!is_inner && (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX))) {
6868 rte_flow_error_set(error, EINVAL,
6869 RTE_FLOW_ERROR_TYPE_ITEM,
6870 NULL, "multiple flex items not supported");
6871 }
6872 break;
6873 case FLEX_TUNNEL_MODE_TUNNEL:
6874 if (is_inner || (item_flags & MLX5_FLOW_ITEM_FLEX_TUNNEL))
6875 rte_flow_error_set(error, EINVAL,
6876 RTE_FLOW_ERROR_TYPE_ITEM,
6877 NULL, "multiple flex tunnel items not supported");
6878 break;
6879 default:
6880 rte_flow_error_set(error, EINVAL,
6881 RTE_FLOW_ERROR_TYPE_ITEM,
6882 NULL, "invalid flex item configuration");
6883 }
6884 *last_item = flex->tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ?
6885 MLX5_FLOW_ITEM_FLEX_TUNNEL : is_inner ?
6886 MLX5_FLOW_ITEM_INNER_FLEX : MLX5_FLOW_ITEM_OUTER_FLEX;
6887 return 0;
6888}
6889
6890
6891
6892
6893
6894
6895
6896
6897
6898
6899
6900
6901
6902
6903
6904
6905
6906
6907
6908
6909
6910
6911static int
6912flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6913 const struct rte_flow_item items[],
6914 const struct rte_flow_action actions[],
6915 bool external, int hairpin, struct rte_flow_error *error)
6916{
6917 int ret;
6918 uint64_t aso_mask, action_flags = 0;
6919 uint64_t item_flags = 0;
6920 uint64_t last_item = 0;
6921 uint8_t next_protocol = 0xff;
6922 uint16_t ether_type = 0;
6923 int actions_n = 0;
6924 uint8_t item_ipv6_proto = 0;
6925 int fdb_mirror_limit = 0;
6926 int modify_after_mirror = 0;
6927 const struct rte_flow_item *geneve_item = NULL;
6928 const struct rte_flow_item *gre_item = NULL;
6929 const struct rte_flow_item *gtp_item = NULL;
6930 const struct rte_flow_action_raw_decap *decap;
6931 const struct rte_flow_action_raw_encap *encap;
6932 const struct rte_flow_action_rss *rss = NULL;
6933 const struct rte_flow_action_rss *sample_rss = NULL;
6934 const struct rte_flow_action_count *sample_count = NULL;
6935 const struct rte_flow_item_tcp nic_tcp_mask = {
6936 .hdr = {
6937 .tcp_flags = 0xFF,
6938 .src_port = RTE_BE16(UINT16_MAX),
6939 .dst_port = RTE_BE16(UINT16_MAX),
6940 }
6941 };
6942 const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6943 .hdr = {
6944 .src_addr =
6945 "\xff\xff\xff\xff\xff\xff\xff\xff"
6946 "\xff\xff\xff\xff\xff\xff\xff\xff",
6947 .dst_addr =
6948 "\xff\xff\xff\xff\xff\xff\xff\xff"
6949 "\xff\xff\xff\xff\xff\xff\xff\xff",
6950 .vtc_flow = RTE_BE32(0xffffffff),
6951 .proto = 0xff,
6952 .hop_limits = 0xff,
6953 },
6954 .has_frag_ext = 1,
6955 };
6956 const struct rte_flow_item_ecpri nic_ecpri_mask = {
6957 .hdr = {
6958 .common = {
6959 .u32 =
6960 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6961 .type = 0xFF,
6962 }).u32),
6963 },
6964 .dummy[0] = 0xffffffff,
6965 },
6966 };
6967 struct mlx5_priv *priv = dev->data->dev_private;
6968 struct mlx5_sh_config *dev_conf = &priv->sh->config;
6969 uint16_t queue_index = 0xFFFF;
6970 const struct rte_flow_item_vlan *vlan_m = NULL;
6971 uint32_t rw_act_num = 0;
6972 uint64_t is_root;
6973 const struct mlx5_flow_tunnel *tunnel;
6974 enum mlx5_tof_rule_type tof_rule_type;
6975 struct flow_grp_info grp_info = {
6976 .external = !!external,
6977 .transfer = !!attr->transfer,
6978 .fdb_def_rule = !!priv->fdb_def_rule,
6979 .std_tbl_fix = true,
6980 };
6981 const struct rte_eth_hairpin_conf *conf;
6982 const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
6983 const struct rte_flow_item *port_id_item = NULL;
6984 bool def_policy = false;
6985 bool shared_count = false;
6986 uint16_t udp_dport = 0;
6987 uint32_t tag_id = 0;
6988 const struct rte_flow_action_age *non_shared_age = NULL;
6989 const struct rte_flow_action_count *count = NULL;
6990
6991 if (items == NULL)
6992 return -1;
6993 tunnel = is_tunnel_offload_active(dev) ?
6994 mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6995 if (tunnel) {
6996 if (!dev_conf->dv_flow_en)
6997 return rte_flow_error_set
6998 (error, ENOTSUP,
6999 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7000 NULL, "tunnel offload requires DV flow interface");
7001 if (priv->representor)
7002 return rte_flow_error_set
7003 (error, ENOTSUP,
7004 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7005 NULL, "decap not supported for VF representor");
7006 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
7007 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
7008 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
7009 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
7010 MLX5_FLOW_ACTION_DECAP;
7011 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
7012 (dev, attr, tunnel, tof_rule_type);
7013 }
7014 ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
7015 if (ret < 0)
7016 return ret;
7017 is_root = (uint64_t)ret;
7018 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
7019 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
7020 int type = items->type;
7021
7022 if (!mlx5_flow_os_item_supported(type))
7023 return rte_flow_error_set(error, ENOTSUP,
7024 RTE_FLOW_ERROR_TYPE_ITEM,
7025 NULL, "item not supported");
7026 switch (type) {
7027 case RTE_FLOW_ITEM_TYPE_VOID:
7028 break;
7029 case RTE_FLOW_ITEM_TYPE_ESP:
7030 ret = mlx5_flow_os_validate_item_esp(items, item_flags,
7031 next_protocol,
7032 error);
7033 if (ret < 0)
7034 return ret;
7035 last_item = MLX5_FLOW_ITEM_ESP;
7036 break;
7037 case RTE_FLOW_ITEM_TYPE_PORT_ID:
7038 ret = flow_dv_validate_item_port_id
7039 (dev, items, attr, item_flags, error);
7040 if (ret < 0)
7041 return ret;
7042 last_item = MLX5_FLOW_ITEM_PORT_ID;
7043 port_id_item = items;
7044 break;
7045 case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
7046 ret = flow_dv_validate_item_represented_port
7047 (dev, items, attr, item_flags, error);
7048 if (ret < 0)
7049 return ret;
7050 last_item = MLX5_FLOW_ITEM_REPRESENTED_PORT;
7051 break;
7052 case RTE_FLOW_ITEM_TYPE_ETH:
7053 ret = mlx5_flow_validate_item_eth(items, item_flags,
7054 true, error);
7055 if (ret < 0)
7056 return ret;
7057 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
7058 MLX5_FLOW_LAYER_OUTER_L2;
7059 if (items->mask != NULL && items->spec != NULL) {
7060 ether_type =
7061 ((const struct rte_flow_item_eth *)
7062 items->spec)->type;
7063 ether_type &=
7064 ((const struct rte_flow_item_eth *)
7065 items->mask)->type;
7066 ether_type = rte_be_to_cpu_16(ether_type);
7067 } else {
7068 ether_type = 0;
7069 }
7070 break;
7071 case RTE_FLOW_ITEM_TYPE_VLAN:
7072 ret = flow_dv_validate_item_vlan(items, item_flags,
7073 dev, error);
7074 if (ret < 0)
7075 return ret;
7076 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
7077 MLX5_FLOW_LAYER_OUTER_VLAN;
7078 if (items->mask != NULL && items->spec != NULL) {
7079 ether_type =
7080 ((const struct rte_flow_item_vlan *)
7081 items->spec)->inner_type;
7082 ether_type &=
7083 ((const struct rte_flow_item_vlan *)
7084 items->mask)->inner_type;
7085 ether_type = rte_be_to_cpu_16(ether_type);
7086 } else {
7087 ether_type = 0;
7088 }
7089
7090 if (!tunnel)
7091 vlan_m = items->mask;
7092 break;
7093 case RTE_FLOW_ITEM_TYPE_IPV4:
7094 mlx5_flow_tunnel_ip_check(items, next_protocol,
7095 &item_flags, &tunnel);
7096 ret = flow_dv_validate_item_ipv4(dev, items, item_flags,
7097 last_item, ether_type,
7098 error);
7099 if (ret < 0)
7100 return ret;
7101 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
7102 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
7103 if (items->mask != NULL &&
7104 ((const struct rte_flow_item_ipv4 *)
7105 items->mask)->hdr.next_proto_id) {
7106 next_protocol =
7107 ((const struct rte_flow_item_ipv4 *)
7108 (items->spec))->hdr.next_proto_id;
7109 next_protocol &=
7110 ((const struct rte_flow_item_ipv4 *)
7111 (items->mask))->hdr.next_proto_id;
7112 } else {
7113
7114 next_protocol = 0xff;
7115 }
7116 break;
7117 case RTE_FLOW_ITEM_TYPE_IPV6:
7118 mlx5_flow_tunnel_ip_check(items, next_protocol,
7119 &item_flags, &tunnel);
7120 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
7121 last_item,
7122 ether_type,
7123 &nic_ipv6_mask,
7124 error);
7125 if (ret < 0)
7126 return ret;
7127 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
7128 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
7129 if (items->mask != NULL &&
7130 ((const struct rte_flow_item_ipv6 *)
7131 items->mask)->hdr.proto) {
7132 item_ipv6_proto =
7133 ((const struct rte_flow_item_ipv6 *)
7134 items->spec)->hdr.proto;
7135 next_protocol =
7136 ((const struct rte_flow_item_ipv6 *)
7137 items->spec)->hdr.proto;
7138 next_protocol &=
7139 ((const struct rte_flow_item_ipv6 *)
7140 items->mask)->hdr.proto;
7141 } else {
7142
7143 next_protocol = 0xff;
7144 }
7145 break;
7146 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
7147 ret = flow_dv_validate_item_ipv6_frag_ext(items,
7148 item_flags,
7149 error);
7150 if (ret < 0)
7151 return ret;
7152 last_item = tunnel ?
7153 MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
7154 MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
7155 if (items->mask != NULL &&
7156 ((const struct rte_flow_item_ipv6_frag_ext *)
7157 items->mask)->hdr.next_header) {
7158 next_protocol =
7159 ((const struct rte_flow_item_ipv6_frag_ext *)
7160 items->spec)->hdr.next_header;
7161 next_protocol &=
7162 ((const struct rte_flow_item_ipv6_frag_ext *)
7163 items->mask)->hdr.next_header;
7164 } else {
7165
7166 next_protocol = 0xff;
7167 }
7168 break;
7169 case RTE_FLOW_ITEM_TYPE_TCP:
7170 ret = mlx5_flow_validate_item_tcp
7171 (items, item_flags,
7172 next_protocol,
7173 &nic_tcp_mask,
7174 error);
7175 if (ret < 0)
7176 return ret;
7177 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
7178 MLX5_FLOW_LAYER_OUTER_L4_TCP;
7179 break;
7180 case RTE_FLOW_ITEM_TYPE_UDP:
7181 ret = mlx5_flow_validate_item_udp(items, item_flags,
7182 next_protocol,
7183 error);
7184 const struct rte_flow_item_udp *spec = items->spec;
7185 const struct rte_flow_item_udp *mask = items->mask;
7186 if (!mask)
7187 mask = &rte_flow_item_udp_mask;
7188 if (spec != NULL)
7189 udp_dport = rte_be_to_cpu_16
7190 (spec->hdr.dst_port &
7191 mask->hdr.dst_port);
7192 if (ret < 0)
7193 return ret;
7194 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
7195 MLX5_FLOW_LAYER_OUTER_L4_UDP;
7196 break;
7197 case RTE_FLOW_ITEM_TYPE_GRE:
7198 ret = mlx5_flow_validate_item_gre(items, item_flags,
7199 next_protocol, error);
7200 if (ret < 0)
7201 return ret;
7202 gre_item = items;
7203 last_item = MLX5_FLOW_LAYER_GRE;
7204 break;
7205 case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
7206 ret = mlx5_flow_validate_item_gre_option(dev, items, item_flags,
7207 attr, gre_item, error);
7208 if (ret < 0)
7209 return ret;
7210 last_item = MLX5_FLOW_LAYER_GRE;
7211 break;
7212 case RTE_FLOW_ITEM_TYPE_NVGRE:
7213 ret = mlx5_flow_validate_item_nvgre(items, item_flags,
7214 next_protocol,
7215 error);
7216 if (ret < 0)
7217 return ret;
7218 last_item = MLX5_FLOW_LAYER_NVGRE;
7219 break;
7220 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7221 ret = mlx5_flow_validate_item_gre_key
7222 (items, item_flags, gre_item, error);
7223 if (ret < 0)
7224 return ret;
7225 last_item = MLX5_FLOW_LAYER_GRE_KEY;
7226 break;
7227 case RTE_FLOW_ITEM_TYPE_VXLAN:
7228 ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
7229 items, item_flags,
7230 attr, error);
7231 if (ret < 0)
7232 return ret;
7233 last_item = MLX5_FLOW_LAYER_VXLAN;
7234 break;
7235 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7236 ret = mlx5_flow_validate_item_vxlan_gpe(items,
7237 item_flags, dev,
7238 error);
7239 if (ret < 0)
7240 return ret;
7241 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7242 break;
7243 case RTE_FLOW_ITEM_TYPE_GENEVE:
7244 ret = mlx5_flow_validate_item_geneve(items,
7245 item_flags, dev,
7246 error);
7247 if (ret < 0)
7248 return ret;
7249 geneve_item = items;
7250 last_item = MLX5_FLOW_LAYER_GENEVE;
7251 break;
7252 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
7253 ret = mlx5_flow_validate_item_geneve_opt(items,
7254 last_item,
7255 geneve_item,
7256 dev,
7257 error);
7258 if (ret < 0)
7259 return ret;
7260 last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
7261 break;
7262 case RTE_FLOW_ITEM_TYPE_MPLS:
7263 ret = mlx5_flow_validate_item_mpls(dev, items,
7264 item_flags,
7265 last_item, error);
7266 if (ret < 0)
7267 return ret;
7268 last_item = MLX5_FLOW_LAYER_MPLS;
7269 break;
7270
7271 case RTE_FLOW_ITEM_TYPE_MARK:
7272 ret = flow_dv_validate_item_mark(dev, items, attr,
7273 error);
7274 if (ret < 0)
7275 return ret;
7276 last_item = MLX5_FLOW_ITEM_MARK;
7277 break;
7278 case RTE_FLOW_ITEM_TYPE_META:
7279 ret = flow_dv_validate_item_meta(dev, items, attr,
7280 error);
7281 if (ret < 0)
7282 return ret;
7283 last_item = MLX5_FLOW_ITEM_METADATA;
7284 break;
7285 case RTE_FLOW_ITEM_TYPE_ICMP:
7286 ret = mlx5_flow_validate_item_icmp(items, item_flags,
7287 next_protocol,
7288 error);
7289 if (ret < 0)
7290 return ret;
7291 last_item = MLX5_FLOW_LAYER_ICMP;
7292 break;
7293 case RTE_FLOW_ITEM_TYPE_ICMP6:
7294 ret = mlx5_flow_validate_item_icmp6(items, item_flags,
7295 next_protocol,
7296 error);
7297 if (ret < 0)
7298 return ret;
7299 item_ipv6_proto = IPPROTO_ICMPV6;
7300 last_item = MLX5_FLOW_LAYER_ICMP6;
7301 break;
7302 case RTE_FLOW_ITEM_TYPE_TAG:
7303 ret = flow_dv_validate_item_tag(dev, items,
7304 attr, error);
7305 if (ret < 0)
7306 return ret;
7307 last_item = MLX5_FLOW_ITEM_TAG;
7308 break;
7309 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7310 last_item = MLX5_FLOW_ITEM_TX_QUEUE;
7311 break;
7312 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7313 break;
7314 case RTE_FLOW_ITEM_TYPE_GTP:
7315 ret = flow_dv_validate_item_gtp(dev, items, item_flags,
7316 error);
7317 if (ret < 0)
7318 return ret;
7319 gtp_item = items;
7320 last_item = MLX5_FLOW_LAYER_GTP;
7321 break;
7322 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7323 ret = flow_dv_validate_item_gtp_psc(items, last_item,
7324 gtp_item, attr,
7325 error);
7326 if (ret < 0)
7327 return ret;
7328 last_item = MLX5_FLOW_LAYER_GTP_PSC;
7329 break;
7330 case RTE_FLOW_ITEM_TYPE_ECPRI:
7331
7332 ret = mlx5_flow_validate_item_ecpri(items, item_flags,
7333 last_item,
7334 ether_type,
7335 &nic_ecpri_mask,
7336 error);
7337 if (ret < 0)
7338 return ret;
7339 last_item = MLX5_FLOW_LAYER_ECPRI;
7340 break;
7341 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7342 ret = flow_dv_validate_item_integrity(dev, items,
7343 item_flags,
7344 &last_item,
7345 integrity_items,
7346 error);
7347 if (ret < 0)
7348 return ret;
7349 break;
7350 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7351 ret = flow_dv_validate_item_aso_ct(dev, items,
7352 &item_flags, error);
7353 if (ret < 0)
7354 return ret;
7355 break;
7356 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7357
7358
7359
7360 break;
7361 case RTE_FLOW_ITEM_TYPE_FLEX:
7362 ret = flow_dv_validate_item_flex(dev, items, item_flags,
7363 &last_item,
7364 tunnel != 0, error);
7365 if (ret < 0)
7366 return ret;
7367 break;
7368 default:
7369 return rte_flow_error_set(error, ENOTSUP,
7370 RTE_FLOW_ERROR_TYPE_ITEM,
7371 NULL, "item not supported");
7372 }
7373 item_flags |= last_item;
7374 }
7375 if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
7376 ret = flow_dv_validate_item_integrity_post(integrity_items,
7377 item_flags, error);
7378 if (ret)
7379 return ret;
7380 }
7381 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7382 int type = actions->type;
7383
7384 if (!mlx5_flow_os_action_supported(type))
7385 return rte_flow_error_set(error, ENOTSUP,
7386 RTE_FLOW_ERROR_TYPE_ACTION,
7387 actions,
7388 "action not supported");
7389 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7390 return rte_flow_error_set(error, ENOTSUP,
7391 RTE_FLOW_ERROR_TYPE_ACTION,
7392 actions, "too many actions");
7393 if (action_flags &
7394 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7395 return rte_flow_error_set(error, ENOTSUP,
7396 RTE_FLOW_ERROR_TYPE_ACTION,
7397 NULL, "meter action with policy "
7398 "must be the last action");
7399 switch (type) {
7400 case RTE_FLOW_ACTION_TYPE_VOID:
7401 break;
7402 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7403 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
7404 ret = flow_dv_validate_action_port_id(dev,
7405 action_flags,
7406 actions,
7407 attr,
7408 error);
7409 if (ret)
7410 return ret;
7411 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7412 ++actions_n;
7413 break;
7414 case RTE_FLOW_ACTION_TYPE_FLAG:
7415 ret = flow_dv_validate_action_flag(dev, action_flags,
7416 attr, error);
7417 if (ret < 0)
7418 return ret;
7419 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7420
7421 if (!(action_flags &
7422 MLX5_FLOW_MODIFY_HDR_ACTIONS))
7423 ++actions_n;
7424 action_flags |= MLX5_FLOW_ACTION_FLAG |
7425 MLX5_FLOW_ACTION_MARK_EXT;
7426 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7427 modify_after_mirror = 1;
7428
7429 } else {
7430 action_flags |= MLX5_FLOW_ACTION_FLAG;
7431 ++actions_n;
7432 }
7433 rw_act_num += MLX5_ACT_NUM_SET_MARK;
7434 break;
7435 case RTE_FLOW_ACTION_TYPE_MARK:
7436 ret = flow_dv_validate_action_mark(dev, actions,
7437 action_flags,
7438 attr, error);
7439 if (ret < 0)
7440 return ret;
7441 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7442
7443 if (!(action_flags &
7444 MLX5_FLOW_MODIFY_HDR_ACTIONS))
7445 ++actions_n;
7446 action_flags |= MLX5_FLOW_ACTION_MARK |
7447 MLX5_FLOW_ACTION_MARK_EXT;
7448 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7449 modify_after_mirror = 1;
7450 } else {
7451 action_flags |= MLX5_FLOW_ACTION_MARK;
7452 ++actions_n;
7453 }
7454 rw_act_num += MLX5_ACT_NUM_SET_MARK;
7455 break;
7456 case RTE_FLOW_ACTION_TYPE_SET_META:
7457 ret = flow_dv_validate_action_set_meta(dev, actions,
7458 action_flags,
7459 attr, error);
7460 if (ret < 0)
7461 return ret;
7462
7463 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7464 ++actions_n;
7465 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7466 modify_after_mirror = 1;
7467 action_flags |= MLX5_FLOW_ACTION_SET_META;
7468 rw_act_num += MLX5_ACT_NUM_SET_META;
7469 break;
7470 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7471 ret = flow_dv_validate_action_set_tag(dev, actions,
7472 action_flags,
7473 attr, error);
7474 if (ret < 0)
7475 return ret;
7476
7477 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7478 ++actions_n;
7479 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7480 modify_after_mirror = 1;
7481 tag_id = ((const struct rte_flow_action_set_tag *)
7482 actions->conf)->index;
7483 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7484 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7485 break;
7486 case RTE_FLOW_ACTION_TYPE_DROP:
7487 ret = mlx5_flow_validate_action_drop(action_flags,
7488 attr, error);
7489 if (ret < 0)
7490 return ret;
7491 action_flags |= MLX5_FLOW_ACTION_DROP;
7492 ++actions_n;
7493 break;
7494 case RTE_FLOW_ACTION_TYPE_QUEUE:
7495 ret = mlx5_flow_validate_action_queue(actions,
7496 action_flags, dev,
7497 attr, error);
7498 if (ret < 0)
7499 return ret;
7500 queue_index = ((const struct rte_flow_action_queue *)
7501 (actions->conf))->index;
7502 action_flags |= MLX5_FLOW_ACTION_QUEUE;
7503 ++actions_n;
7504 break;
7505 case RTE_FLOW_ACTION_TYPE_RSS:
7506 rss = actions->conf;
7507 ret = mlx5_flow_validate_action_rss(actions,
7508 action_flags, dev,
7509 attr, item_flags,
7510 error);
7511 if (ret < 0)
7512 return ret;
7513 if (rss && sample_rss &&
7514 (sample_rss->level != rss->level ||
7515 sample_rss->types != rss->types))
7516 return rte_flow_error_set(error, ENOTSUP,
7517 RTE_FLOW_ERROR_TYPE_ACTION,
7518 NULL,
7519 "Can't use the different RSS types "
7520 "or level in the same flow");
7521 if (rss != NULL && rss->queue_num)
7522 queue_index = rss->queue[0];
7523 action_flags |= MLX5_FLOW_ACTION_RSS;
7524 ++actions_n;
7525 break;
7526 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7527 ret =
7528 mlx5_flow_validate_action_default_miss(action_flags,
7529 attr, error);
7530 if (ret < 0)
7531 return ret;
7532 action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7533 ++actions_n;
7534 break;
7535 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7536 shared_count = true;
7537
7538 case RTE_FLOW_ACTION_TYPE_COUNT:
7539 ret = flow_dv_validate_action_count(dev, shared_count,
7540 action_flags,
7541 attr, error);
7542 if (ret < 0)
7543 return ret;
7544 count = actions->conf;
7545 action_flags |= MLX5_FLOW_ACTION_COUNT;
7546 ++actions_n;
7547 break;
7548 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7549 if (flow_dv_validate_action_pop_vlan(dev,
7550 action_flags,
7551 actions,
7552 item_flags, attr,
7553 error))
7554 return -rte_errno;
7555 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7556 modify_after_mirror = 1;
7557 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7558 ++actions_n;
7559 break;
7560 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7561 ret = flow_dv_validate_action_push_vlan(dev,
7562 action_flags,
7563 vlan_m,
7564 actions, attr,
7565 error);
7566 if (ret < 0)
7567 return ret;
7568 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7569 modify_after_mirror = 1;
7570 action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7571 ++actions_n;
7572 break;
7573 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7574 ret = flow_dv_validate_action_set_vlan_pcp
7575 (action_flags, actions, error);
7576 if (ret < 0)
7577 return ret;
7578 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7579 modify_after_mirror = 1;
7580
7581 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7582 break;
7583 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7584 ret = flow_dv_validate_action_set_vlan_vid
7585 (item_flags, action_flags,
7586 actions, error);
7587 if (ret < 0)
7588 return ret;
7589 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7590 modify_after_mirror = 1;
7591
7592 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7593 rw_act_num += MLX5_ACT_NUM_MDF_VID;
7594 break;
7595 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7596 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7597 ret = flow_dv_validate_action_l2_encap(dev,
7598 action_flags,
7599 actions, attr,
7600 error);
7601 if (ret < 0)
7602 return ret;
7603 action_flags |= MLX5_FLOW_ACTION_ENCAP;
7604 ++actions_n;
7605 break;
7606 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7607 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7608 ret = flow_dv_validate_action_decap(dev, action_flags,
7609 actions, item_flags,
7610 attr, error);
7611 if (ret < 0)
7612 return ret;
7613 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7614 modify_after_mirror = 1;
7615 action_flags |= MLX5_FLOW_ACTION_DECAP;
7616 ++actions_n;
7617 break;
7618 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7619 ret = flow_dv_validate_action_raw_encap_decap
7620 (dev, NULL, actions->conf, attr, &action_flags,
7621 &actions_n, actions, item_flags, error);
7622 if (ret < 0)
7623 return ret;
7624 break;
7625 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7626 decap = actions->conf;
7627 while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7628 ;
7629 if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7630 encap = NULL;
7631 actions--;
7632 } else {
7633 encap = actions->conf;
7634 }
7635 ret = flow_dv_validate_action_raw_encap_decap
7636 (dev,
7637 decap ? decap : &empty_decap, encap,
7638 attr, &action_flags, &actions_n,
7639 actions, item_flags, error);
7640 if (ret < 0)
7641 return ret;
7642 if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7643 (action_flags & MLX5_FLOW_ACTION_DECAP))
7644 modify_after_mirror = 1;
7645 break;
7646 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7647 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7648 ret = flow_dv_validate_action_modify_mac(action_flags,
7649 actions,
7650 item_flags,
7651 error);
7652 if (ret < 0)
7653 return ret;
7654
7655 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7656 ++actions_n;
7657 action_flags |= actions->type ==
7658 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7659 MLX5_FLOW_ACTION_SET_MAC_SRC :
7660 MLX5_FLOW_ACTION_SET_MAC_DST;
7661 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7662 modify_after_mirror = 1;
7663
7664
7665
7666
7667
7668
7669
7670 rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7671 break;
7672 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7673 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7674 ret = flow_dv_validate_action_modify_ipv4(action_flags,
7675 actions,
7676 item_flags,
7677 error);
7678 if (ret < 0)
7679 return ret;
7680
7681 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7682 ++actions_n;
7683 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7684 modify_after_mirror = 1;
7685 action_flags |= actions->type ==
7686 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7687 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7688 MLX5_FLOW_ACTION_SET_IPV4_DST;
7689 rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7690 break;
7691 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7692 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7693 ret = flow_dv_validate_action_modify_ipv6(action_flags,
7694 actions,
7695 item_flags,
7696 error);
7697 if (ret < 0)
7698 return ret;
7699 if (item_ipv6_proto == IPPROTO_ICMPV6)
7700 return rte_flow_error_set(error, ENOTSUP,
7701 RTE_FLOW_ERROR_TYPE_ACTION,
7702 actions,
7703 "Can't change header "
7704 "with ICMPv6 proto");
7705
7706 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7707 ++actions_n;
7708 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7709 modify_after_mirror = 1;
7710 action_flags |= actions->type ==
7711 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7712 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7713 MLX5_FLOW_ACTION_SET_IPV6_DST;
7714 rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7715 break;
7716 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7717 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7718 ret = flow_dv_validate_action_modify_tp(action_flags,
7719 actions,
7720 item_flags,
7721 error);
7722 if (ret < 0)
7723 return ret;
7724
7725 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7726 ++actions_n;
7727 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7728 modify_after_mirror = 1;
7729 action_flags |= actions->type ==
7730 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7731 MLX5_FLOW_ACTION_SET_TP_SRC :
7732 MLX5_FLOW_ACTION_SET_TP_DST;
7733 rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7734 break;
7735 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7736 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7737 ret = flow_dv_validate_action_modify_ttl(action_flags,
7738 actions,
7739 item_flags,
7740 error);
7741 if (ret < 0)
7742 return ret;
7743
7744 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7745 ++actions_n;
7746 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7747 modify_after_mirror = 1;
7748 action_flags |= actions->type ==
7749 RTE_FLOW_ACTION_TYPE_SET_TTL ?
7750 MLX5_FLOW_ACTION_SET_TTL :
7751 MLX5_FLOW_ACTION_DEC_TTL;
7752 rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7753 break;
7754 case RTE_FLOW_ACTION_TYPE_JUMP:
7755 ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7756 action_flags,
7757 attr, external,
7758 error);
7759 if (ret)
7760 return ret;
7761 if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7762 fdb_mirror_limit)
7763 return rte_flow_error_set(error, EINVAL,
7764 RTE_FLOW_ERROR_TYPE_ACTION,
7765 NULL,
7766 "sample and jump action combination is not supported");
7767 ++actions_n;
7768 action_flags |= MLX5_FLOW_ACTION_JUMP;
7769 break;
7770 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7771 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7772 ret = flow_dv_validate_action_modify_tcp_seq
7773 (action_flags,
7774 actions,
7775 item_flags,
7776 error);
7777 if (ret < 0)
7778 return ret;
7779
7780 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7781 ++actions_n;
7782 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7783 modify_after_mirror = 1;
7784 action_flags |= actions->type ==
7785 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7786 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7787 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7788 rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7789 break;
7790 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7791 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7792 ret = flow_dv_validate_action_modify_tcp_ack
7793 (action_flags,
7794 actions,
7795 item_flags,
7796 error);
7797 if (ret < 0)
7798 return ret;
7799
7800 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7801 ++actions_n;
7802 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7803 modify_after_mirror = 1;
7804 action_flags |= actions->type ==
7805 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7806 MLX5_FLOW_ACTION_INC_TCP_ACK :
7807 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7808 rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7809 break;
7810 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7811 break;
7812 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7813 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7814 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7815 break;
7816 case RTE_FLOW_ACTION_TYPE_METER:
7817 ret = mlx5_flow_validate_action_meter(dev,
7818 action_flags,
7819 item_flags,
7820 actions, attr,
7821 port_id_item,
7822 &def_policy,
7823 error);
7824 if (ret < 0)
7825 return ret;
7826 action_flags |= MLX5_FLOW_ACTION_METER;
7827 if (!def_policy)
7828 action_flags |=
7829 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7830 ++actions_n;
7831
7832 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7833 break;
7834 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7835 if (!attr->transfer && !attr->group)
7836 return rte_flow_error_set(error, ENOTSUP,
7837 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7838 NULL,
7839 "Shared ASO age action is not supported for group 0");
7840 if (action_flags & MLX5_FLOW_ACTION_AGE)
7841 return rte_flow_error_set
7842 (error, EINVAL,
7843 RTE_FLOW_ERROR_TYPE_ACTION,
7844 NULL,
7845 "duplicate age actions set");
7846 action_flags |= MLX5_FLOW_ACTION_AGE;
7847 ++actions_n;
7848 break;
7849 case RTE_FLOW_ACTION_TYPE_AGE:
7850 non_shared_age = actions->conf;
7851 ret = flow_dv_validate_action_age(action_flags,
7852 actions, dev,
7853 error);
7854 if (ret < 0)
7855 return ret;
7856
7857
7858
7859
7860 if (!flow_hit_aso_supported(priv->sh, attr)) {
7861 if (shared_count)
7862 return rte_flow_error_set
7863 (error, EINVAL,
7864 RTE_FLOW_ERROR_TYPE_ACTION,
7865 NULL,
7866 "old age and indirect count combination is not supported");
7867 if (sample_count)
7868 return rte_flow_error_set
7869 (error, EINVAL,
7870 RTE_FLOW_ERROR_TYPE_ACTION,
7871 NULL,
7872 "old age action and count must be in the same sub flow");
7873 }
7874 action_flags |= MLX5_FLOW_ACTION_AGE;
7875 ++actions_n;
7876 break;
7877 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7878 ret = flow_dv_validate_action_modify_ipv4_dscp
7879 (action_flags,
7880 actions,
7881 item_flags,
7882 error);
7883 if (ret < 0)
7884 return ret;
7885
7886 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7887 ++actions_n;
7888 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7889 modify_after_mirror = 1;
7890 action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7891 rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7892 break;
7893 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7894 ret = flow_dv_validate_action_modify_ipv6_dscp
7895 (action_flags,
7896 actions,
7897 item_flags,
7898 error);
7899 if (ret < 0)
7900 return ret;
7901
7902 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7903 ++actions_n;
7904 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7905 modify_after_mirror = 1;
7906 action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7907 rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7908 break;
7909 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7910 ret = flow_dv_validate_action_sample(&action_flags,
7911 actions, dev,
7912 attr, item_flags,
7913 rss, &sample_rss,
7914 &sample_count,
7915 &fdb_mirror_limit,
7916 error);
7917 if (ret < 0)
7918 return ret;
7919 if ((action_flags & MLX5_FLOW_ACTION_SET_TAG) &&
7920 tag_id == 0 && priv->mtr_color_reg == REG_NON)
7921 return rte_flow_error_set(error, EINVAL,
7922 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7923 "sample after tag action causes metadata tag index 0 corruption");
7924 action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7925 ++actions_n;
7926 break;
7927 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7928 ret = flow_dv_validate_action_modify_field(dev,
7929 action_flags,
7930 actions,
7931 attr,
7932 error);
7933 if (ret < 0)
7934 return ret;
7935 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7936 modify_after_mirror = 1;
7937
7938 if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7939 ++actions_n;
7940 action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7941 rw_act_num += ret;
7942 break;
7943 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7944 ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7945 item_flags, attr,
7946 error);
7947 if (ret < 0)
7948 return ret;
7949 action_flags |= MLX5_FLOW_ACTION_CT;
7950 break;
7951 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7952
7953
7954
7955 break;
7956 default:
7957 return rte_flow_error_set(error, ENOTSUP,
7958 RTE_FLOW_ERROR_TYPE_ACTION,
7959 actions,
7960 "action not supported");
7961 }
7962 }
7963
7964
7965
7966
7967
7968
7969
7970
7971
7972
7973 if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7974 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP |
7975 MLX5_FLOW_ACTION_MARK |
7976 MLX5_FLOW_ACTION_SET_TAG |
7977 MLX5_FLOW_ACTION_SET_META |
7978 MLX5_FLOW_ACTION_DROP;
7979
7980 if (action_flags & bad_actions_mask)
7981 return rte_flow_error_set
7982 (error, EINVAL,
7983 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7984 "Invalid RTE action in tunnel "
7985 "set decap rule");
7986 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7987 return rte_flow_error_set
7988 (error, EINVAL,
7989 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7990 "tunnel set decap rule must terminate "
7991 "with JUMP");
7992 if (!attr->ingress)
7993 return rte_flow_error_set
7994 (error, EINVAL,
7995 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7996 "tunnel flows for ingress traffic only");
7997 }
7998 if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7999 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP |
8000 MLX5_FLOW_ACTION_MARK |
8001 MLX5_FLOW_ACTION_SET_TAG |
8002 MLX5_FLOW_ACTION_SET_META;
8003
8004 if (action_flags & bad_actions_mask)
8005 return rte_flow_error_set
8006 (error, EINVAL,
8007 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
8008 "Invalid RTE action in tunnel "
8009 "set match rule");
8010 }
8011
8012
8013
8014
8015
8016
8017 if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
8018 MLX5_FLOW_ACTION_TUNNEL_MATCH));
8019 else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
8020 (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
8021 return rte_flow_error_set(error, EINVAL,
8022 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
8023 "Drop action is mutually-exclusive "
8024 "with any other action, except for "
8025 "Count action");
8026
8027 if (attr->transfer) {
8028 if (!mlx5_flow_ext_mreg_supported(dev) &&
8029 action_flags & MLX5_FLOW_ACTION_FLAG)
8030 return rte_flow_error_set(error, ENOTSUP,
8031 RTE_FLOW_ERROR_TYPE_ACTION,
8032 NULL,
8033 "unsupported action FLAG");
8034 if (!mlx5_flow_ext_mreg_supported(dev) &&
8035 action_flags & MLX5_FLOW_ACTION_MARK)
8036 return rte_flow_error_set(error, ENOTSUP,
8037 RTE_FLOW_ERROR_TYPE_ACTION,
8038 NULL,
8039 "unsupported action MARK");
8040 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
8041 return rte_flow_error_set(error, ENOTSUP,
8042 RTE_FLOW_ERROR_TYPE_ACTION,
8043 NULL,
8044 "unsupported action QUEUE");
8045 if (action_flags & MLX5_FLOW_ACTION_RSS)
8046 return rte_flow_error_set(error, ENOTSUP,
8047 RTE_FLOW_ERROR_TYPE_ACTION,
8048 NULL,
8049 "unsupported action RSS");
8050 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
8051 return rte_flow_error_set(error, EINVAL,
8052 RTE_FLOW_ERROR_TYPE_ACTION,
8053 actions,
8054 "no fate action is found");
8055 } else {
8056 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
8057 return rte_flow_error_set(error, EINVAL,
8058 RTE_FLOW_ERROR_TYPE_ACTION,
8059 actions,
8060 "no fate action is found");
8061 }
8062
8063
8064
8065
8066
8067
8068 if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
8069 MLX5_FLOW_VLAN_ACTIONS)) &&
8070 (queue_index == 0xFFFF || !mlx5_rxq_is_hairpin(dev, queue_index) ||
8071 ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
8072 conf->tx_explicit != 0))) {
8073 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
8074 MLX5_FLOW_XCAP_ACTIONS)
8075 return rte_flow_error_set(error, ENOTSUP,
8076 RTE_FLOW_ERROR_TYPE_ACTION,
8077 NULL, "encap and decap "
8078 "combination aren't supported");
8079
8080 if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) {
8081 struct mlx5_dev_ctx_shared *sh = priv->sh;
8082 bool direction_error = false;
8083
8084 if (attr->transfer) {
8085 bool fdb_tx = priv->representor_id != UINT16_MAX;
8086 bool is_cx5 = sh->steering_format_version ==
8087 MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
8088
8089 if (!fdb_tx && is_cx5)
8090 direction_error = true;
8091 } else if (attr->ingress) {
8092 direction_error = true;
8093 }
8094 if (direction_error)
8095 return rte_flow_error_set(error, ENOTSUP,
8096 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
8097 NULL,
8098 "push VLAN action not supported "
8099 "for ingress");
8100 }
8101 if (!attr->transfer && attr->ingress) {
8102 if (action_flags & MLX5_FLOW_ACTION_ENCAP)
8103 return rte_flow_error_set
8104 (error, ENOTSUP,
8105 RTE_FLOW_ERROR_TYPE_ACTION,
8106 NULL, "encap is not supported"
8107 " for ingress traffic");
8108 else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
8109 MLX5_FLOW_VLAN_ACTIONS)
8110 return rte_flow_error_set
8111 (error, ENOTSUP,
8112 RTE_FLOW_ERROR_TYPE_ACTION,
8113 NULL, "no support for "
8114 "multiple VLAN actions");
8115 }
8116 }
8117 if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
8118 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
8119 ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
8120 attr->ingress)
8121 return rte_flow_error_set
8122 (error, ENOTSUP,
8123 RTE_FLOW_ERROR_TYPE_ACTION,
8124 NULL, "fate action not supported for "
8125 "meter with policy");
8126 if (attr->egress) {
8127 if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
8128 return rte_flow_error_set
8129 (error, ENOTSUP,
8130 RTE_FLOW_ERROR_TYPE_ACTION,
8131 NULL, "modify header action in egress "
8132 "cannot be done before meter action");
8133 if (action_flags & MLX5_FLOW_ACTION_ENCAP)
8134 return rte_flow_error_set
8135 (error, ENOTSUP,
8136 RTE_FLOW_ERROR_TYPE_ACTION,
8137 NULL, "encap action in egress "
8138 "cannot be done before meter action");
8139 if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
8140 return rte_flow_error_set
8141 (error, ENOTSUP,
8142 RTE_FLOW_ERROR_TYPE_ACTION,
8143 NULL, "push vlan action in egress "
8144 "cannot be done before meter action");
8145 }
8146 }
8147
8148
8149
8150
8151
8152 aso_mask = (action_flags & MLX5_FLOW_ACTION_METER && priv->sh->meter_aso_en) << 2 |
8153 (action_flags & MLX5_FLOW_ACTION_CT && priv->sh->ct_aso_en) << 1 |
8154 (action_flags & MLX5_FLOW_ACTION_AGE &&
8155 !(non_shared_age && count) &&
8156 (attr->group || (attr->transfer && priv->fdb_def_rule)) &&
8157 priv->sh->flow_hit_aso_en);
8158 if (__builtin_popcountl(aso_mask) > 1)
8159 return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
8160 NULL, "unsupported combining AGE, METER, CT ASO actions in a single rule");
8161
8162
8163
8164
8165 if (hairpin > 0)
8166 rw_act_num += MLX5_ACT_NUM_SET_TAG;
8167
8168 if (dev_conf->dv_flow_en &&
8169 dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
8170 mlx5_flow_ext_mreg_supported(dev))
8171 rw_act_num += MLX5_ACT_NUM_SET_TAG;
8172 if (rw_act_num >
8173 flow_dv_modify_hdr_action_max(dev, is_root)) {
8174 return rte_flow_error_set(error, ENOTSUP,
8175 RTE_FLOW_ERROR_TYPE_ACTION,
8176 NULL, "too many header modify"
8177 " actions to support");
8178 }
8179
8180 if (fdb_mirror_limit && modify_after_mirror)
8181 return rte_flow_error_set(error, EINVAL,
8182 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
8183 "sample before modify action is not supported");
8184
8185
8186
8187
8188
8189 if ((!attr->transfer && attr->egress) && priv->representor &&
8190 !(item_flags & MLX5_FLOW_ITEM_TX_QUEUE))
8191 return rte_flow_error_set(error, EINVAL,
8192 RTE_FLOW_ERROR_TYPE_ITEM,
8193 NULL,
8194 "NIC egress rules on representors"
8195 " is not supported");
8196 return 0;
8197}
8198
8199
8200
8201
8202
8203
8204
8205
8206
8207
8208
8209
8210
8211
8212
8213
8214
8215
8216
8217
8218static struct mlx5_flow *
8219flow_dv_prepare(struct rte_eth_dev *dev,
8220 const struct rte_flow_attr *attr __rte_unused,
8221 const struct rte_flow_item items[] __rte_unused,
8222 const struct rte_flow_action actions[] __rte_unused,
8223 struct rte_flow_error *error)
8224{
8225 uint32_t handle_idx = 0;
8226 struct mlx5_flow *dev_flow;
8227 struct mlx5_flow_handle *dev_handle;
8228 struct mlx5_priv *priv = dev->data->dev_private;
8229 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
8230
8231 MLX5_ASSERT(wks);
8232 wks->skip_matcher_reg = 0;
8233 wks->policy = NULL;
8234 wks->final_policy = NULL;
8235
8236 if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
8237 rte_flow_error_set(error, ENOSPC,
8238 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8239 "not free temporary device flow");
8240 return NULL;
8241 }
8242 dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
8243 &handle_idx);
8244 if (!dev_handle) {
8245 rte_flow_error_set(error, ENOMEM,
8246 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8247 "not enough memory to create flow handle");
8248 return NULL;
8249 }
8250 MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
8251 dev_flow = &wks->flows[wks->flow_idx++];
8252 memset(dev_flow, 0, sizeof(*dev_flow));
8253 dev_flow->handle = dev_handle;
8254 dev_flow->handle_idx = handle_idx;
8255 dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
8256 dev_flow->ingress = attr->ingress;
8257 dev_flow->dv.transfer = attr->transfer;
8258 return dev_flow;
8259}
8260
8261#ifdef RTE_LIBRTE_MLX5_DEBUG
8262
8263
8264
8265
8266
8267
8268
8269
8270
8271
8272
8273
8274static int
8275flow_dv_check_valid_spec(void *match_mask, void *match_value)
8276{
8277 uint8_t *m = match_mask;
8278 uint8_t *v = match_value;
8279 unsigned int i;
8280
8281 for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
8282 if (v[i] & ~m[i]) {
8283 DRV_LOG(ERR,
8284 "match_value differs from match_criteria"
8285 " %p[%u] != %p[%u]",
8286 match_value, i, match_mask, i);
8287 return -EINVAL;
8288 }
8289 }
8290 return 0;
8291}
8292#endif
8293
8294
8295
8296
8297
8298
8299
8300
8301
8302
8303
8304
8305
8306static inline void
8307flow_dv_set_match_ip_version(uint32_t group,
8308 void *headers_v,
8309 void *headers_m,
8310 uint8_t ip_version)
8311{
8312 if (group == 0)
8313 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
8314 else
8315 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
8316 ip_version);
8317 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
8318 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
8319 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
8320}
8321
8322
8323
8324
8325
8326
8327
8328
8329
8330
8331
8332
8333
8334static void
8335flow_dv_translate_item_eth(void *matcher, void *key,
8336 const struct rte_flow_item *item, int inner,
8337 uint32_t group)
8338{
8339 const struct rte_flow_item_eth *eth_m = item->mask;
8340 const struct rte_flow_item_eth *eth_v = item->spec;
8341 const struct rte_flow_item_eth nic_mask = {
8342 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8343 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8344 .type = RTE_BE16(0xffff),
8345 .has_vlan = 0,
8346 };
8347 void *hdrs_m;
8348 void *hdrs_v;
8349 char *l24_v;
8350 unsigned int i;
8351
8352 if (!eth_v)
8353 return;
8354 if (!eth_m)
8355 eth_m = &nic_mask;
8356 if (inner) {
8357 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8358 inner_headers);
8359 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8360 } else {
8361 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8362 outer_headers);
8363 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8364 }
8365 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8366 ð_m->dst, sizeof(eth_m->dst));
8367
8368 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8369 for (i = 0; i < sizeof(eth_m->dst); ++i)
8370 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8371 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8372 ð_m->src, sizeof(eth_m->src));
8373 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8374
8375 for (i = 0; i < sizeof(eth_m->dst); ++i)
8376 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8377
8378
8379
8380
8381
8382
8383
8384
8385 if (eth_m->type == 0xFFFF) {
8386
8387 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8388 switch (eth_v->type) {
8389 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8390 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8391 return;
8392 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8393 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8394 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8395 return;
8396 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8397 flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8398 return;
8399 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8400 flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8401 return;
8402 default:
8403 break;
8404 }
8405 }
8406 if (eth_m->has_vlan) {
8407 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8408 if (eth_v->has_vlan) {
8409
8410
8411
8412
8413 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8414 return;
8415 }
8416 }
8417 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8418 rte_be_to_cpu_16(eth_m->type));
8419 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8420 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8421}
8422
8423
8424
8425
8426
8427
8428
8429
8430
8431
8432
8433
8434
8435
8436
/**
 * Add VLAN item to the matcher.
 *
 * @param[in, out] dev_flow
 *   Flow descriptor; its handle's vf_vlan.tag is updated as a side
 *   effect when translating an outer VLAN.
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   Flow group, 0 means the rule is placed on the root table.
 */
static void
flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
			    void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_vlan *vlan_m = item->mask;
	const struct rte_flow_item_vlan *vlan_v = item->spec;
	void *hdrs_m;
	void *hdrs_v;
	uint16_t tci_m;
	uint16_t tci_v;

	if (inner) {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
		/*
		 * Record the outer VLAN ID in the flow handle (used for VF
		 * VLAN workaround elsewhere; no mask applied here).
		 */
		if (vlan_v)
			dev_flow->handle->vf_vlan.tag =
				rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
	}
	/*
	 * When the VLAN item exists in the flow, mark the packet as tagged,
	 * even if TCI is not specified - unless an S-VLAN match was already
	 * installed (e.g. by a preceding QinQ ethertype).
	 */
	if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
	}
	if (!vlan_v)
		return;
	if (!vlan_m)
		vlan_m = &rte_flow_item_vlan_mask;
	/* TCI = PCP(3) | CFI(1) | VID(12); split into the PRM fields. */
	tci_m = rte_be_to_cpu_16(vlan_m->tci);
	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
	/*
	 * Fully-masked inner_type: VLAN/IPv4/IPv6 values are expressed
	 * through the dedicated tag / ip_version fields instead of the raw
	 * ethertype field.
	 */
	if (vlan_m->inner_type == 0xFFFF) {
		switch (vlan_v->inner_type) {
		case RTE_BE16(RTE_ETHER_TYPE_VLAN):
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
			return;
		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
			return;
		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
			return;
		default:
			break;
		}
	}
	if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
		/* Only one vlan_tag bit can be set. */
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
		return;
	}
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
		 rte_be_to_cpu_16(vlan_m->inner_type));
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
		 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
}
8519
8520
8521
8522
8523
8524
8525
8526
8527
8528
8529
8530
8531
8532
8533
/**
 * Add IPV4 item to the matcher.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   Flow group, 0 means the rule is placed on the root table.
 */
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
	/* Default mask used when the item carries no explicit mask. */
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
			.time_to_live = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	char *l24_m;
	char *l24_v;
	uint8_t tos, ihl_m, ihl_v;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* IP version is matched even when the item has no spec. */
	flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
	if (!ipv4_v)
		return;
	if (!ipv4_m)
		ipv4_m = &nic_mask;
	/* Addresses stay big-endian: mask to matcher, masked value to key. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
	tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
	ihl_m = ipv4_m->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
	ihl_v = ipv4_v->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_ihl, ihl_m);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_ihl, ihl_m & ihl_v);
	/* ToS byte splits into ECN (low 2 bits) and DSCP (high 6 bits). */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
		 ipv4_m->hdr.type_of_service);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
		 ipv4_m->hdr.type_of_service >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv4_m->hdr.next_proto_id);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
		 ipv4_m->hdr.time_to_live);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
		 ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
	/* Any non-zero fragment_offset bits mean "match fragments". */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
		 !!(ipv4_m->hdr.fragment_offset));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
		 !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
}
8606
8607
8608
8609
8610
8611
8612
8613
8614
8615
8616
8617
8618
8619
8620
/**
 * Add IPV6 item to the matcher.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   Flow group, 0 means the rule is placed on the root table.
 */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
	/* Default mask used when the item carries no explicit mask. */
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *l24_m;
	char *l24_v;
	uint32_t vtc_m;
	uint32_t vtc_v;
	int i;
	int size;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* IP version is matched even when the item has no spec. */
	flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
	if (!ipv6_v)
		return;
	if (!ipv6_m)
		ipv6_m = &nic_mask;
	/* 16-byte addresses: copy mask, AND mask into value byte-wise. */
	size = sizeof(ipv6_m->hdr.dst_addr);
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
	/* Traffic class: vtc_flow bits 20-27 (ECN at 20, DSCP at 22). */
	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
	/* Flow label lives in misc parameters, not in the L3 header. */
	if (inner) {
		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
			 vtc_v);
	} else {
		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
			 vtc_v);
	}
	/* Protocol (next header). */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv6_m->hdr.proto);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
	/* Hop limit. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
		 ipv6_m->hdr.hop_limits);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
		 ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
	/* Fragment matching is driven by the has_frag_ext meta field. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
		 !!(ipv6_m->has_frag_ext));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
		 !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
}
8715
8716
8717
8718
8719
8720
8721
8722
8723
8724
8725
8726
8727
8728static void
8729flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8730 const struct rte_flow_item *item,
8731 int inner)
8732{
8733 const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8734 const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8735 const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8736 .hdr = {
8737 .next_header = 0xff,
8738 .frag_data = RTE_BE16(0xffff),
8739 },
8740 };
8741 void *headers_m;
8742 void *headers_v;
8743
8744 if (inner) {
8745 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8746 inner_headers);
8747 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8748 } else {
8749 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8750 outer_headers);
8751 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8752 }
8753
8754 MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8755 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8756 if (!ipv6_frag_ext_v)
8757 return;
8758 if (!ipv6_frag_ext_m)
8759 ipv6_frag_ext_m = &nic_mask;
8760 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8761 ipv6_frag_ext_m->hdr.next_header);
8762 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8763 ipv6_frag_ext_v->hdr.next_header &
8764 ipv6_frag_ext_m->hdr.next_header);
8765}
8766
8767
8768
8769
8770
8771
8772
8773
8774
8775
8776
8777
8778
8779static void
8780flow_dv_translate_item_tcp(void *matcher, void *key,
8781 const struct rte_flow_item *item,
8782 int inner)
8783{
8784 const struct rte_flow_item_tcp *tcp_m = item->mask;
8785 const struct rte_flow_item_tcp *tcp_v = item->spec;
8786 void *headers_m;
8787 void *headers_v;
8788
8789 if (inner) {
8790 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8791 inner_headers);
8792 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8793 } else {
8794 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8795 outer_headers);
8796 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8797 }
8798 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8799 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8800 if (!tcp_v)
8801 return;
8802 if (!tcp_m)
8803 tcp_m = &rte_flow_item_tcp_mask;
8804 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8805 rte_be_to_cpu_16(tcp_m->hdr.src_port));
8806 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8807 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8808 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8809 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8810 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8811 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8812 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8813 tcp_m->hdr.tcp_flags);
8814 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8815 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8816}
8817
8818
8819
8820
8821
8822
8823
8824
8825
8826
8827
8828
8829
8830static void
8831flow_dv_translate_item_esp(void *matcher, void *key,
8832 const struct rte_flow_item *item,
8833 int inner)
8834{
8835 const struct rte_flow_item_esp *esp_m = item->mask;
8836 const struct rte_flow_item_esp *esp_v = item->spec;
8837 void *headers_m;
8838 void *headers_v;
8839 char *spi_m;
8840 char *spi_v;
8841
8842 if (inner) {
8843 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8844 inner_headers);
8845 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8846 } else {
8847 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8848 outer_headers);
8849 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8850 }
8851 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8852 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ESP);
8853 if (!esp_v)
8854 return;
8855 if (!esp_m)
8856 esp_m = &rte_flow_item_esp_mask;
8857 headers_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8858 headers_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8859 if (inner) {
8860 spi_m = MLX5_ADDR_OF(fte_match_set_misc, headers_m, inner_esp_spi);
8861 spi_v = MLX5_ADDR_OF(fte_match_set_misc, headers_v, inner_esp_spi);
8862 } else {
8863 spi_m = MLX5_ADDR_OF(fte_match_set_misc, headers_m, outer_esp_spi);
8864 spi_v = MLX5_ADDR_OF(fte_match_set_misc, headers_v, outer_esp_spi);
8865 }
8866 *(uint32_t *)spi_m = esp_m->hdr.spi;
8867 *(uint32_t *)spi_v = esp_m->hdr.spi & esp_v->hdr.spi;
8868}
8869
8870
8871
8872
8873
8874
8875
8876
8877
8878
8879
8880
8881
8882static void
8883flow_dv_translate_item_udp(void *matcher, void *key,
8884 const struct rte_flow_item *item,
8885 int inner)
8886{
8887 const struct rte_flow_item_udp *udp_m = item->mask;
8888 const struct rte_flow_item_udp *udp_v = item->spec;
8889 void *headers_m;
8890 void *headers_v;
8891
8892 if (inner) {
8893 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8894 inner_headers);
8895 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8896 } else {
8897 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8898 outer_headers);
8899 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8900 }
8901 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8902 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8903 if (!udp_v)
8904 return;
8905 if (!udp_m)
8906 udp_m = &rte_flow_item_udp_mask;
8907 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8908 rte_be_to_cpu_16(udp_m->hdr.src_port));
8909 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8910 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8911 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8912 rte_be_to_cpu_16(udp_m->hdr.dst_port));
8913 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8914 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8915}
8916
8917
8918
8919
8920
8921
8922
8923
8924
8925
8926
8927
8928
8929static void
8930flow_dv_translate_item_gre_key(void *matcher, void *key,
8931 const struct rte_flow_item *item)
8932{
8933 const rte_be32_t *key_m = item->mask;
8934 const rte_be32_t *key_v = item->spec;
8935 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8936 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8937 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8938
8939
8940 MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8941 MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8942 if (!key_v)
8943 return;
8944 if (!key_m)
8945 key_m = &gre_key_default_mask;
8946 MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8947 rte_be_to_cpu_32(*key_m) >> 8);
8948 MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8949 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8950 MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8951 rte_be_to_cpu_32(*key_m) & 0xFF);
8952 MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8953 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8954}
8955
8956
8957
8958
8959
8960
8961
8962
8963
8964
8965
8966
8967
/**
 * Add GRE item to the matcher.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] pattern_flags
 *   Accumulated MLX5_FLOW_LAYER_* bits of the whole pattern, used to
 *   deduce the GRE protocol when the item does not constrain it.
 */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   uint64_t pattern_flags)
{
	static const struct rte_flow_item_gre empty_gre = {0,};
	const struct rte_flow_item_gre *gre_m = item->mask;
	const struct rte_flow_item_gre *gre_v = item->spec;
	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	/*
	 * Host-endian view of the GRE c_rsvd0_ver field after byte swap.
	 * NOTE(review): the bitfield order assumes the compiler allocates
	 * bits LSB-first within the uint16_t - confirm on big-endian hosts.
	 */
	struct {
		union {
			__extension__
			struct {
				uint16_t version:3;
				uint16_t rsvd0:9;
				uint16_t s_present:1;
				uint16_t k_present:1;
				uint16_t rsvd_bit1:1;
				uint16_t c_present:1;
			};
			uint16_t value;
		};
	} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
	uint16_t protocol_m, protocol_v;

	/* Matching on GRE implies an exact match on the IP protocol. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
	if (!gre_v) {
		gre_v = &empty_gre;
		gre_m = &empty_gre;
	} else {
		if (!gre_m)
			gre_m = &rte_flow_item_gre_mask;
	}
	gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
	gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
	/* C/K/S presence bits, each masked by the item mask. */
	MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
		 gre_crks_rsvd0_ver_v.c_present &
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
		 gre_crks_rsvd0_ver_v.k_present &
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
		 gre_crks_rsvd0_ver_m.s_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
		 gre_crks_rsvd0_ver_v.s_present &
		 gre_crks_rsvd0_ver_m.s_present);
	protocol_m = rte_be_to_cpu_16(gre_m->protocol);
	protocol_v = rte_be_to_cpu_16(gre_v->protocol);
	if (!protocol_m) {
		/* Deduce the inner protocol from the rest of the pattern. */
		protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
		if (protocol_v)
			protocol_m = 0xFFFF;
	}
	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m);
	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
		 protocol_m & protocol_v);
}
9034
9035
9036
9037
9038
9039
9040
9041
9042
9043
9044
9045
9046
9047
9048
/**
 * Add GRE optional-field item (checksum/key/sequence) to the matcher.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   GRE option flow pattern to translate.
 * @param[in] gre_item
 *   The preceding GRE item this option belongs to.
 * @param[in] pattern_flags
 *   Accumulated MLX5_FLOW_LAYER_* bits of the whole pattern.
 */
static void
flow_dv_translate_item_gre_option(void *matcher, void *key,
				  const struct rte_flow_item *item,
				  const struct rte_flow_item *gre_item,
				  uint64_t pattern_flags)
{
	const struct rte_flow_item_gre_opt *option_m = item->mask;
	const struct rte_flow_item_gre_opt *option_v = item->spec;
	const struct rte_flow_item_gre *gre_m = gre_item->mask;
	const struct rte_flow_item_gre *gre_v = gre_item->spec;
	static const struct rte_flow_item_gre empty_gre = {0};
	struct rte_flow_item gre_key_item;
	uint16_t c_rsvd0_ver_m, c_rsvd0_ver_v;
	uint16_t protocol_m, protocol_v;
	void *misc5_m;
	void *misc5_v;

	/*
	 * If only the key field is requested, the regular GRE + GRE-key
	 * translation (misc parameters) is sufficient. Checksum and
	 * sequence can only be matched via the misc5 tunnel headers.
	 * NOTE(review): option_m is dereferenced without a NULL check -
	 * presumably validation guarantees a mask for this item; confirm
	 * against the validation path.
	 */
	if (!(option_m->sequence.sequence ||
	      option_m->checksum_rsvd.checksum)) {
		flow_dv_translate_item_gre(matcher, key, gre_item,
					   pattern_flags);
		gre_key_item.spec = &option_v->key.key;
		gre_key_item.mask = &option_m->key.key;
		flow_dv_translate_item_gre_key(matcher, key, &gre_key_item);
		return;
	}
	if (!gre_v) {
		gre_v = &empty_gre;
		gre_m = &empty_gre;
	} else {
		if (!gre_m)
			gre_m = &rte_flow_item_gre_mask;
	}
	protocol_v = gre_v->protocol;
	protocol_m = gre_m->protocol;
	if (!protocol_m) {
		/* Deduce the inner protocol from the rest of the pattern. */
		uint16_t ether_type =
			mlx5_translate_tunnel_etypes(pattern_flags);
		if (ether_type) {
			protocol_v = rte_be_to_cpu_16(ether_type);
			protocol_m = UINT16_MAX;
		}
	}
	c_rsvd0_ver_v = gre_v->c_rsvd0_ver;
	c_rsvd0_ver_m = gre_m->c_rsvd0_ver;
	/* Force the S/K/C presence bits for each requested option. */
	if (option_m->sequence.sequence) {
		c_rsvd0_ver_v |= RTE_BE16(0x1000);
		c_rsvd0_ver_m |= RTE_BE16(0x1000);
	}
	if (option_m->key.key) {
		c_rsvd0_ver_v |= RTE_BE16(0x2000);
		c_rsvd0_ver_m |= RTE_BE16(0x2000);
	}
	if (option_m->checksum_rsvd.checksum) {
		c_rsvd0_ver_v |= RTE_BE16(0x8000);
		c_rsvd0_ver_m |= RTE_BE16(0x8000);
	}
	/*
	 * Match the whole GRE header through misc5 tunnel headers:
	 * header_0 = flags|version|protocol, header_1 = checksum,
	 * header_2 = key, header_3 = sequence.
	 */
	misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
	misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
	MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_0,
		 rte_be_to_cpu_32((c_rsvd0_ver_v | protocol_v << 16) &
				  (c_rsvd0_ver_m | protocol_m << 16)));
	MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_0,
		 rte_be_to_cpu_32(c_rsvd0_ver_m | protocol_m << 16));
	MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_1,
		 rte_be_to_cpu_32(option_v->checksum_rsvd.checksum &
				  option_m->checksum_rsvd.checksum));
	MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_1,
		 rte_be_to_cpu_32(option_m->checksum_rsvd.checksum));
	MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_2,
		 rte_be_to_cpu_32(option_v->key.key & option_m->key.key));
	MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_2,
		 rte_be_to_cpu_32(option_m->key.key));
	MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_3,
		 rte_be_to_cpu_32(option_v->sequence.sequence &
				  option_m->sequence.sequence));
	MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_3,
		 rte_be_to_cpu_32(option_m->sequence.sequence));
}
9138
9139
9140
9141
9142
9143
9144
9145
9146
9147
9148
9149
9150
9151static void
9152flow_dv_translate_item_nvgre(void *matcher, void *key,
9153 const struct rte_flow_item *item,
9154 unsigned long pattern_flags)
9155{
9156 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
9157 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
9158 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9159 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9160 const char *tni_flow_id_m;
9161 const char *tni_flow_id_v;
9162 char *gre_key_m;
9163 char *gre_key_v;
9164 int size;
9165 int i;
9166
9167
9168 const struct rte_flow_item_gre gre_spec = {
9169 .c_rsvd0_ver = RTE_BE16(0x2000),
9170 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
9171 };
9172 const struct rte_flow_item_gre gre_mask = {
9173 .c_rsvd0_ver = RTE_BE16(0xB000),
9174 .protocol = RTE_BE16(UINT16_MAX),
9175 };
9176 const struct rte_flow_item gre_item = {
9177 .spec = &gre_spec,
9178 .mask = &gre_mask,
9179 .last = NULL,
9180 };
9181 flow_dv_translate_item_gre(matcher, key, &gre_item, pattern_flags);
9182 if (!nvgre_v)
9183 return;
9184 if (!nvgre_m)
9185 nvgre_m = &rte_flow_item_nvgre_mask;
9186 tni_flow_id_m = (const char *)nvgre_m->tni;
9187 tni_flow_id_v = (const char *)nvgre_v->tni;
9188 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
9189 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
9190 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
9191 memcpy(gre_key_m, tni_flow_id_m, size);
9192 for (i = 0; i < size; ++i)
9193 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
9194}
9195
9196
9197
9198
9199
9200
9201
9202
9203
9204
9205
9206
9207
9208
9209
9210
9211
/**
 * Add VXLAN item to the matcher.
 *
 * @param[in] dev
 *   Ethernet device, queried for steering format and capabilities.
 * @param[in] attr
 *   Flow rule attributes; group/transfer select the matching method.
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     void *matcher, void *key,
			     const struct rte_flow_item *item,
			     int inner)
{
	const struct rte_flow_item_vxlan *vxlan_m = item->mask;
	const struct rte_flow_item_vxlan *vxlan_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc5_m;
	void *misc5_v;
	uint32_t *tunnel_header_v;
	uint32_t *tunnel_header_m;
	uint16_t dport;
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Extended default mask also covering the rsvd1 byte. */
	const struct rte_flow_item_vxlan nic_mask = {
		.vni = "\xff\xff\xff",
		.rsvd1 = 0xff,
	};

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Default the UDP dport unless a preceding UDP item already set it. */
	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	/* Re-read the effective dport, it drives the match-method choice. */
	dport = MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport);
	if (!vxlan_v)
		return;
	if (!vxlan_m) {
		/* Pick the widest default mask the device can honor. */
		if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
		    (attr->group && !priv->sh->misc5_cap))
			vxlan_m = &rte_flow_item_vxlan_mask;
		else
			vxlan_m = &nic_mask;
	}
	/*
	 * Legacy path: match the VNI through the misc parameters when
	 * misc5 tunnel headers cannot be used - ConnectX-5 steering format
	 * with a non-default port, or missing caps for this domain.
	 */
	if ((priv->sh->steering_format_version ==
	    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&
	    dport != MLX5_UDP_PORT_VXLAN) ||
	    (!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
	    ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
		void *misc_m;
		void *misc_v;
		char *vni_m;
		char *vni_v;
		int size;
		int i;
		misc_m = MLX5_ADDR_OF(fte_match_param,
				      matcher, misc_parameters);
		misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
		size = sizeof(vxlan_m->vni);
		vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
		vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
		memcpy(vni_m, vxlan_m->vni, size);
		for (i = 0; i < size; ++i)
			vni_v[i] = vni_m[i] & vxlan_v->vni[i];
		return;
	}
	/* Preferred path: pack VNI (and rsvd1) into misc5 tunnel_header_1. */
	misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
	misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
	tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
						   misc5_v,
						   tunnel_header_1);
	tunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
						   misc5_m,
						   tunnel_header_1);
	*tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |
			   (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |
			   (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;
	if (*tunnel_header_v)
		*tunnel_header_m = vxlan_m->vni[0] |
				   vxlan_m->vni[1] << 8 |
				   vxlan_m->vni[2] << 16;
	else
		*tunnel_header_m = 0x0;
	/* rsvd1 occupies the top byte of the tunnel header. */
	*tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;
	if (vxlan_v->rsvd1 & vxlan_m->rsvd1)
		*tunnel_header_m |= vxlan_m->rsvd1 << 24;
}
9302
9303
9304
9305
9306
9307
9308
9309
9310
9311
9312
9313
9314
9315
9316static void
9317flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
9318 const struct rte_flow_item *item,
9319 const uint64_t pattern_flags)
9320{
9321 static const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, };
9322 const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
9323 const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
9324
9325 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9326 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9327 void *misc_m =
9328 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
9329 void *misc_v =
9330 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9331 char *vni_m =
9332 MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
9333 char *vni_v =
9334 MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
9335 int i, size = sizeof(vxlan_m->vni);
9336 uint8_t flags_m = 0xff;
9337 uint8_t flags_v = 0xc;
9338 uint8_t m_protocol, v_protocol;
9339
9340 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9341 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9342 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9343 MLX5_UDP_PORT_VXLAN_GPE);
9344 }
9345 if (!vxlan_v) {
9346 vxlan_v = &dummy_vxlan_gpe_hdr;
9347 vxlan_m = &dummy_vxlan_gpe_hdr;
9348 } else {
9349 if (!vxlan_m)
9350 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
9351 }
9352 memcpy(vni_m, vxlan_m->vni, size);
9353 for (i = 0; i < size; ++i)
9354 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
9355 if (vxlan_m->flags) {
9356 flags_m = vxlan_m->flags;
9357 flags_v = vxlan_v->flags;
9358 }
9359 MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
9360 MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
9361 m_protocol = vxlan_m->protocol;
9362 v_protocol = vxlan_v->protocol;
9363 if (!m_protocol) {
9364
9365 if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
9366 v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
9367 else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
9368 v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
9369 else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
9370 v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
9371 if (v_protocol)
9372 m_protocol = 0xFF;
9373 }
9374 MLX5_SET(fte_match_set_misc3, misc_m,
9375 outer_vxlan_gpe_next_protocol, m_protocol);
9376 MLX5_SET(fte_match_set_misc3, misc_v,
9377 outer_vxlan_gpe_next_protocol, m_protocol & v_protocol);
9378}
9379
9380
9381
9382
9383
9384
9385
9386
9387
9388
9389
9390
9391
9392
/**
 * Add Geneve item to the matcher.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] pattern_flags
 *   Accumulated MLX5_FLOW_LAYER_* bits of the whole pattern, used to
 *   deduce the protocol when the item does not constrain it.
 */
static void
flow_dv_translate_item_geneve(void *matcher, void *key,
			      const struct rte_flow_item *item,
			      uint64_t pattern_flags)
{
	static const struct rte_flow_item_geneve empty_geneve = {0,};
	const struct rte_flow_item_geneve *geneve_m = item->mask;
	const struct rte_flow_item_geneve *geneve_v = item->spec;
	/* GENEVE flows are always tunneled - match on the outer headers. */
	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	uint16_t gbhdr_m;
	uint16_t gbhdr_v;
	char *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
	char *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
	size_t size = sizeof(geneve_m->vni), i;
	uint16_t protocol_m, protocol_v;

	/* Default the UDP dport unless a preceding UDP item already set it. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
			 MLX5_UDP_PORT_GENEVE);
	}
	if (!geneve_v) {
		geneve_v = &empty_geneve;
		geneve_m = &empty_geneve;
	} else {
		if (!geneve_m)
			geneve_m = &rte_flow_item_geneve_mask;
	}
	memcpy(vni_m, geneve_m->vni, size);
	for (i = 0; i < size; ++i)
		vni_v[i] = vni_m[i] & geneve_v->vni[i];
	/* OAM flag and option length are bit slices of ver_opt_len_o_c_rsvd0. */
	gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
	gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
	MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
		 MLX5_GENEVE_OAMF_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
		 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
	protocol_m = rte_be_to_cpu_16(geneve_m->protocol);
	protocol_v = rte_be_to_cpu_16(geneve_v->protocol);
	if (!protocol_m) {
		/* Deduce the inner protocol from the rest of the pattern. */
		protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
		if (protocol_v)
			protocol_m = 0xFFFF;
	}
	MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);
	MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
		 protocol_m & protocol_v);
}
9451
9452
9453
9454
9455
9456
9457
9458
9459
9460
9461
9462
9463
9464
9465
9466
9467
/**
 * Register a GENEVE TLV option Devx resource on the shared device context.
 *
 * Only one TLV option is supported per device. If a resource already
 * exists it must match the requested class/type/length, in which case its
 * reference counter is incremented; a mismatch fails the registration.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] item
 *   GENEVE option flow pattern item; its spec must be present.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative value set by rte_flow_error_set() otherwise
 *   (or -1 when the item has no spec).
 */
int
flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
					     const struct rte_flow_item *item,
					     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
			sh->geneve_tlv_option_resource;
	struct mlx5_devx_obj *obj;
	const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
	int ret = 0;

	if (!geneve_opt_v)
		return -1;
	/* Serialize lookup/creation of the single shared resource. */
	rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
	if (geneve_opt_resource != NULL) {
		if (geneve_opt_resource->option_class ==
			geneve_opt_v->option_class &&
			geneve_opt_resource->option_type ==
			geneve_opt_v->option_type &&
			geneve_opt_resource->length ==
			geneve_opt_v->option_len) {
			/* Same option - just take another reference. */
			__atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
					   __ATOMIC_RELAXED);
		} else {
			ret = rte_flow_error_set(error, ENOMEM,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Only one GENEVE TLV option supported");
			goto exit;
		}
	} else {
		/* Create a new GENEVE TLV option Devx object. */
		obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->cdev->ctx,
				geneve_opt_v->option_class,
				geneve_opt_v->option_type,
				geneve_opt_v->option_len);
		if (!obj) {
			ret = rte_flow_error_set(error, ENODATA,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to create GENEVE TLV Devx object");
			goto exit;
		}
		sh->geneve_tlv_option_resource =
				mlx5_malloc(MLX5_MEM_ZERO,
						sizeof(*geneve_opt_resource),
						0, SOCKET_ID_ANY);
		if (!sh->geneve_tlv_option_resource) {
			/* Roll back the Devx object on allocation failure. */
			claim_zero(mlx5_devx_cmd_destroy(obj));
			ret = rte_flow_error_set(error, ENOMEM,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"GENEVE TLV object memory allocation failed");
			goto exit;
		}
		geneve_opt_resource = sh->geneve_tlv_option_resource;
		geneve_opt_resource->obj = obj;
		geneve_opt_resource->option_class = geneve_opt_v->option_class;
		geneve_opt_resource->option_type = geneve_opt_v->option_type;
		geneve_opt_resource->length = geneve_opt_v->option_len;
		__atomic_store_n(&geneve_opt_resource->refcnt, 1,
				__ATOMIC_RELAXED);
	}
exit:
	rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
	return ret;
}
9535
9536
9537
9538
9539
9540
9541
9542
9543
9544
9545
9546
9547
9548
9549
/**
 * Add GENEVE TLV option item to the matcher and to the value.
 *
 * Registers (or references) the device-wide GENEVE TLV option resource,
 * then matches the option length, the option-0 existence bit and up to
 * one dword of the option data.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value (spec side).
 * @param[in] item
 *   Flow pattern item to translate.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative value otherwise (resource registration error).
 */
static int
flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
				  void *key, const struct rte_flow_item *item,
				  struct rte_flow_error *error)
{
	const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
	const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
			misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	rte_be32_t opt_data_key = 0, opt_data_mask = 0;
	int ret = 0;

	if (!geneve_opt_v)
		return -1;
	if (!geneve_opt_m)
		geneve_opt_m = &rte_flow_item_geneve_opt_mask;
	ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
							   error);
	if (ret) {
		DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
		return ret;
	}
	/*
	 * If the GENEVE option length was not matched yet by a GENEVE
	 * item, match it here as option_len + 1 dwords (header included).
	 */
	if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
		MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
			 MLX5_GENEVE_OPTLEN_MASK);
		MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
			 geneve_opt_v->option_len + 1);
	}
	/* Require the configured TLV option to be present in the packet. */
	MLX5_SET(fte_match_set_misc, misc_m, geneve_tlv_option_0_exist, 1);
	MLX5_SET(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist, 1);
	/* Set the data; only the first dword of the option can be matched. */
	if (geneve_opt_v->data) {
		memcpy(&opt_data_key, geneve_opt_v->data,
		       RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
				sizeof(opt_data_key)));
		MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
			    sizeof(opt_data_key));
		/*
		 * NOTE(review): geneve_opt_m->data is dereferenced without a
		 * NULL check here - presumably validation guarantees the mask
		 * carries data whenever the spec does; confirm upstream.
		 */
		memcpy(&opt_data_mask, geneve_opt_m->data,
		       RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
				sizeof(opt_data_mask)));
		MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
			    sizeof(opt_data_mask));
		MLX5_SET(fte_match_set_misc3, misc3_m,
			 geneve_tlv_option_0_data,
			 rte_be_to_cpu_32(opt_data_mask));
		MLX5_SET(fte_match_set_misc3, misc3_v,
			 geneve_tlv_option_0_data,
			 rte_be_to_cpu_32(opt_data_key & opt_data_mask));
	}
	return ret;
}
9611
9612
9613
9614
9615
9616
9617
9618
9619
9620
9621
9622
9623
9624
9625
9626static void
9627flow_dv_translate_item_mpls(void *matcher, void *key,
9628 const struct rte_flow_item *item,
9629 uint64_t prev_layer,
9630 int inner)
9631{
9632 const uint32_t *in_mpls_m = item->mask;
9633 const uint32_t *in_mpls_v = item->spec;
9634 uint32_t *out_mpls_m = 0;
9635 uint32_t *out_mpls_v = 0;
9636 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9637 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9638 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9639 misc_parameters_2);
9640 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9641 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9642 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9643
9644 switch (prev_layer) {
9645 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9646 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9647 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
9648 0xffff);
9649 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9650 MLX5_UDP_PORT_MPLS);
9651 }
9652 break;
9653 case MLX5_FLOW_LAYER_GRE:
9654
9655 case MLX5_FLOW_LAYER_GRE_KEY:
9656 if (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) {
9657 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
9658 0xffff);
9659 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9660 RTE_ETHER_TYPE_MPLS);
9661 }
9662 break;
9663 default:
9664 break;
9665 }
9666 if (!in_mpls_v)
9667 return;
9668 if (!in_mpls_m)
9669 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9670 switch (prev_layer) {
9671 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9672 out_mpls_m =
9673 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9674 outer_first_mpls_over_udp);
9675 out_mpls_v =
9676 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9677 outer_first_mpls_over_udp);
9678 break;
9679 case MLX5_FLOW_LAYER_GRE:
9680 out_mpls_m =
9681 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9682 outer_first_mpls_over_gre);
9683 out_mpls_v =
9684 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9685 outer_first_mpls_over_gre);
9686 break;
9687 default:
9688
9689 if (!inner) {
9690 out_mpls_m =
9691 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9692 misc2_m,
9693 outer_first_mpls);
9694 out_mpls_v =
9695 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9696 misc2_v,
9697 outer_first_mpls);
9698 }
9699 break;
9700 }
9701 if (out_mpls_m && out_mpls_v) {
9702 *out_mpls_m = *in_mpls_m;
9703 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9704 }
9705}
9706
9707
9708
9709
9710
9711
9712
9713
9714
9715
9716
9717
9718
9719
9720
/**
 * Add metadata register item to matcher.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value (spec side).
 * @param[in] reg_type
 *   Type of device metadata register.
 * @param[in] data
 *   Register data to match (pre-masked with @p mask here).
 * @param[in] mask
 *   Register mask.
 */
static void
flow_dv_match_meta_reg(void *matcher, void *key,
		       enum modify_reg reg_type,
		       uint32_t data, uint32_t mask)
{
	void *misc2_m =
		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
	void *misc2_v =
		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
	uint32_t temp;

	data &= mask;
	switch (reg_type) {
	case REG_A:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
		break;
	case REG_B:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
		break;
	case REG_C_0:
		/*
		 * REG_C_0 may be shared by several features (its bits are
		 * split between them), so merge the new mask/data into the
		 * bits already programmed instead of overwriting them.
		 */
		temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
		temp |= mask;
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
		temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
		temp &= ~mask;
		temp |= data;
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
		break;
	case REG_C_1:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
		break;
	case REG_C_2:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
		break;
	case REG_C_3:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
		break;
	case REG_C_4:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
		break;
	case REG_C_5:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
		break;
	case REG_C_6:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
		break;
	case REG_C_7:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
		break;
	default:
		/* REG_NON and unknown registers must not reach here. */
		MLX5_ASSERT(false);
		break;
	}
}
9789
9790
9791
9792
9793
9794
9795
9796
9797
9798
9799
9800
9801
9802static void
9803flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9804 void *matcher, void *key,
9805 const struct rte_flow_item *item)
9806{
9807 struct mlx5_priv *priv = dev->data->dev_private;
9808 const struct rte_flow_item_mark *mark;
9809 uint32_t value;
9810 uint32_t mask;
9811
9812 mark = item->mask ? (const void *)item->mask :
9813 &rte_flow_item_mark_mask;
9814 mask = mark->id & priv->sh->dv_mark_mask;
9815 mark = (const void *)item->spec;
9816 MLX5_ASSERT(mark);
9817 value = mark->id & priv->sh->dv_mark_mask & mask;
9818 if (mask) {
9819 enum modify_reg reg;
9820
9821
9822 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9823 MLX5_ASSERT(reg > 0);
9824 if (reg == REG_C_0) {
9825 struct mlx5_priv *priv = dev->data->dev_private;
9826 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9827 uint32_t shl_c0 = rte_bsf32(msk_c0);
9828
9829 mask &= msk_c0;
9830 mask <<= shl_c0;
9831 value <<= shl_c0;
9832 }
9833 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9834 }
9835}
9836
9837
9838
9839
9840
9841
9842
9843
9844
9845
9846
9847
9848
9849
9850
/**
 * Add META item to the matcher and to the value.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value (spec side).
 * @param[in] attr
 *   Flow attributes, used to select the metadata register.
 * @param[in] item
 *   Flow pattern item to translate; silently ignored when spec is absent
 *   or no register is available.
 */
static void
flow_dv_translate_item_meta(struct rte_eth_dev *dev,
			    void *matcher, void *key,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item *item)
{
	const struct rte_flow_item_meta *meta_m;
	const struct rte_flow_item_meta *meta_v;

	meta_m = (const void *)item->mask;
	if (!meta_m)
		meta_m = &rte_flow_item_meta_mask;
	meta_v = (const void *)item->spec;
	if (meta_v) {
		int reg;
		uint32_t value = meta_v->data;
		uint32_t mask = meta_m->data;

		reg = flow_dv_get_metadata_reg(dev, attr, NULL);
		if (reg < 0)
			return;
		MLX5_ASSERT(reg != REG_NON);
		if (reg == REG_C_0) {
			/*
			 * REG_C_0 is shared: keep only the metadata bits
			 * and shift value/mask into their position.
			 */
			struct mlx5_priv *priv = dev->data->dev_private;
			uint32_t msk_c0 = priv->sh->dv_regc0_mask;
			uint32_t shl_c0 = rte_bsf32(msk_c0);

			mask &= msk_c0;
			mask <<= shl_c0;
			value <<= shl_c0;
		}
		flow_dv_match_meta_reg(matcher, key, reg, value, mask);
	}
}
9885
9886
9887
9888
9889
9890
9891
9892
9893
9894
9895
/**
 * Add vport metadata to the matcher and to the value.
 *
 * The vport metadata is always carried in REG_C_0.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value (spec side).
 * @param[in] value
 *   The vport metadata tag value to match.
 * @param[in] mask
 *   The vport metadata mask.
 */
static void
flow_dv_translate_item_meta_vport(void *matcher, void *key,
				  uint32_t value, uint32_t mask)
{
	flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
}
9902
9903
9904
9905
9906
9907
9908
9909
9910
9911
9912
9913
9914
/**
 * Add internal (PMD-generated) tag item to the matcher and to the value.
 *
 * Unlike the public TAG item, the register is carried directly in the
 * item (tag_v->id), no lookup needed.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value (spec side).
 * @param[in] item
 *   Flow pattern item to translate (spec must be present).
 */
static void
flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
				void *matcher, void *key,
				const struct rte_flow_item *item)
{
	const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
	const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
	uint32_t mask, value;

	MLX5_ASSERT(tag_v);
	value = tag_v->data;
	/* No mask means match the full register. */
	mask = tag_m ? tag_m->data : UINT32_MAX;
	if (tag_v->id == REG_C_0) {
		/* REG_C_0 is shared: restrict to the metadata bits. */
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		mask &= msk_c0;
		mask <<= shl_c0;
		value <<= shl_c0;
	}
	flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
}
9938
9939
9940
9941
9942
9943
9944
9945
9946
9947
9948
9949
9950
9951static void
9952flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9953 void *matcher, void *key,
9954 const struct rte_flow_item *item)
9955{
9956 const struct rte_flow_item_tag *tag_v = item->spec;
9957 const struct rte_flow_item_tag *tag_m = item->mask;
9958 enum modify_reg reg;
9959
9960 MLX5_ASSERT(tag_v);
9961 tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9962
9963 reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9964 MLX5_ASSERT(reg > 0);
9965 flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9966}
9967
9968
9969
9970
9971
9972
9973
9974
9975
9976
9977
9978
9979
/**
 * Add source vport match to the matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value (spec side).
 * @param[in] port
 *   Source vport value to match.
 * @param[in] mask
 *   Mask applied to the vport field.
 */
static void
flow_dv_translate_item_source_vport(void *matcher, void *key,
				    int16_t port, uint16_t mask)
{
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);

	MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
	MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
}
9990
9991
9992
9993
9994
9995
9996
9997
9998
9999
10000
10001
10002
10003
10004
10005
10006
10007
/**
 * Add PORT_ID item to the matcher and to the value.
 *
 * Depending on the target port capabilities the match is done either on
 * the vport metadata register or on the source vport field.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value (spec side).
 * @param[in] item
 *   Flow pattern item to translate, or NULL to match this device's port.
 * @param[in] attr
 *   Flow attributes.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
			       void *key, const struct rte_flow_item *item,
			       const struct rte_flow_attr *attr)
{
	const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
	const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
	struct mlx5_priv *priv;
	uint16_t mask, id;

	/* E-Switch manager port is matched by its vport id directly. */
	if (pid_v && pid_v->id == MLX5_PORT_ESW_MGR) {
		flow_dv_translate_item_source_vport(matcher, key,
			mlx5_flow_get_esw_manager_vport_id(dev), 0xffff);
		return 0;
	}
	mask = pid_m ? pid_m->id : 0xffff;
	id = pid_v ? pid_v->id : dev->data->port_id;
	priv = mlx5_port_to_eswitch_info(id, item == NULL);
	if (!priv)
		return -rte_errno;
	/*
	 * Prefer the vport metadata register match when the port provides
	 * one; fall back to the plain source vport match otherwise.
	 */
	if (priv->vport_meta_mask) {
		/*
		 * Additionally match on the source vport for a fully-masked
		 * wire port on a non-bonded device in transfer mode.
		 */
		if (mask == 0xffff && priv->vport_id == 0xffff &&
		    priv->pf_bond < 0 && attr->transfer)
			flow_dv_translate_item_source_vport
				(matcher, key, priv->vport_id, mask);
		/*
		 * The metadata register match covers all other cases,
		 * including representors where vport id is ambiguous.
		 */
		flow_dv_translate_item_meta_vport(matcher, key,
						  priv->vport_meta_tag,
						  priv->vport_meta_mask);
	} else {
		flow_dv_translate_item_source_vport(matcher, key,
						    priv->vport_id, mask);
	}
	return 0;
}
10058
10059
10060
10061
10062
10063
10064
10065
10066
10067
10068
10069
10070
10071
10072
10073
10074
10075
/**
 * Add REPRESENTED_PORT item to the matcher and to the value.
 *
 * Mirrors flow_dv_translate_item_port_id() but keyed by ethdev port id
 * (struct rte_flow_item_ethdev) instead of the deprecated PORT_ID item.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value (spec side).
 * @param[in] item
 *   Flow pattern item to translate, or NULL to match this device's port.
 * @param[in] attr
 *   Flow attributes.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
flow_dv_translate_item_represented_port(struct rte_eth_dev *dev, void *matcher,
					void *key,
					const struct rte_flow_item *item,
					const struct rte_flow_attr *attr)
{
	const struct rte_flow_item_ethdev *pid_m = item ? item->mask : NULL;
	const struct rte_flow_item_ethdev *pid_v = item ? item->spec : NULL;
	struct mlx5_priv *priv;
	uint16_t mask, id;

	if (!pid_m && !pid_v)
		return 0;
	/* UINT16_MAX denotes the E-Switch manager port. */
	if (pid_v && pid_v->port_id == UINT16_MAX) {
		flow_dv_translate_item_source_vport(matcher, key,
			mlx5_flow_get_esw_manager_vport_id(dev), UINT16_MAX);
		return 0;
	}
	mask = pid_m ? pid_m->port_id : UINT16_MAX;
	id = pid_v ? pid_v->port_id : dev->data->port_id;
	priv = mlx5_port_to_eswitch_info(id, item == NULL);
	if (!priv)
		return -rte_errno;
	/*
	 * Prefer the vport metadata register match when the port provides
	 * one; fall back to the plain source vport match otherwise.
	 */
	if (priv->vport_meta_mask) {
		/*
		 * Additionally match on the source vport for a fully-masked
		 * wire port on a non-bonded device in transfer mode.
		 */
		if (mask == UINT16_MAX && priv->vport_id == UINT16_MAX &&
		    priv->pf_bond < 0 && attr->transfer)
			flow_dv_translate_item_source_vport
				(matcher, key, priv->vport_id, mask);
		/*
		 * The metadata register match covers all other cases,
		 * including representors where vport id is ambiguous.
		 */
		flow_dv_translate_item_meta_vport(matcher, key,
						  priv->vport_meta_tag,
						  priv->vport_meta_mask);
	} else {
		flow_dv_translate_item_source_vport(matcher, key,
						    priv->vport_id, mask);
	}
	return 0;
}
10129
10130
10131
10132
10133
10134
10135
10136
10137
10138
10139
10140
10141
/**
 * Add ICMP6 item to the matcher and to the value.
 *
 * Forces the IP protocol to ICMPv6 and matches type/code when given.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value (spec side).
 * @param[in] item
 *   Flow pattern item to translate.
 * @param[in] inner
 *   Item is inner pattern (non-zero) or outer.
 */
static void
flow_dv_translate_item_icmp6(void *matcher, void *key,
			      const struct rte_flow_item *item,
			      int inner)
{
	const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
	const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* The presence of the item implies the ICMPv6 IP protocol. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
	if (!icmp6_v)
		return;
	if (!icmp6_m)
		icmp6_m = &rte_flow_item_icmp6_mask;
	MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
		 icmp6_v->type & icmp6_m->type);
	MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
		 icmp6_v->code & icmp6_m->code);
}
10176
10177
10178
10179
10180
10181
10182
10183
10184
10185
10186
10187
10188
10189static void
10190flow_dv_translate_item_icmp(void *matcher, void *key,
10191 const struct rte_flow_item *item,
10192 int inner)
10193{
10194 const struct rte_flow_item_icmp *icmp_m = item->mask;
10195 const struct rte_flow_item_icmp *icmp_v = item->spec;
10196 uint32_t icmp_header_data_m = 0;
10197 uint32_t icmp_header_data_v = 0;
10198 void *headers_m;
10199 void *headers_v;
10200 void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
10201 misc_parameters_3);
10202 void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
10203 if (inner) {
10204 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
10205 inner_headers);
10206 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
10207 } else {
10208 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
10209 outer_headers);
10210 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
10211 }
10212 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
10213 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
10214 if (!icmp_v)
10215 return;
10216 if (!icmp_m)
10217 icmp_m = &rte_flow_item_icmp_mask;
10218 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
10219 icmp_m->hdr.icmp_type);
10220 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
10221 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
10222 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
10223 icmp_m->hdr.icmp_code);
10224 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
10225 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
10226 icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
10227 icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
10228 if (icmp_header_data_m) {
10229 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
10230 icmp_header_data_v |=
10231 rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
10232 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
10233 icmp_header_data_m);
10234 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
10235 icmp_header_data_v & icmp_header_data_m);
10236 }
10237}
10238
10239
10240
10241
10242
10243
10244
10245
10246
10247
10248
10249
10250
/**
 * Add GTP item to the matcher and to the value.
 *
 * Forces the GTP-U UDP port when not matched yet, then matches message
 * flags, message type and TEID.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value (spec side).
 * @param[in] item
 *   Flow pattern item to translate.
 * @param[in] inner
 *   Item is inner pattern (non-zero) or outer.
 */
static void
flow_dv_translate_item_gtp(void *matcher, void *key,
			   const struct rte_flow_item *item, int inner)
{
	const struct rte_flow_item_gtp *gtp_m = item->mask;
	const struct rte_flow_item_gtp *gtp_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	uint16_t dport = RTE_GTPU_UDP_PORT;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Force the GTP-U port if the UDP dport is not matched yet. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	if (!gtp_v)
		return;
	if (!gtp_m)
		gtp_m = &rte_flow_item_gtp_mask;
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
		 gtp_m->v_pt_rsv_flags);
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
		 gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
		 gtp_v->msg_type & gtp_m->msg_type);
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
		 rte_be_to_cpu_32(gtp_m->teid));
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
		 rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
}
10293
10294
10295
10296
10297
10298
10299
10300
10301
10302
10303
/**
 * Add GTP PSC (PDU session container) extension item to the matcher
 * and to the value.
 *
 * Matches the GTP extension-header flag, the next-extension-header type
 * (0x85 = PDU session container) and optionally the PDU type and QFI.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value (spec side).
 * @param[in] item
 *   Flow pattern item to translate.
 *
 * @return
 *   Always 0.
 */
static int
flow_dv_translate_item_gtp_psc(void *matcher, void *key,
			       const struct rte_flow_item *item)
{
	const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
			misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	/*
	 * NOTE(review): the union relies on the in-memory byte order of its
	 * members matching the GTP dword layout after the CPU->BE swap
	 * below - confirm against the PRM when porting.
	 */
	union {
		uint32_t w32;
		struct {
			uint16_t seq_num;
			uint8_t npdu_num;
			uint8_t next_ext_header_type;
		};
	} dw_2;
	uint8_t gtp_flags;

	/* Always set E-flag match on one, regardless of GTP item settings. */
	gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
	gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
	gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
	gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
	/* Match only the next-extension-header byte (type 0x85 = PSC). */
	dw_2.seq_num = 0;
	dw_2.npdu_num = 0;
	dw_2.next_ext_header_type = 0xff;
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
		 rte_cpu_to_be_32(dw_2.w32));
	dw_2.seq_num = 0;
	dw_2.npdu_num = 0;
	dw_2.next_ext_header_type = 0x85;
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
		 rte_cpu_to_be_32(dw_2.w32));
	if (gtp_psc_v) {
		union {
			uint32_t w32;
			struct {
				uint8_t len;
				uint8_t type_flags;
				uint8_t qfi;
				uint8_t reserved;
			};
		} dw_0;

		/* Match the extension header PDU type and QFI. */
		if (!gtp_psc_m)
			gtp_psc_m = &rte_flow_item_gtp_psc_mask;
		dw_0.w32 = 0;
		dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->hdr.type);
		dw_0.qfi = gtp_psc_m->hdr.qfi;
		MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
			 rte_cpu_to_be_32(dw_0.w32));
		dw_0.w32 = 0;
		dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->hdr.type &
							  gtp_psc_m->hdr.type);
		dw_0.qfi = gtp_psc_v->hdr.qfi & gtp_psc_m->hdr.qfi;
		MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
			 rte_cpu_to_be_32(dw_0.w32));
	}
	return 0;
}
10369
10370
10371
10372
10373
10374
10375
10376
10377
10378
10379
10380
10381
10382
10383
/**
 * Add eCPRI item to the matcher and to the value.
 *
 * The eCPRI header is matched through the flex-parser programmed sample
 * fields (misc parameters 4). The common header goes to sample 0; for
 * message types carrying an ID in the first payload dword, that dword
 * goes to sample 1.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value (spec side).
 * @param[in] item
 *   Flow pattern item to translate.
 * @param[in] last_item
 *   Last item flags, used to detect eCPRI directly over Ethernet.
 */
static void
flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
			     void *key, const struct rte_flow_item *item,
			     uint64_t last_item)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_ecpri *ecpri_m = item->mask;
	const struct rte_flow_item_ecpri *ecpri_v = item->spec;
	struct rte_ecpri_common_hdr common;
	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_4);
	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
	uint32_t *samples;
	void *dw_m;
	void *dw_v;

	/*
	 * In case of eCPRI over Ethernet, if ethertype is not matched yet,
	 * force it to the eCPRI value. Raw byte access into the PRM buffer:
	 * the ethertype field is written big-endian in place.
	 */
	if (last_item & MLX5_FLOW_LAYER_OUTER_L2) {
		void *hdrs_m, *hdrs_v, *l2m, *l2v;

		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
		l2m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, ethertype);
		l2v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
		if (*(uint16_t *)l2m == 0 && *(uint16_t *)l2v == 0) {
			*(uint16_t *)l2m = UINT16_MAX;
			*(uint16_t *)l2v = RTE_BE16(RTE_ETHER_TYPE_ECPRI);
		}
	}
	if (!ecpri_v)
		return;
	if (!ecpri_m)
		ecpri_m = &rte_flow_item_ecpri_mask;
	/*
	 * A zeroed common-header mask means match the eCPRI layer presence
	 * only - the ethertype match above is sufficient for that.
	 */
	if (!ecpri_m->hdr.common.u32)
		return;
	/* Sample field ids come from the flex parser profile. */
	samples = priv->sh->ecpri_parser.ids;
	/* Sample 0: the eCPRI common header dword (kept big-endian). */
	dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
			    prog_sample_field_value_0);
	dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
			    prog_sample_field_value_0);
	/* Already big-endian (network order) in the item, write in place. */
	*(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
	*(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
	/* Sample#0, used for matching type, OK to be set. */
	MLX5_SET(fte_match_set_misc4, misc4_m,
		 prog_sample_field_id_0, samples[0]);
	/* The field id must be set on both mask and value sides. */
	MLX5_SET(fte_match_set_misc4, misc4_v,
		 prog_sample_field_id_0, samples[0]);
	/*
	 * Checking if message body part needs to be matched.
	 * Some wildcard rules only matching type field should be supported.
	 */
	if (ecpri_m->hdr.dummy[0]) {
		common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
		switch (common.type) {
		case RTE_ECPRI_MSG_TYPE_IQ_DATA:
		case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
		case RTE_ECPRI_MSG_TYPE_DLY_MSR:
			/* Sample 1: first payload dword (ID fields). */
			dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
					    prog_sample_field_value_1);
			dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
					    prog_sample_field_value_1);
			*(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
			*(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
					    ecpri_m->hdr.dummy[0];
			/* Sample#1, to match message body, offset 4. */
			MLX5_SET(fte_match_set_misc4, misc4_m,
				 prog_sample_field_id_1, samples[1]);
			MLX5_SET(fte_match_set_misc4, misc4_v,
				 prog_sample_field_id_1, samples[1]);
			break;
		default:
			/* Others, do not match any sample ID. */
			break;
		}
	}
}
10473
10474
10475
10476
10477
10478
10479
10480
10481
10482
10483
10484
10485
/**
 * Add connection tracking status item to the matcher and to the value.
 *
 * Translates RTE_FLOW_CONNTRACK_PKT_STATE_* flags into the ASO CT
 * syndrome bits carried in a metadata register.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value (spec side).
 * @param[in] item
 *   Flow pattern item to translate.
 */
static void
flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
			      void *matcher, void *key,
			      const struct rte_flow_item *item)
{
	uint32_t reg_value = 0;
	int reg_id;
	/* 8 LSB of the register to be used as the syndrome mask. */
	uint32_t reg_mask = 0;
	const struct rte_flow_item_conntrack *spec = item->spec;
	const struct rte_flow_item_conntrack *mask = item->mask;
	uint32_t flags;
	struct rte_flow_error error;

	if (!mask)
		mask = &rte_flow_item_conntrack_mask;
	if (!spec || !mask->flags)
		return;
	flags = spec->flags & mask->flags;
	/* Map each conntrack packet state to its syndrome bit. */
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
		reg_value |= MLX5_CT_SYNDROME_VALID;
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
		reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
		reg_value |= MLX5_CT_SYNDROME_INVALID;
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
		reg_value |= MLX5_CT_SYNDROME_TRAP;
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
		reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
	/*
	 * VALID/INVALID/DISABLED share the same syndrome bit pair -
	 * presumably 0xc0 covers the bits of those three syndromes;
	 * confirm against the MLX5_CT_SYNDROME_* definitions.
	 */
	if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
			   RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
			   RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
		reg_mask |= 0xc0;
	if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
		reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
	if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
		reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
	/* The REG_C_x value could be saved during startup. */
	reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
	if (reg_id == REG_NON)
		return;
	flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
			       reg_value, reg_mask);
}
10531
/**
 * Add flex item to the matcher and to the value.
 *
 * Acquires (and references, on first use per flow) the flex item index,
 * records it in the flow handle, then delegates the field translation.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow matcher value (spec side).
 * @param[in] item
 *   Flow pattern item to translate (spec->handle must be valid).
 * @param[in, out] dev_flow
 *   Device flow keeping the per-flow flex item reference bitmap.
 * @param[in] is_inner
 *   Item is inner pattern (true) or outer.
 */
static void
flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
			    const struct rte_flow_item *item,
			    struct mlx5_flow *dev_flow, bool is_inner)
{
	const struct rte_flow_item_flex *spec =
		(const struct rte_flow_item_flex *)item->spec;
	int index = mlx5_flex_acquire_index(dev, spec->handle, false);

	MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
	if (index < 0)
		return;
	if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
		/* Take a reference the first time this flow uses the item. */
		if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
			MLX5_ASSERT(false);
		dev_flow->handle->flex_item |= (uint8_t)RTE_BIT32(index);
	}
	mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner);
}
10552
/* All-zero template used to test whether a match header is empty. */
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

/* True when the given header of the match criteria is entirely zero. */
#define HEADER_IS_ZERO(match_criteria, headers)				     \
	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
10558
10559
10560
10561
10562
10563
10564
10565
10566
10567
/**
 * Calculate flow matcher enable bitmap.
 *
 * Each bit of the result enables one match criteria header that is
 * non-zero in the given match criteria buffer.
 *
 * @param match_criteria
 *   Pointer to flow matcher criteria.
 *
 * @return
 *   Bitmap of enabled fields.
 */
static uint8_t
flow_dv_matcher_enable(uint32_t *match_criteria)
{
	uint8_t match_criteria_enable;

	match_criteria_enable =
		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
		MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
		MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
		MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
		MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
		MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
		MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters_5)) <<
		MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT;
	return match_criteria_enable;
}
10596
10597static void
10598__flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
10599{
10600
10601
10602
10603
10604
10605
10606
10607 if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
10608 *size = MLX5_ST_SZ_BYTES(fte_match_param) -
10609 MLX5_ST_SZ_BYTES(fte_match_set_misc5);
10610 if (!(match_criteria & (1 <<
10611 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT))) {
10612 *size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
10613 }
10614 }
10615}
10616
10617static struct mlx5_list_entry *
10618flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
10619 struct mlx5_list_entry *entry, void *cb_ctx)
10620{
10621 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10622 struct mlx5_flow_dv_matcher *ref = ctx->data;
10623 struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10624 typeof(*tbl), tbl);
10625 struct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,
10626 sizeof(*resource),
10627 0, SOCKET_ID_ANY);
10628
10629 if (!resource) {
10630 rte_flow_error_set(ctx->error, ENOMEM,
10631 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10632 "cannot create matcher");
10633 return NULL;
10634 }
10635 memcpy(resource, entry, sizeof(*resource));
10636 resource->tbl = &tbl->tbl;
10637 return &resource->entry;
10638}
10639
/* List clone-free callback: release a matcher clone allocated by
 * flow_dv_matcher_clone_cb(). The entry is the first member of the matcher,
 * so freeing the entry pointer frees the whole object.
 */
static void
flow_dv_matcher_clone_free_cb(void *tool_ctx __rte_unused,
			      struct mlx5_list_entry *entry)
{
	mlx5_free(entry);
}
10646
/*
 * Hash-list create callback for flow tables.
 *
 * Allocates a table data entry from the JUMP ipool, fills it from the packed
 * 64-bit table key and the tunnel parameters, then (unless the key marks a
 * dummy table) creates the DR flow table object in the proper domain, a jump
 * action towards it for non-root tables, and the per-table matcher list.
 *
 * Error paths unwind in reverse creation order: matcher list -> jump action
 * -> table object -> ipool entry.
 *
 * @return New list entry on success, NULL with ctx->error set on failure.
 */
struct mlx5_list_entry *
flow_dv_tbl_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data2;
	struct rte_flow_error *error = ctx->error;
	/* ctx->data carries the packed mlx5_flow_tbl_key. */
	union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
	struct mlx5_flow_tbl_resource *tbl;
	void *domain;
	uint32_t idx = 0;
	int ret;

	tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
	if (!tbl_data) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate flow table data entry");
		return NULL;
	}
	tbl_data->idx = idx;
	tbl_data->tunnel = tt_prm->tunnel;
	tbl_data->group_id = tt_prm->group_id;
	tbl_data->external = !!tt_prm->external;
	tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
	tbl_data->is_egress = !!key.is_egress;
	tbl_data->is_transfer = !!key.is_fdb;
	tbl_data->dummy = !!key.dummy;
	tbl_data->level = key.level;
	tbl_data->id = key.id;
	tbl = &tbl_data->tbl;
	/* Dummy tables carry no hardware objects. */
	if (key.dummy)
		return &tbl_data->entry;
	/* Pick the DR domain matching the table direction. */
	if (key.is_fdb)
		domain = sh->fdb_domain;
	else if (key.is_egress)
		domain = sh->tx_domain;
	else
		domain = sh->rx_domain;
	ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
	if (ret) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create flow table object");
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
		return NULL;
	}
	/* Root table (level 0) cannot be a jump destination. */
	if (key.level != 0) {
		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
					(tbl->obj, &tbl_data->jump.action);
		if (ret) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot create flow jump action");
			mlx5_flow_os_destroy_flow_tbl(tbl->obj);
			mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
			return NULL;
		}
	}
	MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
	      key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
	      key.level, key.id);
	tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
					     flow_dv_matcher_create_cb,
					     flow_dv_matcher_match_cb,
					     flow_dv_matcher_remove_cb,
					     flow_dv_matcher_clone_cb,
					     flow_dv_matcher_clone_free_cb);
	if (!tbl_data->matchers) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot create tbl matcher list");
		mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
		mlx5_flow_os_destroy_flow_tbl(tbl->obj);
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
		return NULL;
	}
	return &tbl_data->entry;
}
10731
10732int
10733flow_dv_tbl_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10734 void *cb_ctx)
10735{
10736 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10737 struct mlx5_flow_tbl_data_entry *tbl_data =
10738 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10739 union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10740
10741 return tbl_data->level != key.level ||
10742 tbl_data->id != key.id ||
10743 tbl_data->dummy != key.dummy ||
10744 tbl_data->is_transfer != !!key.is_fdb ||
10745 tbl_data->is_egress != !!key.is_egress;
10746}
10747
10748struct mlx5_list_entry *
10749flow_dv_tbl_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10750 void *cb_ctx)
10751{
10752 struct mlx5_dev_ctx_shared *sh = tool_ctx;
10753 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10754 struct mlx5_flow_tbl_data_entry *tbl_data;
10755 struct rte_flow_error *error = ctx->error;
10756 uint32_t idx = 0;
10757
10758 tbl_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10759 if (!tbl_data) {
10760 rte_flow_error_set(error, ENOMEM,
10761 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10762 NULL,
10763 "cannot allocate flow table data entry");
10764 return NULL;
10765 }
10766 memcpy(tbl_data, oentry, sizeof(*tbl_data));
10767 tbl_data->idx = idx;
10768 return &tbl_data->entry;
10769}
10770
/* Hash-list clone-free callback: return a cloned flow table data entry to
 * the JUMP ipool it was allocated from.
 */
void
flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_tbl_data_entry *tbl_data =
		container_of(entry, struct mlx5_flow_tbl_data_entry, entry);

	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
10780
10781
10782
10783
10784
10785
10786
10787
10788
10789
10790
10791
10792
10793
10794
10795
10796
10797
10798
10799
10800
10801
/*
 * Get (create or reference) a flow table resource.
 *
 * Packs the table identity (level, id, direction, dummy flag) into a 64-bit
 * key and registers it on the shared flow-table hash list; the list either
 * returns an existing entry (refcounted) or invokes flow_dv_tbl_create_cb()
 * to build a new one.
 *
 * @param dev          Ethernet device.
 * @param table_level  Table level in the pipeline.
 * @param egress       Nonzero for an egress (TX) table.
 * @param transfer     Nonzero for an FDB (E-Switch) table.
 * @param external     Table visible to the application (tunnel bookkeeping).
 * @param tunnel       Owning tunnel, or NULL.
 * @param group_id     Flow group the table maps.
 * @param dummy        Nonzero to create a software-only placeholder table.
 * @param table_id     Table id component of the key.
 * @param error        Flow error to populate on failure.
 * @return Table resource pointer, or NULL with error set.
 */
struct mlx5_flow_tbl_resource *
flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
			 uint32_t table_level, uint8_t egress,
			 uint8_t transfer,
			 bool external,
			 const struct mlx5_flow_tunnel *tunnel,
			 uint32_t group_id, uint8_t dummy,
			 uint32_t table_id,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	union mlx5_flow_tbl_key table_key = {
		{
			.level = table_level,
			.id = table_id,
			.reserved = 0,
			.dummy = !!dummy,
			.is_fdb = !!transfer,
			.is_egress = !!egress,
		}
	};
	struct mlx5_flow_tbl_tunnel_prm tt_prm = {
		.tunnel = tunnel,
		.group_id = group_id,
		.external = external,
	};
	struct mlx5_flow_cb_ctx ctx = {
		.dev = dev,
		.error = error,
		.data = &table_key.v64,
		.data2 = &tt_prm,
	};
	struct mlx5_list_entry *entry;
	struct mlx5_flow_tbl_data_entry *tbl_data;

	entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
	if (!entry) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot get table");
		return NULL;
	}
	DRV_LOG(DEBUG, "table_level %u table_id %u "
		"tunnel %u group %u registered.",
		table_level, table_id,
		tunnel ? tunnel->tunnel_id : 0, group_id);
	tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
	return &tbl_data->tbl;
}
10851
/*
 * Hash-list remove callback for flow tables.
 *
 * Destroys the hardware objects owned by the entry (jump action, table
 * object), drops the corresponding tunnel-group hash entry when tunnel
 * offload bookkeeping applies, destroys the per-table matcher list, and
 * finally returns the entry to the JUMP ipool.
 */
void
flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_tbl_data_entry *tbl_data =
		container_of(entry, struct mlx5_flow_tbl_data_entry, entry);

	MLX5_ASSERT(entry && sh);
	if (tbl_data->jump.action)
		mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
	if (tbl_data->tbl.obj)
		mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
	if (tbl_data->tunnel_offload && tbl_data->external) {
		struct mlx5_list_entry *he;
		struct mlx5_hlist *tunnel_grp_hash;
		struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
		union tunnel_tbl_key tunnel_key = {
			.tunnel_id = tbl_data->tunnel ?
					tbl_data->tunnel->tunnel_id : 0,
			.group = tbl_data->group_id
		};
		uint32_t table_level = tbl_data->level;
		struct mlx5_flow_cb_ctx ctx = {
			.data = (void *)&tunnel_key.val,
		};

		/* Tunnel-owned tables hash per tunnel, others via the hub. */
		tunnel_grp_hash = tbl_data->tunnel ?
					tbl_data->tunnel->groups :
					thub->groups;
		he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, &ctx);
		if (he)
			mlx5_hlist_unregister(tunnel_grp_hash, he);
		DRV_LOG(DEBUG,
			"table_level %u id %u tunnel %u group %u released.",
			table_level,
			tbl_data->id,
			tbl_data->tunnel ?
			tbl_data->tunnel->tunnel_id : 0,
			tbl_data->group_id);
	}
	if (tbl_data->matchers)
		mlx5_list_destroy(tbl_data->matchers);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
10896
10897
10898
10899
10900
10901
10902
10903
10904
10905
10906
10907
10908static int
10909flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10910 struct mlx5_flow_tbl_resource *tbl)
10911{
10912 struct mlx5_flow_tbl_data_entry *tbl_data =
10913 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10914
10915 if (!tbl)
10916 return 0;
10917 return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10918}
10919
10920int
10921flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
10922 struct mlx5_list_entry *entry, void *cb_ctx)
10923{
10924 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10925 struct mlx5_flow_dv_matcher *ref = ctx->data;
10926 struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10927 entry);
10928
10929 return cur->crc != ref->crc ||
10930 cur->priority != ref->priority ||
10931 memcmp((const void *)cur->mask.buf,
10932 (const void *)ref->mask.buf, ref->mask.size);
10933}
10934
/*
 * List create callback for flow matchers.
 *
 * Allocates a matcher object, copies the reference matcher into it, derives
 * the criteria-enable bits from the mask buffer, trims the mask size
 * accordingly, and creates the underlying DR flow matcher on the table
 * object.
 *
 * @return New list entry, or NULL with ctx->error set on failure.
 */
struct mlx5_list_entry *
flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_dv_matcher *ref = ctx->data;
	struct mlx5_flow_dv_matcher *resource;
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.match_mask = (void *)&ref->mask,
	};
	struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
							    typeof(*tbl), tbl);
	int ret;

	resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
			       SOCKET_ID_ANY);
	if (!resource) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot create matcher");
		return NULL;
	}
	*resource = *ref;
	dv_attr.match_criteria_enable =
		flow_dv_matcher_enable(resource->mask.buf);
	/*
	 * NOTE(review): this shrinks ref->mask.size — the caller's reference
	 * object, not the copy — presumably so subsequent list match
	 * comparisons use the trimmed size; confirm against
	 * flow_dv_matcher_register() callers.
	 */
	__flow_dv_adjust_buf_size(&ref->mask.size,
				  dv_attr.match_criteria_enable);
	dv_attr.priority = ref->priority;
	if (tbl->is_egress)
		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
	ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
					       tbl->tbl.obj,
					       &resource->matcher_object);
	if (ret) {
		mlx5_free(resource);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot create matcher");
		return NULL;
	}
	return &resource->entry;
}
10978
10979
10980
10981
10982
10983
10984
10985
10986
10987
10988
10989
10990
10991
10992
10993
10994
10995
/*
 * Register (create or reference) a flow matcher on its table.
 *
 * Acquires the target flow table first (taking a reference), then registers
 * the matcher on that table's matcher list; on list failure the table
 * reference is dropped again. On success the resulting matcher is stored in
 * the device flow handle.
 *
 * @param dev       Ethernet device.
 * @param ref       Reference matcher describing mask/priority/crc.
 * @param key       Packed table key selecting the table.
 * @param dev_flow  Device flow to attach the matcher to.
 * @param tunnel    Owning tunnel, or NULL.
 * @param group_id  Flow group id for table lookup.
 * @param error     Flow error to populate on failure.
 * @return 0 on success, negative errno value otherwise.
 */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_matcher *ref,
			 union mlx5_flow_tbl_key *key,
			 struct mlx5_flow *dev_flow,
			 const struct mlx5_flow_tunnel *tunnel,
			 uint32_t group_id,
			 struct rte_flow_error *error)
{
	struct mlx5_list_entry *entry;
	struct mlx5_flow_dv_matcher *resource;
	struct mlx5_flow_tbl_resource *tbl;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = ref,
	};

	/* Table reference is held for the lifetime of the matcher. */
	tbl = flow_dv_tbl_resource_get(dev, key->level,
				       key->is_egress, key->is_fdb,
				       dev_flow->external, tunnel,
				       group_id, 0, key->id, error);
	if (!tbl)
		return -rte_errno;
	tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
	ref->tbl = tbl;
	entry = mlx5_list_register(tbl_data->matchers, &ctx);
	if (!entry) {
		flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate ref memory");
	}
	resource = container_of(entry, typeof(*resource), entry);
	dev_flow->handle->dvh.matcher = resource;
	return 0;
}
11036
11037struct mlx5_list_entry *
11038flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx)
11039{
11040 struct mlx5_dev_ctx_shared *sh = tool_ctx;
11041 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11042 struct mlx5_flow_dv_tag_resource *entry;
11043 uint32_t idx = 0;
11044 int ret;
11045
11046 entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
11047 if (!entry) {
11048 rte_flow_error_set(ctx->error, ENOMEM,
11049 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11050 "cannot allocate resource memory");
11051 return NULL;
11052 }
11053 entry->idx = idx;
11054 entry->tag_id = *(uint32_t *)(ctx->data);
11055 ret = mlx5_flow_os_create_flow_action_tag(entry->tag_id,
11056 &entry->action);
11057 if (ret) {
11058 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
11059 rte_flow_error_set(ctx->error, ENOMEM,
11060 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11061 NULL, "cannot create action");
11062 return NULL;
11063 }
11064 return &entry->entry;
11065}
11066
11067int
11068flow_dv_tag_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
11069 void *cb_ctx)
11070{
11071 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11072 struct mlx5_flow_dv_tag_resource *tag =
11073 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
11074
11075 return *(uint32_t *)(ctx->data) != tag->tag_id;
11076}
11077
11078struct mlx5_list_entry *
11079flow_dv_tag_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
11080 void *cb_ctx)
11081{
11082 struct mlx5_dev_ctx_shared *sh = tool_ctx;
11083 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11084 struct mlx5_flow_dv_tag_resource *entry;
11085 uint32_t idx = 0;
11086
11087 entry = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
11088 if (!entry) {
11089 rte_flow_error_set(ctx->error, ENOMEM,
11090 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11091 "cannot allocate tag resource memory");
11092 return NULL;
11093 }
11094 memcpy(entry, oentry, sizeof(*entry));
11095 entry->idx = idx;
11096 return &entry->entry;
11097}
11098
/* Hash-list clone-free callback: return a cloned tag resource to the TAG
 * ipool it was allocated from.
 */
void
flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_dv_tag_resource *tag =
		container_of(entry, struct mlx5_flow_dv_tag_resource, entry);

	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
}
11108
11109
11110
11111
11112
11113
11114
11115
11116
11117
11118
11119
11120
11121
11122
11123
/*
 * Register (create or reference) a tag resource for a device flow.
 *
 * Lazily creates the shared tag hash table on first use, then registers the
 * 24-bit tag value on it; an existing entry is reference-counted, a missing
 * one is built by flow_dv_tag_create_cb(). On success the tag resource and
 * its ipool index are stored in the device flow.
 *
 * @param dev       Ethernet device.
 * @param tag_be24  Tag value (24-bit, big-endian layout) to register.
 * @param dev_flow  Device flow to attach the tag resource to.
 * @param error     Flow error to populate on failure.
 * @return 0 on success, negative errno value otherwise.
 */
static int
flow_dv_tag_resource_register
			(struct rte_eth_dev *dev,
			 uint32_t tag_be24,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_dv_tag_resource *resource;
	struct mlx5_list_entry *entry;
	struct mlx5_flow_cb_ctx ctx = {
					.error = error,
					.data = &tag_be24,
					};
	struct mlx5_hlist *tag_table;

	/* Create the shared tag table on first use. */
	tag_table = flow_dv_hlist_prepare(priv->sh, &priv->sh->tag_table,
				      "tags",
				      MLX5_TAGS_HLIST_ARRAY_SIZE,
				      false, false, priv->sh,
				      flow_dv_tag_create_cb,
				      flow_dv_tag_match_cb,
				      flow_dv_tag_remove_cb,
				      flow_dv_tag_clone_cb,
				      flow_dv_tag_clone_free_cb,
				      error);
	if (unlikely(!tag_table))
		return -rte_errno;
	entry = mlx5_hlist_register(tag_table, tag_be24, &ctx);
	if (entry) {
		resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
					entry);
		dev_flow->handle->dvh.rix_tag = resource->idx;
		dev_flow->dv.tag_resource = resource;
		return 0;
	}
	return -rte_errno;
}
11162
/* Hash-list remove callback for tag resources: destroy the DR tag action
 * and return the resource to the TAG ipool. Called when the last reference
 * on the tag entry is dropped.
 */
void
flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_dv_tag_resource *tag =
		container_of(entry, struct mlx5_flow_dv_tag_resource, entry);

	MLX5_ASSERT(tag && sh && tag->action);
	claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
	DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
}
11175
11176
11177
11178
11179
11180
11181
11182
11183
11184
11185
11186
11187static int
11188flow_dv_tag_release(struct rte_eth_dev *dev,
11189 uint32_t tag_idx)
11190{
11191 struct mlx5_priv *priv = dev->data->dev_private;
11192 struct mlx5_flow_dv_tag_resource *tag;
11193
11194 tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
11195 if (!tag)
11196 return 0;
11197 DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
11198 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
11199 return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
11200}
11201
11202
11203
11204
11205
11206
11207
11208
11209
11210
11211
11212
11213
11214
11215
11216
11217static int
11218flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
11219 const struct rte_flow_action *action,
11220 uint32_t *dst_port_id,
11221 struct rte_flow_error *error)
11222{
11223 uint32_t port;
11224 struct mlx5_priv *priv;
11225
11226 switch (action->type) {
11227 case RTE_FLOW_ACTION_TYPE_PORT_ID: {
11228 const struct rte_flow_action_port_id *conf;
11229
11230 conf = (const struct rte_flow_action_port_id *)action->conf;
11231 port = conf->original ? dev->data->port_id : conf->id;
11232 break;
11233 }
11234 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
11235 const struct rte_flow_action_ethdev *ethdev;
11236
11237 ethdev = (const struct rte_flow_action_ethdev *)action->conf;
11238 port = ethdev->port_id;
11239 break;
11240 }
11241 default:
11242 MLX5_ASSERT(false);
11243 return rte_flow_error_set(error, EINVAL,
11244 RTE_FLOW_ERROR_TYPE_ACTION, action,
11245 "unknown E-Switch action");
11246 }
11247
11248 priv = mlx5_port_to_eswitch_info(port, false);
11249 if (!priv)
11250 return rte_flow_error_set(error, -rte_errno,
11251 RTE_FLOW_ERROR_TYPE_ACTION,
11252 NULL,
11253 "No eswitch info was found for port");
11254#ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
11255
11256
11257
11258
11259 *dst_port_id = priv->dev_port;
11260#else
11261
11262
11263
11264
11265
11266 *dst_port_id = priv->vport_id;
11267#endif
11268 return 0;
11269}
11270
11271
11272
11273
11274
11275
11276
11277
11278
11279
11280
11281
11282
11283
11284
11285
11286static uint32_t
11287flow_dv_translate_create_counter(struct rte_eth_dev *dev,
11288 struct mlx5_flow *dev_flow,
11289 const struct rte_flow_action_count *count
11290 __rte_unused,
11291 const struct rte_flow_action_age *age)
11292{
11293 uint32_t counter;
11294 struct mlx5_age_param *age_param;
11295
11296 counter = flow_dv_counter_alloc(dev, !!age);
11297 if (!counter || age == NULL)
11298 return counter;
11299 age_param = flow_dv_counter_idx_get_age(dev, counter);
11300 age_param->context = age->context ? age->context :
11301 (void *)(uintptr_t)(dev_flow->flow_idx);
11302 age_param->timeout = age->timeout;
11303 age_param->port_id = dev->data->port_id;
11304 __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
11305 __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
11306 return counter;
11307}
11308
11309
11310
11311
11312
11313
11314
11315
11316
11317
11318
11319
11320
11321
11322
/*
 * Translate the internal Tx-queue flow item into matcher/key match values.
 *
 * Resolves the Tx queue number to the underlying SQ number (hairpin or
 * regular) and matches on the source_sqn field. A NULL mask means match the
 * full SQ number.
 *
 * @param dev      Ethernet device owning the Tx queue.
 * @param matcher  Flow matcher mask buffer.
 * @param key      Flow matcher value buffer.
 * @param item     Tx-queue item (spec carries the queue index).
 */
static void
flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
				void *matcher, void *key,
				const struct rte_flow_item *item)
{
	const struct mlx5_rte_flow_item_tx_queue *queue_m;
	const struct mlx5_rte_flow_item_tx_queue *queue_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	struct mlx5_txq_ctrl *txq;
	uint32_t queue, mask;

	queue_m = (const void *)item->mask;
	queue_v = (const void *)item->spec;
	if (!queue_v)
		return;
	/* Reference the queue while reading its SQ number. */
	txq = mlx5_txq_get(dev, queue_v->queue);
	if (!txq)
		return;
	if (txq->is_hairpin)
		queue = txq->obj->sq->id;
	else
		queue = txq->obj->sq_obj.sq->id;
	mask = queue_m == NULL ? UINT32_MAX : queue_m->queue;
	MLX5_SET(fte_match_set_misc, misc_m, source_sqn, mask);
	MLX5_SET(fte_match_set_misc, misc_v, source_sqn, queue & mask);
	mlx5_txq_release(dev, queue_v->queue);
}
11351
11352
11353
11354
11355
11356
11357
11358
11359
11360
11361
/*
 * Derive the ibverbs RX hash fields for a flow from its matched layers and
 * the requested RSS types.
 *
 * L3 fields are chosen from the IPv4/IPv6 layer actually present (inner
 * layers when rss level >= 2 and tunnel support is compiled in, outer
 * otherwise; no item flags at all means "any"), honoring the
 * L3_SRC_ONLY/L3_DST_ONLY restrictions. The ESP SPI field is added when
 * requested. If no L3 hash field was selected, L4 hashing is skipped
 * entirely. Otherwise UDP/TCP port fields are added the same way, and the
 * INNER flag is set for inner-layer hashing.
 *
 * @param item_flags   MLX5_FLOW_LAYER_* bits describing the matched layers.
 * @param rss_desc     RSS descriptor (types and tunnel level).
 * @param hash_fields  Out: IBV_RX_HASH_* field mask.
 */
void
flow_dv_hashfields_set(uint64_t item_flags,
		       struct mlx5_flow_rss_desc *rss_desc,
		       uint64_t *hash_fields)
{
	uint64_t items = item_flags;
	uint64_t fields = 0;
	int rss_inner = 0;
	uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);

	*hash_fields = 0;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (rss_desc->level >= 2)
		rss_inner = 1;
#endif
	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4)) ||
	     !items) {
		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
			if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
				fields |= IBV_RX_HASH_SRC_IPV4;
			else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
				fields |= IBV_RX_HASH_DST_IPV4;
			else
				fields |= MLX5_IPV4_IBV_RX_HASH;
		}
	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6)) ||
		   !items) {
		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
			if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
				fields |= IBV_RX_HASH_SRC_IPV6;
			else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
				fields |= IBV_RX_HASH_DST_IPV6;
			else
				fields |= MLX5_IPV6_IBV_RX_HASH;
		}
	}
	if (items & MLX5_FLOW_ITEM_ESP) {
		if (rss_types & RTE_ETH_RSS_ESP)
			fields |= IBV_RX_HASH_IPSEC_SPI;
	}
	/* Without an L3 selection (SPI aside), L4 hashing is meaningless. */
	if ((fields & ~IBV_RX_HASH_IPSEC_SPI) == 0) {
		*hash_fields = fields;
		return;
	}
	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP)) ||
	    !items) {
		if (rss_types & RTE_ETH_RSS_UDP) {
			if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
				fields |= IBV_RX_HASH_SRC_PORT_UDP;
			else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
				fields |= IBV_RX_HASH_DST_PORT_UDP;
			else
				fields |= MLX5_UDP_IBV_RX_HASH;
		}
	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP)) ||
		   !items) {
		if (rss_types & RTE_ETH_RSS_TCP) {
			if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
				fields |= IBV_RX_HASH_SRC_PORT_TCP;
			else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
				fields |= IBV_RX_HASH_DST_PORT_TCP;
			else
				fields |= MLX5_TCP_IBV_RX_HASH;
		}
	}
	if (rss_inner)
		fields |= IBV_RX_HASH_INNER;
	*hash_fields = fields;
}
11439
11440
11441
11442
11443
11444
11445
11446
11447
11448
11449
11450
11451
11452
11453
11454
/*
 * Prepare (get or create) an Rx hash queue object for a device flow.
 *
 * Fills the RSS descriptor from the flow (key length, hash fields, tunnel
 * flag) and queries the hrxq cache. The shared_rss field is temporarily
 * cleared so cache matching ignores it, and restored before returning.
 * A zero hash-fields value degrades the flow to a single queue.
 *
 * @param dev       Ethernet device.
 * @param dev_flow  Device flow supplying hash fields and layer flags.
 * @param rss_desc  RSS descriptor, updated in place.
 * @param hrxq_idx  Out: hrxq index, 0 on failure.
 * @return hrxq object, or NULL on failure.
 */
static struct mlx5_hrxq *
flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
		     struct mlx5_flow *dev_flow,
		     struct mlx5_flow_rss_desc *rss_desc,
		     uint32_t *hrxq_idx)
{
	struct mlx5_flow_handle *dh = dev_flow->handle;
	uint32_t shared_rss = rss_desc->shared_rss;
	struct mlx5_hrxq *hrxq;

	MLX5_ASSERT(rss_desc->queue_num);
	rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
	rss_desc->hash_fields = dev_flow->hash_fields;
	rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
	/* Hide shared_rss from the cache match, restore it afterwards. */
	rss_desc->shared_rss = 0;
	if (rss_desc->hash_fields == 0)
		rss_desc->queue_num = 1;
	hrxq = mlx5_hrxq_get(dev, rss_desc);
	*hrxq_idx = hrxq ? hrxq->idx : 0;
	rss_desc->shared_rss = shared_rss;
	return hrxq;
}
11477
11478
11479
11480
11481
11482
11483
11484
11485
/*
 * Release the sub-action resources referenced by a sample action.
 *
 * Drops, in order, the hrxq, encap/decap, port-id, tag, and jump resources
 * if present, zeroing each index once released so a repeated call is a
 * no-op.
 *
 * @param dev      Ethernet device.
 * @param act_res  Sub-action resource indices to release.
 */
static void
flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
				   struct mlx5_flow_sub_actions_idx *act_res)
{
	if (act_res->rix_hrxq) {
		mlx5_hrxq_release(dev, act_res->rix_hrxq);
		act_res->rix_hrxq = 0;
	}
	if (act_res->rix_encap_decap) {
		flow_dv_encap_decap_resource_release(dev,
						     act_res->rix_encap_decap);
		act_res->rix_encap_decap = 0;
	}
	if (act_res->rix_port_id_action) {
		flow_dv_port_id_action_resource_release(dev,
						act_res->rix_port_id_action);
		act_res->rix_port_id_action = 0;
	}
	if (act_res->rix_tag) {
		flow_dv_tag_release(dev, act_res->rix_tag);
		act_res->rix_tag = 0;
	}
	if (act_res->rix_jump) {
		flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
		act_res->rix_jump = 0;
	}
}
11513
11514int
11515flow_dv_sample_match_cb(void *tool_ctx __rte_unused,
11516 struct mlx5_list_entry *entry, void *cb_ctx)
11517{
11518 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11519 struct rte_eth_dev *dev = ctx->dev;
11520 struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11521 struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
11522 typeof(*resource),
11523 entry);
11524
11525 if (ctx_resource->ratio == resource->ratio &&
11526 ctx_resource->ft_type == resource->ft_type &&
11527 ctx_resource->ft_id == resource->ft_id &&
11528 ctx_resource->set_action == resource->set_action &&
11529 !memcmp((void *)&ctx_resource->sample_act,
11530 (void *)&resource->sample_act,
11531 sizeof(struct mlx5_flow_sub_actions_list))) {
11532
11533
11534
11535
11536 flow_dv_sample_sub_actions_release(dev,
11537 &ctx_resource->sample_idx);
11538 return 0;
11539 }
11540 return 1;
11541}
11542
/*
 * List create callback for sample resources.
 *
 * Allocates a sample resource from the SAMPLE ipool, copies the reference
 * resource into it, creates the "normal path" table one level past the
 * sample table, appends the FDB default-miss action when sampling in the
 * transfer domain, and finally creates the DR sampler action.
 *
 * The shared error label releases whatever was acquired so far: sub-action
 * references (non-FDB only), the normal-path table, and the ipool entry.
 *
 * @return New list entry, or NULL with ctx->error set on failure.
 */
struct mlx5_list_entry *
flow_dv_sample_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
	void **sample_dv_actions = ctx_resource->sub_actions;
	struct mlx5_flow_dv_sample_resource *resource;
	struct mlx5dv_dr_flow_sampler_attr sampler_attr;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_tbl_resource *tbl;
	uint32_t idx = 0;
	const uint32_t next_ft_step = 1;
	uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
	uint8_t is_egress = 0;
	uint8_t is_transfer = 0;
	struct rte_flow_error *error = ctx->error;

	/* Register a new sample resource. */
	resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
	if (!resource) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
		return NULL;
	}
	*resource = *ctx_resource;
	/* Create the normal path table one level below the sample table. */
	if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		is_transfer = 1;
	else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		is_egress = 1;
	tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
					is_egress, is_transfer,
					true, NULL, 0, 0, 0, error);
	if (!tbl) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "fail to create normal path table "
					  "for sample");
		goto error;
	}
	resource->normal_path_tbl = tbl;
	if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
		if (!sh->default_miss_action) {
			rte_flow_error_set(error, ENOMEM,
						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						NULL,
						"default miss action was not "
						"created");
			goto error;
		}
		sample_dv_actions[ctx_resource->sample_act.actions_num++] =
						sh->default_miss_action;
	}
	/* Create a DR sample action. */
	sampler_attr.sample_ratio = resource->ratio;
	sampler_attr.default_next_table = tbl->obj;
	sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
	sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
							&sample_dv_actions[0];
	sampler_attr.action = resource->set_action;
	if (mlx5_os_flow_dr_create_flow_action_sampler
			(&sampler_attr, &resource->verbs_action)) {
		rte_flow_error_set(error, ENOMEM,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL, "cannot create sample action");
		goto error;
	}
	resource->idx = idx;
	resource->dev = dev;
	return &resource->entry;
error:
	/* FDB sub-action references are owned elsewhere; skip them. */
	if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
		flow_dv_sample_sub_actions_release(dev,
						   &resource->sample_idx);
	if (resource->normal_path_tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev),
				resource->normal_path_tbl);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
	return NULL;

}
11629
11630struct mlx5_list_entry *
11631flow_dv_sample_clone_cb(void *tool_ctx __rte_unused,
11632 struct mlx5_list_entry *entry __rte_unused,
11633 void *cb_ctx)
11634{
11635 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11636 struct rte_eth_dev *dev = ctx->dev;
11637 struct mlx5_flow_dv_sample_resource *resource;
11638 struct mlx5_priv *priv = dev->data->dev_private;
11639 struct mlx5_dev_ctx_shared *sh = priv->sh;
11640 uint32_t idx = 0;
11641
11642 resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11643 if (!resource) {
11644 rte_flow_error_set(ctx->error, ENOMEM,
11645 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11646 NULL,
11647 "cannot allocate resource memory");
11648 return NULL;
11649 }
11650 memcpy(resource, entry, sizeof(*resource));
11651 resource->idx = idx;
11652 resource->dev = dev;
11653 return &resource->entry;
11654}
11655
/* List clone-free callback: return a cloned sample resource to the SAMPLE
 * ipool of the device recorded in the resource.
 */
void
flow_dv_sample_clone_free_cb(void *tool_ctx __rte_unused,
			     struct mlx5_list_entry *entry)
{
	struct mlx5_flow_dv_sample_resource *resource =
				container_of(entry, typeof(*resource), entry);
	struct rte_eth_dev *dev = resource->dev;
	struct mlx5_priv *priv = dev->data->dev_private;

	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
}
11667
11668
11669
11670
11671
11672
11673
11674
11675
11676
11677
11678
11679
11680
11681
11682
11683static int
11684flow_dv_sample_resource_register(struct rte_eth_dev *dev,
11685 struct mlx5_flow_dv_sample_resource *ref,
11686 struct mlx5_flow *dev_flow,
11687 struct rte_flow_error *error)
11688{
11689 struct mlx5_flow_dv_sample_resource *resource;
11690 struct mlx5_list_entry *entry;
11691 struct mlx5_priv *priv = dev->data->dev_private;
11692 struct mlx5_flow_cb_ctx ctx = {
11693 .dev = dev,
11694 .error = error,
11695 .data = ref,
11696 };
11697
11698 entry = mlx5_list_register(priv->sh->sample_action_list, &ctx);
11699 if (!entry)
11700 return -rte_errno;
11701 resource = container_of(entry, typeof(*resource), entry);
11702 dev_flow->handle->dvh.rix_sample = resource->idx;
11703 dev_flow->dv.sample_res = resource;
11704 return 0;
11705}
11706
11707int
11708flow_dv_dest_array_match_cb(void *tool_ctx __rte_unused,
11709 struct mlx5_list_entry *entry, void *cb_ctx)
11710{
11711 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11712 struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11713 struct rte_eth_dev *dev = ctx->dev;
11714 struct mlx5_flow_dv_dest_array_resource *resource =
11715 container_of(entry, typeof(*resource), entry);
11716 uint32_t idx = 0;
11717
11718 if (ctx_resource->num_of_dest == resource->num_of_dest &&
11719 ctx_resource->ft_type == resource->ft_type &&
11720 !memcmp((void *)resource->sample_act,
11721 (void *)ctx_resource->sample_act,
11722 (ctx_resource->num_of_dest *
11723 sizeof(struct mlx5_flow_sub_actions_list)))) {
11724
11725
11726
11727
11728 for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11729 flow_dv_sample_sub_actions_release(dev,
11730 &ctx_resource->sample_idx[idx]);
11731 return 0;
11732 }
11733 return 1;
11734}
11735
/**
 * Cache-list create callback: allocate a destination array resource and
 * instantiate the corresponding DR dest-array action.
 *
 * One mlx5dv_dr_action_dest_attr is built per destination from the
 * sub-action lists carried in the reference resource; the dest-array
 * action is then created in the domain selected by the resource's flow
 * table type. On any failure all partially acquired resources are
 * rolled back.
 *
 * @param[in] tool_ctx
 *   Unused tool context.
 * @param[in] cb_ctx
 *   Pointer to struct mlx5_flow_cb_ctx holding the device, the
 *   reference resource (ctx->data) and the error structure.
 *
 * @return
 *   Pointer to the new entry on success, NULL otherwise and rte_flow
 *   error is set.
 */
struct mlx5_list_entry *
flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_dv_dest_array_resource *resource;
	struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
	struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
	struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_sub_actions_list *sample_act;
	struct mlx5dv_dr_domain *domain;
	uint32_t idx = 0, res_idx = 0;
	struct rte_flow_error *error = ctx->error;
	uint64_t action_flags;
	int ret;

	/* Register a new destination array resource in the indexed pool. */
	resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
				      &res_idx);
	if (!resource) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
		return NULL;
	}
	*resource = *ctx_resource;
	/* Pick the DR domain matching the resource's flow table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
	/* Build one destination attribute per sub-action list entry. */
	for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
		dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
				 mlx5_malloc(MLX5_MEM_ZERO,
				 sizeof(struct mlx5dv_dr_action_dest_attr),
				 0, SOCKET_ID_ANY);
		if (!dest_attr[idx]) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot allocate resource memory");
			goto error;
		}
		dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
		sample_act = &ctx_resource->sample_act[idx];
		action_flags = sample_act->action_flags;
		switch (action_flags) {
		case MLX5_FLOW_ACTION_QUEUE:
			dest_attr[idx]->dest = sample_act->dr_queue_action;
			break;
		case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
			/* Encap before forwarding needs a reformat dest. */
			dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
			dest_attr[idx]->dest_reformat = &dest_reformat[idx];
			dest_attr[idx]->dest_reformat->reformat =
					sample_act->dr_encap_action;
			dest_attr[idx]->dest_reformat->dest =
					sample_act->dr_port_id_action;
			break;
		case MLX5_FLOW_ACTION_PORT_ID:
			dest_attr[idx]->dest = sample_act->dr_port_id_action;
			break;
		case MLX5_FLOW_ACTION_JUMP:
			dest_attr[idx]->dest = sample_act->dr_jump_action;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   NULL,
					   "unsupported actions type");
			goto error;
		}
	}
	/* create a dest array action */
	ret = mlx5_os_flow_dr_create_flow_action_dest_array
						(domain,
						 resource->num_of_dest,
						 dest_attr,
						 &resource->action);
	if (ret) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot create destination array action");
		goto error;
	}
	resource->idx = res_idx;
	resource->dev = dev;
	/* The temporary attribute structs are no longer needed. */
	for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
		mlx5_free(dest_attr[idx]);
	return &resource->entry;
error:
	/* Roll back: release sub-actions, attrs and the pool entry. */
	for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
		flow_dv_sample_sub_actions_release(dev,
						   &resource->sample_idx[idx]);
		if (dest_attr[idx])
			mlx5_free(dest_attr[idx]);
	}
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
	return NULL;
}
11840
11841struct mlx5_list_entry *
11842flow_dv_dest_array_clone_cb(void *tool_ctx __rte_unused,
11843 struct mlx5_list_entry *entry __rte_unused,
11844 void *cb_ctx)
11845{
11846 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11847 struct rte_eth_dev *dev = ctx->dev;
11848 struct mlx5_flow_dv_dest_array_resource *resource;
11849 struct mlx5_priv *priv = dev->data->dev_private;
11850 struct mlx5_dev_ctx_shared *sh = priv->sh;
11851 uint32_t res_idx = 0;
11852 struct rte_flow_error *error = ctx->error;
11853
11854 resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11855 &res_idx);
11856 if (!resource) {
11857 rte_flow_error_set(error, ENOMEM,
11858 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11859 NULL,
11860 "cannot allocate dest-array memory");
11861 return NULL;
11862 }
11863 memcpy(resource, entry, sizeof(*resource));
11864 resource->idx = res_idx;
11865 resource->dev = dev;
11866 return &resource->entry;
11867}
11868
11869void
11870flow_dv_dest_array_clone_free_cb(void *tool_ctx __rte_unused,
11871 struct mlx5_list_entry *entry)
11872{
11873 struct mlx5_flow_dv_dest_array_resource *resource =
11874 container_of(entry, typeof(*resource), entry);
11875 struct rte_eth_dev *dev = resource->dev;
11876 struct mlx5_priv *priv = dev->data->dev_private;
11877
11878 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
11879}
11880
11881
11882
11883
11884
11885
11886
11887
11888
11889
11890
11891
11892
11893
11894
11895
11896static int
11897flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
11898 struct mlx5_flow_dv_dest_array_resource *ref,
11899 struct mlx5_flow *dev_flow,
11900 struct rte_flow_error *error)
11901{
11902 struct mlx5_flow_dv_dest_array_resource *resource;
11903 struct mlx5_priv *priv = dev->data->dev_private;
11904 struct mlx5_list_entry *entry;
11905 struct mlx5_flow_cb_ctx ctx = {
11906 .dev = dev,
11907 .error = error,
11908 .data = ref,
11909 };
11910
11911 entry = mlx5_list_register(priv->sh->dest_array_list, &ctx);
11912 if (!entry)
11913 return -rte_errno;
11914 resource = container_of(entry, typeof(*resource), entry);
11915 dev_flow->handle->dvh.rix_dest_array = resource->idx;
11916 dev_flow->dv.dest_array_res = resource;
11917 return 0;
11918}
11919
11920
11921
11922
11923
11924
11925
11926
11927
11928
11929
11930
11931
11932
11933
11934
11935
11936
11937
11938
11939
11940
11941
11942
/**
 * Translate the sub-action list of a SAMPLE action into DR actions and
 * fill the sample resource descriptor.
 *
 * Walks @p action->actions until END, creating/looking up the DR object
 * for each supported sub-action (QUEUE, RSS, MARK, COUNT, PORT_ID /
 * REPRESENTED_PORT, encap) and appending it to @p sample_actions.
 * Destination-type sub-actions also bump @p num_of_dest. Finally the
 * resource flow-table type is derived from @p attr, and for transfer
 * rules a REG_C_0 set-action restoring the vport metadata tag is
 * prepared.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_flow error
 *   is set.
 */
static int
flow_dv_translate_action_sample(struct rte_eth_dev *dev,
				const struct rte_flow_action_sample *action,
				struct mlx5_flow *dev_flow,
				const struct rte_flow_attr *attr,
				uint32_t *num_of_dest,
				void **sample_actions,
				struct mlx5_flow_dv_sample_resource *res,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action *sub_actions;
	struct mlx5_flow_sub_actions_list *sample_act;
	struct mlx5_flow_sub_actions_idx *sample_idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct rte_flow *flow = dev_flow->flow;
	struct mlx5_flow_rss_desc *rss_desc;
	uint64_t action_flags = 0;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	sample_act = &res->sample_act;
	sample_idx = &res->sample_idx;
	res->ratio = action->ratio;
	sub_actions = action->actions;
	for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
		int type = sub_actions->type;
		uint32_t pre_rix = 0;
		void *pre_r;
		switch (type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
		{
			const struct rte_flow_action_queue *queue;
			struct mlx5_hrxq *hrxq;
			uint32_t hrxq_idx;

			queue = sub_actions->conf;
			rss_desc->queue_num = 1;
			rss_desc->queue[0] = queue->index;
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_ACTION,
					 NULL,
					 "cannot create fate queue");
			sample_act->dr_queue_action = hrxq->action;
			sample_idx->rix_hrxq = hrxq_idx;
			sample_actions[sample_act->actions_num++] =
						hrxq->action;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action =
					MLX5_FLOW_FATE_QUEUE;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_RSS:
		{
			struct mlx5_hrxq *hrxq;
			uint32_t hrxq_idx;
			const struct rte_flow_action_rss *rss;
			const uint8_t *rss_key;

			rss = sub_actions->conf;
			memcpy(rss_desc->queue, rss->queue,
			       rss->queue_num * sizeof(uint16_t));
			rss_desc->queue_num = rss->queue_num;
			/* NULL RSS key indicates the default RSS key. */
			rss_key = !rss->key ? rss_hash_default_key : rss->key;
			memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
			/*
			 * Hash fields are derived from the layers already
			 * detected on this device flow plus the RSS
			 * descriptor filled above.
			 */
			flow_dv_hashfields_set(dev_flow->handle->layers,
					       rss_desc,
					       &dev_flow->hash_fields);
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_ACTION,
					 NULL,
					 "cannot create fate queue");
			sample_act->dr_queue_action = hrxq->action;
			sample_idx->rix_hrxq = hrxq_idx;
			sample_actions[sample_act->actions_num++] =
						hrxq->action;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action =
					MLX5_FLOW_FATE_QUEUE;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_MARK:
		{
			uint32_t tag_be = mlx5_flow_mark_set
				(((const struct rte_flow_action_mark *)
				(sub_actions->conf))->id);

			wks->mark = 1;
			/* Save the tag resource currently on the flow. */
			pre_rix = dev_flow->handle->dvh.rix_tag;
			pre_r = dev_flow->dv.tag_resource;
			if (flow_dv_tag_resource_register(dev, tag_be,
						  dev_flow, error))
				return -rte_errno;
			MLX5_ASSERT(dev_flow->dv.tag_resource);
			sample_act->dr_tag_action =
				dev_flow->dv.tag_resource->action;
			sample_idx->rix_tag =
				dev_flow->handle->dvh.rix_tag;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_tag_action;
			/* Restore the previous tag resource on the flow. */
			dev_flow->dv.tag_resource = pre_r;
			dev_flow->handle->dvh.rix_tag = pre_rix;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_COUNT:
		{
			if (!flow->counter) {
				flow->counter =
					flow_dv_translate_create_counter(dev,
						dev_flow, sub_actions->conf,
						0);
				if (!flow->counter)
					return rte_flow_error_set
						(error, rte_errno,
						RTE_FLOW_ERROR_TYPE_ACTION,
						NULL,
						"cannot create counter"
						" object.");
			}
			sample_act->dr_cnt_action =
				  (flow_dv_counter_get_by_idx(dev,
				  flow->counter, NULL))->action;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_cnt_action;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
		{
			struct mlx5_flow_dv_port_id_action_resource
					port_id_resource;
			uint32_t port_id = 0;

			memset(&port_id_resource, 0, sizeof(port_id_resource));
			/* Save the port-id action currently on the flow. */
			pre_rix = dev_flow->handle->rix_port_id_action;
			pre_r = dev_flow->dv.port_id_action;
			if (flow_dv_translate_action_port_id(dev, sub_actions,
							     &port_id, error))
				return -rte_errno;
			port_id_resource.port_id = port_id;
			if (flow_dv_port_id_action_resource_register
			    (dev, &port_id_resource, dev_flow, error))
				return -rte_errno;
			sample_act->dr_port_id_action =
				dev_flow->dv.port_id_action->action;
			sample_idx->rix_port_id_action =
				dev_flow->handle->rix_port_id_action;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_port_id_action;
			/* Restore the previous port-id action on the flow. */
			dev_flow->dv.port_id_action = pre_r;
			dev_flow->handle->rix_port_id_action = pre_rix;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			/* Save the encap resource currently on the flow. */
			pre_rix = dev_flow->handle->dvh.rix_encap_decap;
			pre_r = dev_flow->dv.encap_decap;
			if (flow_dv_create_action_l2_encap(dev, sub_actions,
							   dev_flow,
							   attr->transfer,
							   error))
				return -rte_errno;
			sample_act->dr_encap_action =
				dev_flow->dv.encap_decap->action;
			sample_idx->rix_encap_decap =
				dev_flow->handle->dvh.rix_encap_decap;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_encap_action;
			/* Restore the previous encap resource on the flow. */
			dev_flow->dv.encap_decap = pre_r;
			dev_flow->handle->dvh.rix_encap_decap = pre_rix;
			action_flags |= MLX5_FLOW_ACTION_ENCAP;
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"Not support for sampler action");
		}
	}
	sample_act->action_flags = action_flags;
	res->ft_id = dev_flow->dv.group;
	if (attr->transfer) {
		union {
			uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
			uint64_t set_action;
		} action_ctx = { .set_action = 0 };

		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
		/* Prepare a set-action restoring the vport metadata tag
		 * into REG_C_0 for sampled transfer packets.
		 */
		MLX5_SET(set_action_in, action_ctx.action_in, action_type,
			 MLX5_MODIFICATION_TYPE_SET);
		MLX5_SET(set_action_in, action_ctx.action_in, field,
			 MLX5_MODI_META_REG_C_0);
		MLX5_SET(set_action_in, action_ctx.action_in, data,
			 priv->vport_meta_tag);
		res->set_action = action_ctx.set_action;
	} else if (attr->ingress) {
		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
	} else {
		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
	}
	return 0;
}
12175
12176
12177
12178
12179
12180
12181
12182
12183
12184
12185
12186
12187
12188
12189
12190
12191
12192
12193
12194
12195
12196
12197
12198
/**
 * Finalize the SAMPLE action: register either a plain sample resource
 * (single destination) or a destination array resource (mirroring with
 * more than one destination).
 *
 * For the multi-destination case the "normal" (non-sampled) path
 * actions currently held by the device flow are moved into the last
 * slot of @p mdest_res and their indices transferred, so ownership
 * passes to the destination array resource.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_flow error
 *   is set.
 */
static int
flow_dv_create_action_sample(struct rte_eth_dev *dev,
			     struct mlx5_flow *dev_flow,
			     uint32_t num_of_dest,
			     struct mlx5_flow_dv_sample_resource *res,
			     struct mlx5_flow_dv_dest_array_resource *mdest_res,
			     void **sample_actions,
			     uint64_t action_flags,
			     struct rte_flow_error *error)
{
	/* The normal-path actions occupy the last index of the array. */
	uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
	struct mlx5_flow_sub_actions_list *sample_act =
					&mdest_res->sample_act[dest_index];
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc;
	uint32_t normal_idx = 0;
	struct mlx5_hrxq *hrxq;
	uint32_t hrxq_idx;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	if (num_of_dest > 1) {
		if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
			/* Handle the QUEUE destination of the normal path. */
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
				     (error, rte_errno,
				      RTE_FLOW_ERROR_TYPE_ACTION,
				      NULL,
				      "cannot create rx queue");
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
			sample_act->dr_queue_action = hrxq->action;
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
			/* Move encap ownership to the dest array resource. */
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_encap_decap =
				dev_flow->handle->dvh.rix_encap_decap;
			sample_act->dr_encap_action =
				dev_flow->dv.encap_decap->action;
			dev_flow->handle->dvh.rix_encap_decap = 0;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
			/* Move port-id ownership to the dest array resource. */
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_port_id_action =
				dev_flow->handle->rix_port_id_action;
			sample_act->dr_port_id_action =
				dev_flow->dv.port_id_action->action;
			dev_flow->handle->rix_port_id_action = 0;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
			/* Move jump ownership to the dest array resource. */
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_jump =
				dev_flow->handle->rix_jump;
			sample_act->dr_jump_action =
				dev_flow->dv.jump->action;
			dev_flow->handle->rix_jump = 0;
		}
		sample_act->actions_num = normal_idx;
		/* The sampled path goes into slot 0 of the array. */
		mdest_res->ft_type = res->ft_type;
		memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
				sizeof(struct mlx5_flow_sub_actions_idx));
		memcpy(&mdest_res->sample_act[0], &res->sample_act,
				sizeof(struct mlx5_flow_sub_actions_list));
		mdest_res->num_of_dest = num_of_dest;
		if (flow_dv_dest_array_resource_register(dev, mdest_res,
							 dev_flow, error))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "can't create sample "
						  "action");
	} else {
		res->sub_actions = sample_actions;
		if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "can't create sample action");
	}
	return 0;
}
12287
12288
12289
12290
12291
12292
12293
12294
12295
/**
 * Detach an ASO age action from the port aging machinery and mark it
 * AGE_FREE.
 *
 * The fast path is an AGE_CANDIDATE -> AGE_FREE CAS. If the CAS fails
 * the action is in another state (it already aged out and was linked
 * to the port's aged list — the LIST_REMOVE below relies on that), so
 * it is unlinked under the aged-list lock before being marked free.
 */
static void
flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
				struct mlx5_aso_age_action *age)
{
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param = &age->age_params;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t expected = AGE_CANDIDATE;

	age_info = GET_PORT_AGE_INFO(priv);
	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
					 AGE_FREE, false, __ATOMIC_RELAXED,
					 __ATOMIC_RELAXED)) {
		/*
		 * Not a candidate: the action sits on the aged list.
		 * Unlink it under the lock, then publish AGE_FREE.
		 */
		rte_spinlock_lock(&age_info->aged_sl);
		LIST_REMOVE(age, next);
		rte_spinlock_unlock(&age_info->aged_sl);
		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
	}
}
12319
12320
12321
12322
12323
12324
12325
12326
12327
12328
12329
12330
12331
12332
12333
12334static int
12335flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
12336{
12337 struct mlx5_priv *priv = dev->data->dev_private;
12338 struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12339 struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
12340 uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
12341
12342 if (!ret) {
12343 flow_dv_aso_age_remove_from_age(dev, age);
12344 rte_spinlock_lock(&mng->free_sl);
12345 LIST_INSERT_HEAD(&mng->free, age, next);
12346 rte_spinlock_unlock(&mng->free_sl);
12347 }
12348 return ret;
12349}
12350
12351
12352
12353
12354
12355
12356
12357
12358
12359
12360static int
12361flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
12362{
12363 struct mlx5_priv *priv = dev->data->dev_private;
12364 struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12365 void *old_pools = mng->pools;
12366 uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
12367 uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
12368 void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12369
12370 if (!pools) {
12371 rte_errno = ENOMEM;
12372 return -ENOMEM;
12373 }
12374 if (old_pools) {
12375 memcpy(pools, old_pools,
12376 mng->n * sizeof(struct mlx5_flow_counter_pool *));
12377 mlx5_free(old_pools);
12378 } else {
12379
12380 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
12381
12382 if (ret) {
12383 mlx5_free(pools);
12384 return ret;
12385 }
12386 }
12387 mng->n = resize;
12388 mng->pools = pools;
12389 return 0;
12390}
12391
12392
12393
12394
12395
12396
12397
12398
12399
12400
12401
12402
12403
/**
 * Create a new ASO age pool backed by a DevX flow-hit ASO object.
 *
 * The pool is appended to the manager's pools array (resizing it if
 * full) under the resize write lock. The first action of the pool is
 * handed back through @p age_free; the remaining actions are pushed
 * onto the manager's free list.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[out] age_free
 *   Receives the first (immediately usable) action of the new pool.
 *
 * @return
 *   Pointer to the new pool, NULL on failure (rte_errno set).
 */
static struct mlx5_aso_age_pool *
flow_dv_age_pool_create(struct rte_eth_dev *dev,
			struct mlx5_aso_age_action **age_free)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
	struct mlx5_aso_age_pool *pool = NULL;
	struct mlx5_devx_obj *obj = NULL;
	uint32_t i;

	obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->cdev->ctx,
						    priv->sh->cdev->pdn);
	if (!obj) {
		rte_errno = ENODATA;
		DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
		return NULL;
	}
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
	if (!pool) {
		claim_zero(mlx5_devx_cmd_destroy(obj));
		rte_errno = ENOMEM;
		return NULL;
	}
	pool->flow_hit_aso_obj = obj;
	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
	rte_rwlock_write_lock(&mng->resize_rwl);
	pool->index = mng->next;
	/* Grow the pools array when the new pool would not fit. */
	if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
		claim_zero(mlx5_devx_cmd_destroy(obj));
		mlx5_free(pool);
		rte_rwlock_write_unlock(&mng->resize_rwl);
		return NULL;
	}
	mng->pools[pool->index] = pool;
	mng->next++;
	rte_rwlock_write_unlock(&mng->resize_rwl);
	/*
	 * Hand action[0] to the caller, free-list the rest.
	 * NOTE(review): the free list appears to be protected by the
	 * caller holding mng->free_sl (see flow_dv_aso_age_alloc) —
	 * confirm callers always hold it here.
	 */
	*age_free = &pool->actions[0];
	for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
		pool->actions[i].offset = i;
		LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
	}
	return pool;
}
12449
12450
12451
12452
12453
12454
12455
12456
12457
12458
12459
12460
/**
 * Allocate an ASO age action from the free list, creating a new pool
 * when the list is empty, and lazily create its DR ASO first-hit
 * action.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[out] error
 *   Flow error structure filled on failure.
 *
 * @return
 *   A non-zero encoded index (pool index in the low 16 bits, offset+1
 *   in the high bits) on success, 0 on failure.
 */
static uint32_t
flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct mlx5_aso_age_pool *pool;
	struct mlx5_aso_age_action *age_free = NULL;
	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;

	MLX5_ASSERT(mng);
	/* Try to pop an action from the free list first. */
	rte_spinlock_lock(&mng->free_sl);
	age_free = LIST_FIRST(&mng->free);
	if (age_free) {
		LIST_REMOVE(age_free, next);
	} else if (!flow_dv_age_pool_create(dev, &age_free)) {
		rte_spinlock_unlock(&mng->free_sl);
		rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, "failed to create ASO age pool");
		return 0; /* 0 is an error. */
	}
	rte_spinlock_unlock(&mng->free_sl);
	/* Recover the owning pool from the action's offset in it. */
	pool = container_of
	  ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
		  (age_free - age_free->offset), const struct mlx5_aso_age_pool,
								       actions);
	if (!age_free->dr_action) {
		int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
						 error);

		if (reg_c < 0) {
			rte_flow_error_set(error, rte_errno,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   NULL, "failed to get reg_c "
					   "for ASO flow hit");
			return 0; /* 0 is an error. */
		}
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
		age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
				(priv->sh->rx_domain,
				 pool->flow_hit_aso_obj->obj, age_free->offset,
				 MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
				 (reg_c - REG_C_0));
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
		if (!age_free->dr_action) {
			/* Put the action back so it can be retried later. */
			rte_errno = errno;
			rte_spinlock_lock(&mng->free_sl);
			LIST_INSERT_HEAD(&mng->free, age_free, next);
			rte_spinlock_unlock(&mng->free_sl);
			rte_flow_error_set(error, rte_errno,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   NULL, "failed to create ASO "
					   "flow hit action");
			return 0; /* 0 is an error. */
		}
	}
	__atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
	/* Offset is biased by 1 so the encoded index is never 0. */
	return pool->index | ((age_free->offset + 1) << 16);
}
12519
12520
12521
12522
12523
12524
12525
12526
12527
12528
12529
12530
12531
12532
12533static void
12534flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
12535 uint32_t age_idx,
12536 void *context,
12537 uint32_t timeout)
12538{
12539 struct mlx5_aso_age_action *aso_age;
12540
12541 aso_age = flow_aso_age_get_by_idx(dev, age_idx);
12542 MLX5_ASSERT(aso_age);
12543 aso_age->age_params.context = context;
12544 aso_age->age_params.timeout = timeout;
12545 aso_age->age_params.port_id = dev->data->port_id;
12546 __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
12547 __ATOMIC_RELAXED);
12548 __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
12549 __ATOMIC_RELAXED);
12550}
12551
12552static void
12553flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
12554 const struct rte_flow_item_integrity *value,
12555 void *headers_m, void *headers_v)
12556{
12557 if (mask->l4_ok) {
12558
12559
12560
12561
12562
12563
12564
12565
12566 if (value->l4_ok) {
12567 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, 1);
12568 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, 1);
12569 }
12570 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12571 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12572 !!value->l4_ok);
12573 }
12574 if (mask->l4_csum_ok) {
12575 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12576 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12577 value->l4_csum_ok);
12578 }
12579}
12580
12581static void
12582flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
12583 const struct rte_flow_item_integrity *value,
12584 void *headers_m, void *headers_v, bool is_ipv4)
12585{
12586 if (mask->l3_ok) {
12587
12588
12589
12590
12591
12592
12593
12594
12595 if (is_ipv4) {
12596 if (value->l3_ok) {
12597 MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12598 l3_ok, 1);
12599 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12600 l3_ok, 1);
12601 }
12602 MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12603 ipv4_checksum_ok, 1);
12604 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12605 ipv4_checksum_ok, !!value->l3_ok);
12606 } else {
12607 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, 1);
12608 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
12609 value->l3_ok);
12610 }
12611 }
12612 if (mask->ipv4_csum_ok) {
12613 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, 1);
12614 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
12615 value->ipv4_csum_ok);
12616 }
12617}
12618
12619static void
12620set_integrity_bits(void *headers_m, void *headers_v,
12621 const struct rte_flow_item *integrity_item, bool is_l3_ip4)
12622{
12623 const struct rte_flow_item_integrity *spec = integrity_item->spec;
12624 const struct rte_flow_item_integrity *mask = integrity_item->mask;
12625
12626
12627 MLX5_ASSERT(spec != NULL);
12628 if (!mask)
12629 mask = &rte_flow_item_integrity_mask;
12630 flow_dv_translate_integrity_l3(mask, spec, headers_m, headers_v,
12631 is_l3_ip4);
12632 flow_dv_translate_integrity_l4(mask, spec, headers_m, headers_v);
12633}
12634
12635static void
12636flow_dv_translate_item_integrity_post(void *matcher, void *key,
12637 const
12638 struct rte_flow_item *integrity_items[2],
12639 uint64_t pattern_flags)
12640{
12641 void *headers_m, *headers_v;
12642 bool is_l3_ip4;
12643
12644 if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
12645 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12646 inner_headers);
12647 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
12648 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) !=
12649 0;
12650 set_integrity_bits(headers_m, headers_v,
12651 integrity_items[1], is_l3_ip4);
12652 }
12653 if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
12654 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12655 outer_headers);
12656 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
12657 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) !=
12658 0;
12659 set_integrity_bits(headers_m, headers_v,
12660 integrity_items[0], is_l3_ip4);
12661 }
12662}
12663
12664static void
12665flow_dv_translate_item_integrity(const struct rte_flow_item *item,
12666 const struct rte_flow_item *integrity_items[2],
12667 uint64_t *last_item)
12668{
12669 const struct rte_flow_item_integrity *spec = (typeof(spec))item->spec;
12670
12671
12672 MLX5_ASSERT(spec != NULL);
12673 if (spec->level > 1) {
12674 integrity_items[1] = item;
12675 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
12676 } else {
12677 integrity_items[0] = item;
12678 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
12679 }
12680}
12681
12682
12683
12684
12685
12686
12687
12688
12689
12690
12691
12692
12693
12694
12695
12696
12697
12698
12699
12700
12701
12702static struct mlx5_flow_counter *
12703flow_dv_prepare_counter(struct rte_eth_dev *dev,
12704 struct mlx5_flow *dev_flow,
12705 struct rte_flow *flow,
12706 const struct rte_flow_action_count *count,
12707 const struct rte_flow_action_age *age,
12708 struct rte_flow_error *error)
12709{
12710 if (!flow->counter) {
12711 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
12712 count, age);
12713 if (!flow->counter) {
12714 rte_flow_error_set(error, rte_errno,
12715 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12716 "cannot create counter object.");
12717 return NULL;
12718 }
12719 }
12720 return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
12721}
12722
12723
12724
12725
12726
12727
12728
12729
12730
12731
12732
12733
/**
 * Drop one reference of an ASO CT action on its owner device; on the
 * last reference destroy its DR actions and return it to the free
 * list.
 *
 * Release is refused (returns -1) while the action is still being
 * processed by the ASO queue (WAIT or QUERY state).
 *
 * @param[in] dev
 *   Owner Ethernet device of the CT object.
 * @param[in] idx
 *   Device-local index of the CT action.
 *
 * @return
 *   Remaining reference count on success, -1 when busy.
 */
static inline int
flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
	uint32_t ret;
	struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
	enum mlx5_aso_ct_state state =
			__atomic_load_n(&ct->state, __ATOMIC_RELAXED);

	/* Cannot release when CT is in the ASO SQ, state change pending. */
	if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
		return -1;
	ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
	if (!ret) {
		/* Destroy both direction DR actions if they were created. */
		if (ct->dr_action_orig) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
			claim_zero(mlx5_glue->destroy_flow_action
					(ct->dr_action_orig));
#endif
			ct->dr_action_orig = NULL;
		}
		if (ct->dr_action_rply) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
			claim_zero(mlx5_glue->destroy_flow_action
					(ct->dr_action_rply));
#endif
			ct->dr_action_rply = NULL;
		}
		/* Clear the state and put the action back on the free list. */
		MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
		rte_spinlock_lock(&mng->ct_sl);
		LIST_INSERT_HEAD(&mng->free_cts, ct, next);
		rte_spinlock_unlock(&mng->ct_sl);
	}
	return (int)ret;
}
12771
12772static inline int
12773flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx,
12774 struct rte_flow_error *error)
12775{
12776 uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
12777 uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
12778 struct rte_eth_dev *owndev = &rte_eth_devices[owner];
12779 int ret;
12780
12781 MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
12782 if (dev->data->dev_started != 1)
12783 return rte_flow_error_set(error, EAGAIN,
12784 RTE_FLOW_ERROR_TYPE_ACTION,
12785 NULL,
12786 "Indirect CT action cannot be destroyed when the port is stopped");
12787 ret = flow_dv_aso_ct_dev_release(owndev, idx);
12788 if (ret < 0)
12789 return rte_flow_error_set(error, EAGAIN,
12790 RTE_FLOW_ERROR_TYPE_ACTION,
12791 NULL,
12792 "Current state prevents indirect CT action from being destroyed");
12793 return ret;
12794}
12795
12796
12797
12798
12799
12800
12801
12802
12803
12804
/**
 * Grow the ASO CT pools pointer array by 64 slots.
 *
 * A larger array is allocated first (outside the lock), then the old
 * contents are copied and the manager updated under the resize write
 * lock, so readers holding the read lock never see a half-updated
 * array.
 *
 * NOTE(review): 64 looks like the per-step pool growth constant
 * (MLX5_ASO_CT_POOLS elsewhere in this driver) — confirm and use the
 * named macro instead of the magic number.
 * NOTE(review): mng->pools and mng->n are read before taking
 * resize_rwl; this assumes only one thread resizes at a time — verify
 * against the callers' locking.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   0 on success, -ENOMEM otherwise (rte_errno is set).
 */
static int
flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
	void *old_pools = mng->pools;
	/* Round up the needed memory size for the enlarged array. */
	uint32_t resize = mng->n + 64;
	uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
	void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);

	if (!pools) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	rte_rwlock_write_lock(&mng->resize_rwl);
	/* Copy the existing pool pointers into the new array. */
	if (old_pools) {
		/* Realloc could be an alternative, but keep it explicit. */
		rte_memcpy(pools, old_pools,
			   mng->n * sizeof(struct mlx5_aso_ct_pool *));
		mlx5_free(old_pools);
	}
	mng->n = resize;
	mng->pools = pools;
	rte_rwlock_write_unlock(&mng->resize_rwl);
	return 0;
}
12833
12834
12835
12836
12837
12838
12839
12840
12841
12842
12843
12844
12845
/**
 * Create a new pool of ASO CT actions backed by one DevX CT offload object.
 *
 * Called from flow_dv_aso_ct_alloc() with mng->ct_sl held, which
 * protects the free-list insertions below.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] ct_free
 *   Receives the first action of the new pool (actions[0]); it is
 *   handed straight to the caller and therefore NOT put on the free
 *   list. Its offset stays 0 from the zeroed allocation.
 *
 * @return
 *   The new pool on success, NULL otherwise (rte_errno is set).
 */
static struct mlx5_aso_ct_pool *
flow_dv_ct_pool_create(struct rte_eth_dev *dev,
		       struct mlx5_aso_ct_action **ct_free)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
	struct mlx5_aso_ct_pool *pool = NULL;
	struct mlx5_devx_obj *obj = NULL;
	uint32_t i;
	uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);

	/* One DevX object carries all actions of this pool. */
	obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
							  priv->sh->cdev->pdn,
							  log_obj_size);
	if (!obj) {
		rte_errno = ENODATA;
		DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
		return NULL;
	}
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
	if (!pool) {
		rte_errno = ENOMEM;
		claim_zero(mlx5_devx_cmd_destroy(obj));
		return NULL;
	}
	pool->devx_obj = obj;
	pool->index = mng->next;
	/* Grow the pools array first if this index would overflow it. */
	if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
		/* Undo both allocations on resize failure. */
		claim_zero(mlx5_devx_cmd_destroy(obj));
		mlx5_free(pool);
		return NULL;
	}
	mng->pools[pool->index] = pool;
	mng->next++;
	/* actions[0] goes directly to the caller. */
	*ct_free = &pool->actions[0];
	/* The remaining actions are queued on the manager's free list. */
	for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
		/* offset identifies the action inside the DevX object. */
		pool->actions[i].offset = i;
		LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
	}
	return pool;
}
12891
12892
12893
12894
12895
12896
12897
12898
12899
12900
12901
12902
/**
 * Allocate one ASO CT action, creating a new pool when the free list
 * is empty, and lazily create the two direction-specific DR actions.
 *
 * NOTE(review): the return value of mlx5_flow_get_reg_id() is used as
 * reg_c without checking for failure — confirm REG_NON cannot be
 * returned here, or add a check.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Device-local CT index on success (never 0), 0 on failure
 *   (rte_errno and the error structure are set).
 */
static uint32_t
flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
	struct mlx5_aso_ct_action *ct = NULL;
	struct mlx5_aso_ct_pool *pool;
	uint8_t reg_c;
	uint32_t ct_idx;

	MLX5_ASSERT(mng);
	/* ASO CT requires DevX support. */
	if (!priv->sh->cdev->config.devx) {
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Take a free object, or create a new pool under the lock. */
	rte_spinlock_lock(&mng->ct_sl);
	ct = LIST_FIRST(&mng->free_cts);
	if (ct) {
		LIST_REMOVE(ct, next);
	} else if (!flow_dv_ct_pool_create(dev, &ct)) {
		rte_spinlock_unlock(&mng->ct_sl);
		rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, "failed to create ASO CT pool");
		return 0;
	}
	rte_spinlock_unlock(&mng->ct_sl);
	/* Recover the owning pool from the action's offset. */
	pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
	ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
	/* First reference belongs to the caller. */
	__atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
	reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
	/* DR actions are created once per object and kept for reuse. */
	if (!ct->dr_action_orig) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
		ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
			(priv->sh->rx_domain, pool->devx_obj->obj,
			 ct->offset,
			 MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
			 reg_c - REG_C_0);
#else
		RTE_SET_USED(reg_c);
#endif
		if (!ct->dr_action_orig) {
			/* Drop our reference; the object goes back on the free list. */
			flow_dv_aso_ct_dev_release(dev, ct_idx);
			rte_flow_error_set(error, rte_errno,
					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					   "failed to create ASO CT action");
			return 0;
		}
	}
	if (!ct->dr_action_rply) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
		ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
			(priv->sh->rx_domain, pool->devx_obj->obj,
			 ct->offset,
			 MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
			 reg_c - REG_C_0);
#endif
		if (!ct->dr_action_rply) {
			flow_dv_aso_ct_dev_release(dev, ct_idx);
			rte_flow_error_set(error, rte_errno,
					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					   "failed to create ASO CT action");
			return 0;
		}
	}
	return ct_idx;
}
12971
12972
12973
12974
12975
12976
12977
12978
12979
12980
12981
12982
12983
12984
12985static uint32_t
12986flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12987 const struct rte_flow_action_conntrack *pro,
12988 struct rte_flow_error *error)
12989{
12990 struct mlx5_priv *priv = dev->data->dev_private;
12991 struct mlx5_dev_ctx_shared *sh = priv->sh;
12992 struct mlx5_aso_ct_action *ct;
12993 uint32_t idx;
12994
12995 if (!sh->ct_aso_en)
12996 return rte_flow_error_set(error, ENOTSUP,
12997 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12998 "Connection is not supported");
12999 idx = flow_dv_aso_ct_alloc(dev, error);
13000 if (!idx)
13001 return rte_flow_error_set(error, rte_errno,
13002 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13003 "Failed to allocate CT object");
13004 ct = flow_aso_ct_get_by_dev_idx(dev, idx);
13005 if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
13006 return rte_flow_error_set(error, EBUSY,
13007 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13008 "Failed to update CT");
13009 ct->is_original = !!pro->is_original_dir;
13010 ct->peer = pro->peer_port;
13011 return idx;
13012}
13013
13014
13015
13016
13017
13018
13019
13020
13021
13022
13023
13024
13025
13026
13027
13028
13029
13030
13031
13032
13033
13034static int
13035flow_dv_translate(struct rte_eth_dev *dev,
13036 struct mlx5_flow *dev_flow,
13037 const struct rte_flow_attr *attr,
13038 const struct rte_flow_item items[],
13039 const struct rte_flow_action actions[],
13040 struct rte_flow_error *error)
13041{
13042 struct mlx5_priv *priv = dev->data->dev_private;
13043 struct mlx5_sh_config *dev_conf = &priv->sh->config;
13044 struct rte_flow *flow = dev_flow->flow;
13045 struct mlx5_flow_handle *handle = dev_flow->handle;
13046 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13047 struct mlx5_flow_rss_desc *rss_desc;
13048 uint64_t item_flags = 0;
13049 uint64_t last_item = 0;
13050 uint64_t action_flags = 0;
13051 struct mlx5_flow_dv_matcher matcher = {
13052 .mask = {
13053 .size = sizeof(matcher.mask.buf),
13054 },
13055 };
13056 int actions_n = 0;
13057 bool actions_end = false;
13058 union {
13059 struct mlx5_flow_dv_modify_hdr_resource res;
13060 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
13061 sizeof(struct mlx5_modification_cmd) *
13062 (MLX5_MAX_MODIFY_NUM + 1)];
13063 } mhdr_dummy;
13064 struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
13065 const struct rte_flow_action_count *count = NULL;
13066 const struct rte_flow_action_age *non_shared_age = NULL;
13067 union flow_dv_attr flow_attr = { .attr = 0 };
13068 uint32_t tag_be;
13069 union mlx5_flow_tbl_key tbl_key;
13070 uint32_t modify_action_position = UINT32_MAX;
13071 void *match_mask = matcher.mask.buf;
13072 void *match_value = dev_flow->dv.value.buf;
13073 uint8_t next_protocol = 0xff;
13074 struct rte_vlan_hdr vlan = { 0 };
13075 struct mlx5_flow_dv_dest_array_resource mdest_res;
13076 struct mlx5_flow_dv_sample_resource sample_res;
13077 void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13078 const struct rte_flow_action_sample *sample = NULL;
13079 struct mlx5_flow_sub_actions_list *sample_act;
13080 uint32_t sample_act_pos = UINT32_MAX;
13081 uint32_t age_act_pos = UINT32_MAX;
13082 uint32_t num_of_dest = 0;
13083 int tmp_actions_n = 0;
13084 uint32_t table;
13085 int ret = 0;
13086 const struct mlx5_flow_tunnel *tunnel = NULL;
13087 struct flow_grp_info grp_info = {
13088 .external = !!dev_flow->external,
13089 .transfer = !!attr->transfer,
13090 .fdb_def_rule = !!priv->fdb_def_rule,
13091 .skip_scale = dev_flow->skip_scale &
13092 (1 << MLX5_SCALE_FLOW_GROUP_BIT),
13093 .std_tbl_fix = true,
13094 };
13095 const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
13096 const struct rte_flow_item *tunnel_item = NULL;
13097 const struct rte_flow_item *gre_item = NULL;
13098
13099 if (!wks)
13100 return rte_flow_error_set(error, ENOMEM,
13101 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13102 NULL,
13103 "failed to push flow workspace");
13104 rss_desc = &wks->rss_desc;
13105 memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
13106 memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
13107 mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
13108 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
13109
13110 sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
13111 if (is_tunnel_offload_active(dev)) {
13112 if (dev_flow->tunnel) {
13113 RTE_VERIFY(dev_flow->tof_type ==
13114 MLX5_TUNNEL_OFFLOAD_MISS_RULE);
13115 tunnel = dev_flow->tunnel;
13116 } else {
13117 tunnel = mlx5_get_tof(items, actions,
13118 &dev_flow->tof_type);
13119 dev_flow->tunnel = tunnel;
13120 }
13121 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
13122 (dev, attr, tunnel, dev_flow->tof_type);
13123 }
13124 mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
13125 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
13126 ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
13127 &grp_info, error);
13128 if (ret)
13129 return ret;
13130 dev_flow->dv.group = table;
13131 if (attr->transfer)
13132 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
13133
13134 mhdr_res->actions_num = 0;
13135 if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
13136
13137
13138
13139
13140
13141
13142
13143
13144
13145 bool add_decap = true;
13146 const struct rte_flow_action *ptr = actions;
13147
13148 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
13149 if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
13150 add_decap = false;
13151 break;
13152 }
13153 }
13154 if (add_decap) {
13155 if (flow_dv_create_action_l2_decap(dev, dev_flow,
13156 attr->transfer,
13157 error))
13158 return -rte_errno;
13159 dev_flow->dv.actions[actions_n++] =
13160 dev_flow->dv.encap_decap->action;
13161 action_flags |= MLX5_FLOW_ACTION_DECAP;
13162 }
13163 }
13164 for (; !actions_end ; actions++) {
13165 const struct rte_flow_action_queue *queue;
13166 const struct rte_flow_action_rss *rss;
13167 const struct rte_flow_action *action = actions;
13168 const uint8_t *rss_key;
13169 struct mlx5_flow_tbl_resource *tbl;
13170 struct mlx5_aso_age_action *age_act;
13171 struct mlx5_flow_counter *cnt_act;
13172 uint32_t port_id = 0;
13173 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
13174 int action_type = actions->type;
13175 const struct rte_flow_action *found_action = NULL;
13176 uint32_t jump_group = 0;
13177 uint32_t owner_idx;
13178 struct mlx5_aso_ct_action *ct;
13179
13180 if (!mlx5_flow_os_action_supported(action_type))
13181 return rte_flow_error_set(error, ENOTSUP,
13182 RTE_FLOW_ERROR_TYPE_ACTION,
13183 actions,
13184 "action not supported");
13185 switch (action_type) {
13186 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
13187 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
13188 break;
13189 case RTE_FLOW_ACTION_TYPE_VOID:
13190 break;
13191 case RTE_FLOW_ACTION_TYPE_PORT_ID:
13192 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
13193 if (flow_dv_translate_action_port_id(dev, action,
13194 &port_id, error))
13195 return -rte_errno;
13196 port_id_resource.port_id = port_id;
13197 MLX5_ASSERT(!handle->rix_port_id_action);
13198 if (flow_dv_port_id_action_resource_register
13199 (dev, &port_id_resource, dev_flow, error))
13200 return -rte_errno;
13201 dev_flow->dv.actions[actions_n++] =
13202 dev_flow->dv.port_id_action->action;
13203 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
13204 dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
13205 sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
13206 num_of_dest++;
13207 break;
13208 case RTE_FLOW_ACTION_TYPE_FLAG:
13209 action_flags |= MLX5_FLOW_ACTION_FLAG;
13210 wks->mark = 1;
13211 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
13212 struct rte_flow_action_mark mark = {
13213 .id = MLX5_FLOW_MARK_DEFAULT,
13214 };
13215
13216 if (flow_dv_convert_action_mark(dev, &mark,
13217 mhdr_res,
13218 error))
13219 return -rte_errno;
13220 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
13221 break;
13222 }
13223 tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
13224
13225
13226
13227
13228
13229 MLX5_ASSERT(!handle->dvh.rix_tag);
13230 if (flow_dv_tag_resource_register(dev, tag_be,
13231 dev_flow, error))
13232 return -rte_errno;
13233 MLX5_ASSERT(dev_flow->dv.tag_resource);
13234 dev_flow->dv.actions[actions_n++] =
13235 dev_flow->dv.tag_resource->action;
13236 break;
13237 case RTE_FLOW_ACTION_TYPE_MARK:
13238 action_flags |= MLX5_FLOW_ACTION_MARK;
13239 wks->mark = 1;
13240 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
13241 const struct rte_flow_action_mark *mark =
13242 (const struct rte_flow_action_mark *)
13243 actions->conf;
13244
13245 if (flow_dv_convert_action_mark(dev, mark,
13246 mhdr_res,
13247 error))
13248 return -rte_errno;
13249 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
13250 break;
13251 }
13252
13253 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
13254
13255 tag_be = mlx5_flow_mark_set
13256 (((const struct rte_flow_action_mark *)
13257 (actions->conf))->id);
13258 MLX5_ASSERT(!handle->dvh.rix_tag);
13259 if (flow_dv_tag_resource_register(dev, tag_be,
13260 dev_flow, error))
13261 return -rte_errno;
13262 MLX5_ASSERT(dev_flow->dv.tag_resource);
13263 dev_flow->dv.actions[actions_n++] =
13264 dev_flow->dv.tag_resource->action;
13265 break;
13266 case RTE_FLOW_ACTION_TYPE_SET_META:
13267 if (flow_dv_convert_action_set_meta
13268 (dev, mhdr_res, attr,
13269 (const struct rte_flow_action_set_meta *)
13270 actions->conf, error))
13271 return -rte_errno;
13272 action_flags |= MLX5_FLOW_ACTION_SET_META;
13273 break;
13274 case RTE_FLOW_ACTION_TYPE_SET_TAG:
13275 if (flow_dv_convert_action_set_tag
13276 (dev, mhdr_res,
13277 (const struct rte_flow_action_set_tag *)
13278 actions->conf, error))
13279 return -rte_errno;
13280 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13281 break;
13282 case RTE_FLOW_ACTION_TYPE_DROP:
13283 action_flags |= MLX5_FLOW_ACTION_DROP;
13284 dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
13285 break;
13286 case RTE_FLOW_ACTION_TYPE_QUEUE:
13287 queue = actions->conf;
13288 rss_desc->queue_num = 1;
13289 rss_desc->queue[0] = queue->index;
13290 action_flags |= MLX5_FLOW_ACTION_QUEUE;
13291 dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
13292 sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
13293 num_of_dest++;
13294 break;
13295 case RTE_FLOW_ACTION_TYPE_RSS:
13296 rss = actions->conf;
13297 memcpy(rss_desc->queue, rss->queue,
13298 rss->queue_num * sizeof(uint16_t));
13299 rss_desc->queue_num = rss->queue_num;
13300
13301 rss_key = !rss->key ? rss_hash_default_key : rss->key;
13302 memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
13303
13304
13305
13306
13307 action_flags |= MLX5_FLOW_ACTION_RSS;
13308 dev_flow->handle->fate_action = rss_desc->shared_rss ?
13309 MLX5_FLOW_FATE_SHARED_RSS :
13310 MLX5_FLOW_FATE_QUEUE;
13311 break;
13312 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
13313 owner_idx = (uint32_t)(uintptr_t)action->conf;
13314 age_act = flow_aso_age_get_by_idx(dev, owner_idx);
13315 if (flow->age == 0) {
13316 flow->age = owner_idx;
13317 __atomic_fetch_add(&age_act->refcnt, 1,
13318 __ATOMIC_RELAXED);
13319 }
13320 age_act_pos = actions_n++;
13321 action_flags |= MLX5_FLOW_ACTION_AGE;
13322 break;
13323 case RTE_FLOW_ACTION_TYPE_AGE:
13324 non_shared_age = action->conf;
13325 age_act_pos = actions_n++;
13326 action_flags |= MLX5_FLOW_ACTION_AGE;
13327 break;
13328 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
13329 owner_idx = (uint32_t)(uintptr_t)action->conf;
13330 cnt_act = flow_dv_counter_get_by_idx(dev, owner_idx,
13331 NULL);
13332 MLX5_ASSERT(cnt_act != NULL);
13333
13334
13335
13336
13337 if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
13338 dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP) {
13339 dev_flow->dv.actions[actions_n++] =
13340 cnt_act->action;
13341 } else {
13342 if (flow->counter == 0) {
13343 flow->counter = owner_idx;
13344 __atomic_fetch_add
13345 (&cnt_act->shared_info.refcnt,
13346 1, __ATOMIC_RELAXED);
13347 }
13348
13349 action_flags |= MLX5_FLOW_ACTION_COUNT;
13350 }
13351 break;
13352 case RTE_FLOW_ACTION_TYPE_COUNT:
13353 if (!priv->sh->cdev->config.devx) {
13354 return rte_flow_error_set
13355 (error, ENOTSUP,
13356 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13357 NULL,
13358 "count action not supported");
13359 }
13360
13361 count = action->conf;
13362 action_flags |= MLX5_FLOW_ACTION_COUNT;
13363 break;
13364 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
13365 dev_flow->dv.actions[actions_n++] =
13366 priv->sh->pop_vlan_action;
13367 action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
13368 break;
13369 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
13370 if (!(action_flags &
13371 MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
13372 flow_dev_get_vlan_info_from_items(items, &vlan);
13373 vlan.eth_proto = rte_be_to_cpu_16
13374 ((((const struct rte_flow_action_of_push_vlan *)
13375 actions->conf)->ethertype));
13376 found_action = mlx5_flow_find_action
13377 (actions + 1,
13378 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
13379 if (found_action)
13380 mlx5_update_vlan_vid_pcp(found_action, &vlan);
13381 found_action = mlx5_flow_find_action
13382 (actions + 1,
13383 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
13384 if (found_action)
13385 mlx5_update_vlan_vid_pcp(found_action, &vlan);
13386 if (flow_dv_create_action_push_vlan
13387 (dev, attr, &vlan, dev_flow, error))
13388 return -rte_errno;
13389 dev_flow->dv.actions[actions_n++] =
13390 dev_flow->dv.push_vlan_res->action;
13391 action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
13392 break;
13393 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
13394
13395 MLX5_ASSERT(action_flags &
13396 MLX5_FLOW_ACTION_OF_PUSH_VLAN);
13397 break;
13398 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
13399 if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
13400 break;
13401 flow_dev_get_vlan_info_from_items(items, &vlan);
13402 mlx5_update_vlan_vid_pcp(actions, &vlan);
13403
13404 if (flow_dv_convert_action_modify_vlan_vid
13405 (mhdr_res, actions, error))
13406 return -rte_errno;
13407 action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
13408 break;
13409 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
13410 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
13411 if (flow_dv_create_action_l2_encap(dev, actions,
13412 dev_flow,
13413 attr->transfer,
13414 error))
13415 return -rte_errno;
13416 dev_flow->dv.actions[actions_n++] =
13417 dev_flow->dv.encap_decap->action;
13418 action_flags |= MLX5_FLOW_ACTION_ENCAP;
13419 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
13420 sample_act->action_flags |=
13421 MLX5_FLOW_ACTION_ENCAP;
13422 break;
13423 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
13424 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
13425 if (flow_dv_create_action_l2_decap(dev, dev_flow,
13426 attr->transfer,
13427 error))
13428 return -rte_errno;
13429 dev_flow->dv.actions[actions_n++] =
13430 dev_flow->dv.encap_decap->action;
13431 action_flags |= MLX5_FLOW_ACTION_DECAP;
13432 break;
13433 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
13434
13435 if (action_flags & MLX5_FLOW_ACTION_DECAP) {
13436 if (flow_dv_create_action_raw_encap
13437 (dev, actions, dev_flow, attr, error))
13438 return -rte_errno;
13439 dev_flow->dv.actions[actions_n++] =
13440 dev_flow->dv.encap_decap->action;
13441 } else {
13442
13443 if (flow_dv_create_action_l2_encap
13444 (dev, actions, dev_flow, attr->transfer,
13445 error))
13446 return -rte_errno;
13447 dev_flow->dv.actions[actions_n++] =
13448 dev_flow->dv.encap_decap->action;
13449 }
13450 action_flags |= MLX5_FLOW_ACTION_ENCAP;
13451 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
13452 sample_act->action_flags |=
13453 MLX5_FLOW_ACTION_ENCAP;
13454 break;
13455 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
13456 while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
13457 ;
13458 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
13459 if (flow_dv_create_action_l2_decap
13460 (dev, dev_flow, attr->transfer, error))
13461 return -rte_errno;
13462 dev_flow->dv.actions[actions_n++] =
13463 dev_flow->dv.encap_decap->action;
13464 }
13465
13466 action_flags |= MLX5_FLOW_ACTION_DECAP;
13467 break;
13468 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
13469 dev_flow->dv.actions[actions_n++] =
13470 (void *)(uintptr_t)action->conf;
13471 action_flags |= MLX5_FLOW_ACTION_JUMP;
13472 break;
13473 case RTE_FLOW_ACTION_TYPE_JUMP:
13474 jump_group = ((const struct rte_flow_action_jump *)
13475 action->conf)->group;
13476 grp_info.std_tbl_fix = 0;
13477 if (dev_flow->skip_scale &
13478 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
13479 grp_info.skip_scale = 1;
13480 else
13481 grp_info.skip_scale = 0;
13482 ret = mlx5_flow_group_to_table(dev, tunnel,
13483 jump_group,
13484 &table,
13485 &grp_info, error);
13486 if (ret)
13487 return ret;
13488 tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
13489 attr->transfer,
13490 !!dev_flow->external,
13491 tunnel, jump_group, 0,
13492 0, error);
13493 if (!tbl)
13494 return rte_flow_error_set
13495 (error, errno,
13496 RTE_FLOW_ERROR_TYPE_ACTION,
13497 NULL,
13498 "cannot create jump action.");
13499 if (flow_dv_jump_tbl_resource_register
13500 (dev, tbl, dev_flow, error)) {
13501 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
13502 return rte_flow_error_set
13503 (error, errno,
13504 RTE_FLOW_ERROR_TYPE_ACTION,
13505 NULL,
13506 "cannot create jump action.");
13507 }
13508 dev_flow->dv.actions[actions_n++] =
13509 dev_flow->dv.jump->action;
13510 action_flags |= MLX5_FLOW_ACTION_JUMP;
13511 dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
13512 sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
13513 num_of_dest++;
13514 break;
13515 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
13516 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
13517 if (flow_dv_convert_action_modify_mac
13518 (mhdr_res, actions, error))
13519 return -rte_errno;
13520 action_flags |= actions->type ==
13521 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
13522 MLX5_FLOW_ACTION_SET_MAC_SRC :
13523 MLX5_FLOW_ACTION_SET_MAC_DST;
13524 break;
13525 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
13526 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
13527 if (flow_dv_convert_action_modify_ipv4
13528 (mhdr_res, actions, error))
13529 return -rte_errno;
13530 action_flags |= actions->type ==
13531 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
13532 MLX5_FLOW_ACTION_SET_IPV4_SRC :
13533 MLX5_FLOW_ACTION_SET_IPV4_DST;
13534 break;
13535 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
13536 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
13537 if (flow_dv_convert_action_modify_ipv6
13538 (mhdr_res, actions, error))
13539 return -rte_errno;
13540 action_flags |= actions->type ==
13541 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
13542 MLX5_FLOW_ACTION_SET_IPV6_SRC :
13543 MLX5_FLOW_ACTION_SET_IPV6_DST;
13544 break;
13545 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
13546 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
13547 if (flow_dv_convert_action_modify_tp
13548 (mhdr_res, actions, items,
13549 &flow_attr, dev_flow, !!(action_flags &
13550 MLX5_FLOW_ACTION_DECAP), error))
13551 return -rte_errno;
13552 action_flags |= actions->type ==
13553 RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
13554 MLX5_FLOW_ACTION_SET_TP_SRC :
13555 MLX5_FLOW_ACTION_SET_TP_DST;
13556 break;
13557 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
13558 if (flow_dv_convert_action_modify_dec_ttl
13559 (mhdr_res, items, &flow_attr, dev_flow,
13560 !!(action_flags &
13561 MLX5_FLOW_ACTION_DECAP), error))
13562 return -rte_errno;
13563 action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
13564 break;
13565 case RTE_FLOW_ACTION_TYPE_SET_TTL:
13566 if (flow_dv_convert_action_modify_ttl
13567 (mhdr_res, actions, items, &flow_attr,
13568 dev_flow, !!(action_flags &
13569 MLX5_FLOW_ACTION_DECAP), error))
13570 return -rte_errno;
13571 action_flags |= MLX5_FLOW_ACTION_SET_TTL;
13572 break;
13573 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
13574 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
13575 if (flow_dv_convert_action_modify_tcp_seq
13576 (mhdr_res, actions, error))
13577 return -rte_errno;
13578 action_flags |= actions->type ==
13579 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
13580 MLX5_FLOW_ACTION_INC_TCP_SEQ :
13581 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
13582 break;
13583
13584 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
13585 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
13586 if (flow_dv_convert_action_modify_tcp_ack
13587 (mhdr_res, actions, error))
13588 return -rte_errno;
13589 action_flags |= actions->type ==
13590 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
13591 MLX5_FLOW_ACTION_INC_TCP_ACK :
13592 MLX5_FLOW_ACTION_DEC_TCP_ACK;
13593 break;
13594 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
13595 if (flow_dv_convert_action_set_reg
13596 (mhdr_res, actions, error))
13597 return -rte_errno;
13598 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13599 break;
13600 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
13601 if (flow_dv_convert_action_copy_mreg
13602 (dev, mhdr_res, actions, error))
13603 return -rte_errno;
13604 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13605 break;
13606 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
13607 action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
13608 dev_flow->handle->fate_action =
13609 MLX5_FLOW_FATE_DEFAULT_MISS;
13610 break;
13611 case RTE_FLOW_ACTION_TYPE_METER:
13612 if (!wks->fm)
13613 return rte_flow_error_set(error, rte_errno,
13614 RTE_FLOW_ERROR_TYPE_ACTION,
13615 NULL, "Failed to get meter in flow.");
13616
13617 dev_flow->dv.actions[actions_n++] =
13618 wks->fm->meter_action_g;
13619 action_flags |= MLX5_FLOW_ACTION_METER;
13620 break;
13621 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
13622 if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
13623 actions, error))
13624 return -rte_errno;
13625 action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
13626 break;
13627 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
13628 if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
13629 actions, error))
13630 return -rte_errno;
13631 action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
13632 break;
13633 case RTE_FLOW_ACTION_TYPE_SAMPLE:
13634 sample_act_pos = actions_n;
13635 sample = (const struct rte_flow_action_sample *)
13636 action->conf;
13637 actions_n++;
13638 action_flags |= MLX5_FLOW_ACTION_SAMPLE;
13639
13640 if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
13641 (action_flags & MLX5_FLOW_ACTION_PORT_ID))
13642 sample_act->action_flags |=
13643 MLX5_FLOW_ACTION_ENCAP;
13644 break;
13645 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
13646 if (flow_dv_convert_action_modify_field
13647 (dev, mhdr_res, actions, attr, error))
13648 return -rte_errno;
13649 action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
13650 break;
13651 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
13652 owner_idx = (uint32_t)(uintptr_t)action->conf;
13653 ct = flow_aso_ct_get_by_idx(dev, owner_idx);
13654 if (!ct)
13655 return rte_flow_error_set(error, EINVAL,
13656 RTE_FLOW_ERROR_TYPE_ACTION,
13657 NULL,
13658 "Failed to get CT object.");
13659 if (mlx5_aso_ct_available(priv->sh, ct))
13660 return rte_flow_error_set(error, rte_errno,
13661 RTE_FLOW_ERROR_TYPE_ACTION,
13662 NULL,
13663 "CT is unavailable.");
13664 if (ct->is_original)
13665 dev_flow->dv.actions[actions_n] =
13666 ct->dr_action_orig;
13667 else
13668 dev_flow->dv.actions[actions_n] =
13669 ct->dr_action_rply;
13670 if (flow->ct == 0) {
13671 flow->indirect_type =
13672 MLX5_INDIRECT_ACTION_TYPE_CT;
13673 flow->ct = owner_idx;
13674 __atomic_fetch_add(&ct->refcnt, 1,
13675 __ATOMIC_RELAXED);
13676 }
13677 actions_n++;
13678 action_flags |= MLX5_FLOW_ACTION_CT;
13679 break;
13680 case RTE_FLOW_ACTION_TYPE_END:
13681 actions_end = true;
13682 if (mhdr_res->actions_num) {
13683
13684 if (flow_dv_modify_hdr_resource_register
13685 (dev, mhdr_res, dev_flow, error))
13686 return -rte_errno;
13687 dev_flow->dv.actions[modify_action_position] =
13688 handle->dvh.modify_hdr->action;
13689 }
13690
13691
13692
13693
13694 if (action_flags & MLX5_FLOW_ACTION_AGE) {
13695 if ((non_shared_age && count) ||
13696 !flow_hit_aso_supported(priv->sh, attr)) {
13697
13698 cnt_act = flow_dv_prepare_counter
13699 (dev, dev_flow,
13700 flow, count,
13701 non_shared_age,
13702 error);
13703 if (!cnt_act)
13704 return -rte_errno;
13705 dev_flow->dv.actions[age_act_pos] =
13706 cnt_act->action;
13707 break;
13708 }
13709 if (!flow->age && non_shared_age) {
13710 flow->age = flow_dv_aso_age_alloc
13711 (dev, error);
13712 if (!flow->age)
13713 return -rte_errno;
13714 flow_dv_aso_age_params_init
13715 (dev, flow->age,
13716 non_shared_age->context ?
13717 non_shared_age->context :
13718 (void *)(uintptr_t)
13719 (dev_flow->flow_idx),
13720 non_shared_age->timeout);
13721 }
13722 age_act = flow_aso_age_get_by_idx(dev,
13723 flow->age);
13724 dev_flow->dv.actions[age_act_pos] =
13725 age_act->dr_action;
13726 }
13727 if (action_flags & MLX5_FLOW_ACTION_COUNT) {
13728
13729
13730
13731
13732 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
13733 flow, count,
13734 NULL, error);
13735 if (!cnt_act)
13736 return -rte_errno;
13737 dev_flow->dv.actions[actions_n++] =
13738 cnt_act->action;
13739 }
13740 default:
13741 break;
13742 }
13743 if (mhdr_res->actions_num &&
13744 modify_action_position == UINT32_MAX)
13745 modify_action_position = actions_n++;
13746 }
13747 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
13748 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
13749 int item_type = items->type;
13750
13751 if (!mlx5_flow_os_item_supported(item_type))
13752 return rte_flow_error_set(error, ENOTSUP,
13753 RTE_FLOW_ERROR_TYPE_ITEM,
13754 NULL, "item not supported");
13755 switch (item_type) {
13756 case RTE_FLOW_ITEM_TYPE_ESP:
13757 flow_dv_translate_item_esp(match_mask, match_value,
13758 items, tunnel);
13759 last_item = MLX5_FLOW_ITEM_ESP;
13760 break;
13761 case RTE_FLOW_ITEM_TYPE_PORT_ID:
13762 flow_dv_translate_item_port_id
13763 (dev, match_mask, match_value, items, attr);
13764 last_item = MLX5_FLOW_ITEM_PORT_ID;
13765 break;
13766 case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
13767 flow_dv_translate_item_represented_port
13768 (dev, match_mask, match_value, items, attr);
13769 last_item = MLX5_FLOW_ITEM_REPRESENTED_PORT;
13770 break;
13771 case RTE_FLOW_ITEM_TYPE_ETH:
13772 flow_dv_translate_item_eth(match_mask, match_value,
13773 items, tunnel,
13774 dev_flow->dv.group);
13775 matcher.priority = action_flags &
13776 MLX5_FLOW_ACTION_DEFAULT_MISS &&
13777 !dev_flow->external ?
13778 MLX5_PRIORITY_MAP_L3 :
13779 MLX5_PRIORITY_MAP_L2;
13780 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
13781 MLX5_FLOW_LAYER_OUTER_L2;
13782 break;
13783 case RTE_FLOW_ITEM_TYPE_VLAN:
13784 flow_dv_translate_item_vlan(dev_flow,
13785 match_mask, match_value,
13786 items, tunnel,
13787 dev_flow->dv.group);
13788 matcher.priority = MLX5_PRIORITY_MAP_L2;
13789 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
13790 MLX5_FLOW_LAYER_INNER_VLAN) :
13791 (MLX5_FLOW_LAYER_OUTER_L2 |
13792 MLX5_FLOW_LAYER_OUTER_VLAN);
13793 break;
13794 case RTE_FLOW_ITEM_TYPE_IPV4:
13795 mlx5_flow_tunnel_ip_check(items, next_protocol,
13796 &item_flags, &tunnel);
13797 flow_dv_translate_item_ipv4(match_mask, match_value,
13798 items, tunnel,
13799 dev_flow->dv.group);
13800 matcher.priority = MLX5_PRIORITY_MAP_L3;
13801 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
13802 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
13803 if (items->mask != NULL &&
13804 ((const struct rte_flow_item_ipv4 *)
13805 items->mask)->hdr.next_proto_id) {
13806 next_protocol =
13807 ((const struct rte_flow_item_ipv4 *)
13808 (items->spec))->hdr.next_proto_id;
13809 next_protocol &=
13810 ((const struct rte_flow_item_ipv4 *)
13811 (items->mask))->hdr.next_proto_id;
13812 } else {
13813
13814 next_protocol = 0xff;
13815 }
13816 break;
13817 case RTE_FLOW_ITEM_TYPE_IPV6:
13818 mlx5_flow_tunnel_ip_check(items, next_protocol,
13819 &item_flags, &tunnel);
13820 flow_dv_translate_item_ipv6(match_mask, match_value,
13821 items, tunnel,
13822 dev_flow->dv.group);
13823 matcher.priority = MLX5_PRIORITY_MAP_L3;
13824 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
13825 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
13826 if (items->mask != NULL &&
13827 ((const struct rte_flow_item_ipv6 *)
13828 items->mask)->hdr.proto) {
13829 next_protocol =
13830 ((const struct rte_flow_item_ipv6 *)
13831 items->spec)->hdr.proto;
13832 next_protocol &=
13833 ((const struct rte_flow_item_ipv6 *)
13834 items->mask)->hdr.proto;
13835 } else {
13836
13837 next_protocol = 0xff;
13838 }
13839 break;
13840 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
13841 flow_dv_translate_item_ipv6_frag_ext(match_mask,
13842 match_value,
13843 items, tunnel);
13844 last_item = tunnel ?
13845 MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
13846 MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
13847 if (items->mask != NULL &&
13848 ((const struct rte_flow_item_ipv6_frag_ext *)
13849 items->mask)->hdr.next_header) {
13850 next_protocol =
13851 ((const struct rte_flow_item_ipv6_frag_ext *)
13852 items->spec)->hdr.next_header;
13853 next_protocol &=
13854 ((const struct rte_flow_item_ipv6_frag_ext *)
13855 items->mask)->hdr.next_header;
13856 } else {
13857
13858 next_protocol = 0xff;
13859 }
13860 break;
13861 case RTE_FLOW_ITEM_TYPE_TCP:
13862 flow_dv_translate_item_tcp(match_mask, match_value,
13863 items, tunnel);
13864 matcher.priority = MLX5_PRIORITY_MAP_L4;
13865 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
13866 MLX5_FLOW_LAYER_OUTER_L4_TCP;
13867 break;
13868 case RTE_FLOW_ITEM_TYPE_UDP:
13869 flow_dv_translate_item_udp(match_mask, match_value,
13870 items, tunnel);
13871 matcher.priority = MLX5_PRIORITY_MAP_L4;
13872 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
13873 MLX5_FLOW_LAYER_OUTER_L4_UDP;
13874 break;
13875 case RTE_FLOW_ITEM_TYPE_GRE:
13876 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13877 last_item = MLX5_FLOW_LAYER_GRE;
13878 tunnel_item = items;
13879 gre_item = items;
13880 break;
13881 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
13882 flow_dv_translate_item_gre_key(match_mask,
13883 match_value, items);
13884 last_item = MLX5_FLOW_LAYER_GRE_KEY;
13885 break;
13886 case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
13887 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13888 last_item = MLX5_FLOW_LAYER_GRE;
13889 tunnel_item = items;
13890 break;
13891 case RTE_FLOW_ITEM_TYPE_NVGRE:
13892 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13893 last_item = MLX5_FLOW_LAYER_GRE;
13894 tunnel_item = items;
13895 break;
13896 case RTE_FLOW_ITEM_TYPE_VXLAN:
13897 flow_dv_translate_item_vxlan(dev, attr,
13898 match_mask, match_value,
13899 items, tunnel);
13900 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13901 last_item = MLX5_FLOW_LAYER_VXLAN;
13902 break;
13903 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
13904 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13905 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
13906 tunnel_item = items;
13907 break;
13908 case RTE_FLOW_ITEM_TYPE_GENEVE:
13909 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13910 last_item = MLX5_FLOW_LAYER_GENEVE;
13911 tunnel_item = items;
13912 break;
13913 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
13914 ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
13915 match_value,
13916 items, error);
13917 if (ret)
13918 return rte_flow_error_set(error, -ret,
13919 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13920 "cannot create GENEVE TLV option");
13921 flow->geneve_tlv_option = 1;
13922 last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
13923 break;
13924 case RTE_FLOW_ITEM_TYPE_MPLS:
13925 flow_dv_translate_item_mpls(match_mask, match_value,
13926 items, last_item, tunnel);
13927 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13928 last_item = MLX5_FLOW_LAYER_MPLS;
13929 break;
13930 case RTE_FLOW_ITEM_TYPE_MARK:
13931 flow_dv_translate_item_mark(dev, match_mask,
13932 match_value, items);
13933 last_item = MLX5_FLOW_ITEM_MARK;
13934 break;
13935 case RTE_FLOW_ITEM_TYPE_META:
13936 flow_dv_translate_item_meta(dev, match_mask,
13937 match_value, attr, items);
13938 last_item = MLX5_FLOW_ITEM_METADATA;
13939 break;
13940 case RTE_FLOW_ITEM_TYPE_ICMP:
13941 flow_dv_translate_item_icmp(match_mask, match_value,
13942 items, tunnel);
13943 matcher.priority = MLX5_PRIORITY_MAP_L4;
13944 last_item = MLX5_FLOW_LAYER_ICMP;
13945 break;
13946 case RTE_FLOW_ITEM_TYPE_ICMP6:
13947 flow_dv_translate_item_icmp6(match_mask, match_value,
13948 items, tunnel);
13949 matcher.priority = MLX5_PRIORITY_MAP_L4;
13950 last_item = MLX5_FLOW_LAYER_ICMP6;
13951 break;
13952 case RTE_FLOW_ITEM_TYPE_TAG:
13953 flow_dv_translate_item_tag(dev, match_mask,
13954 match_value, items);
13955 last_item = MLX5_FLOW_ITEM_TAG;
13956 break;
13957 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
13958 flow_dv_translate_mlx5_item_tag(dev, match_mask,
13959 match_value, items);
13960 last_item = MLX5_FLOW_ITEM_TAG;
13961 break;
13962 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
13963 flow_dv_translate_item_tx_queue(dev, match_mask,
13964 match_value,
13965 items);
13966 last_item = MLX5_FLOW_ITEM_TX_QUEUE;
13967 break;
13968 case RTE_FLOW_ITEM_TYPE_GTP:
13969 flow_dv_translate_item_gtp(match_mask, match_value,
13970 items, tunnel);
13971 matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13972 last_item = MLX5_FLOW_LAYER_GTP;
13973 break;
13974 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
13975 ret = flow_dv_translate_item_gtp_psc(match_mask,
13976 match_value,
13977 items);
13978 if (ret)
13979 return rte_flow_error_set(error, -ret,
13980 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13981 "cannot create GTP PSC item");
13982 last_item = MLX5_FLOW_LAYER_GTP_PSC;
13983 break;
13984 case RTE_FLOW_ITEM_TYPE_ECPRI:
13985 if (!mlx5_flex_parser_ecpri_exist(dev)) {
13986
13987 ret = mlx5_flex_parser_ecpri_alloc(dev);
13988 if (ret)
13989 return rte_flow_error_set
13990 (error, -ret,
13991 RTE_FLOW_ERROR_TYPE_ITEM,
13992 NULL,
13993 "cannot create eCPRI parser");
13994 }
13995 flow_dv_translate_item_ecpri(dev, match_mask,
13996 match_value, items,
13997 last_item);
13998
13999 last_item = MLX5_FLOW_LAYER_ECPRI;
14000 break;
14001 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
14002 flow_dv_translate_item_integrity(items, integrity_items,
14003 &last_item);
14004 break;
14005 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
14006 flow_dv_translate_item_aso_ct(dev, match_mask,
14007 match_value, items);
14008 break;
14009 case RTE_FLOW_ITEM_TYPE_FLEX:
14010 flow_dv_translate_item_flex(dev, match_mask,
14011 match_value, items,
14012 dev_flow, tunnel != 0);
14013 last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
14014 MLX5_FLOW_ITEM_OUTER_FLEX;
14015 break;
14016 default:
14017 break;
14018 }
14019 item_flags |= last_item;
14020 }
14021
14022
14023
14024
14025
14026
14027
14028
14029 if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
14030 !(item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) && priv->sh->esw_mode &&
14031 !(attr->egress && !attr->transfer)) {
14032 if (flow_dv_translate_item_port_id(dev, match_mask,
14033 match_value, NULL, attr))
14034 return -rte_errno;
14035 }
14036 if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
14037 flow_dv_translate_item_integrity_post(match_mask, match_value,
14038 integrity_items,
14039 item_flags);
14040 }
14041 if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
14042 flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
14043 tunnel_item, item_flags);
14044 else if (item_flags & MLX5_FLOW_LAYER_GENEVE)
14045 flow_dv_translate_item_geneve(match_mask, match_value,
14046 tunnel_item, item_flags);
14047 else if (item_flags & MLX5_FLOW_LAYER_GRE) {
14048 if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)
14049 flow_dv_translate_item_gre(match_mask, match_value,
14050 tunnel_item, item_flags);
14051 else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
14052 flow_dv_translate_item_nvgre(match_mask, match_value,
14053 tunnel_item, item_flags);
14054 else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE_OPTION)
14055 flow_dv_translate_item_gre_option(match_mask, match_value,
14056 tunnel_item, gre_item, item_flags);
14057 else
14058 MLX5_ASSERT(false);
14059 }
14060#ifdef RTE_LIBRTE_MLX5_DEBUG
14061 MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
14062 dev_flow->dv.value.buf));
14063#endif
14064
14065
14066
14067
14068 handle->layers |= item_flags;
14069 if (action_flags & MLX5_FLOW_ACTION_RSS)
14070 flow_dv_hashfields_set(dev_flow->handle->layers,
14071 rss_desc,
14072 &dev_flow->hash_fields);
14073
14074
14075
14076 if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
14077 ret = flow_dv_translate_action_sample(dev,
14078 sample,
14079 dev_flow, attr,
14080 &num_of_dest,
14081 sample_actions,
14082 &sample_res,
14083 error);
14084 if (ret < 0)
14085 return ret;
14086 ret = flow_dv_create_action_sample(dev,
14087 dev_flow,
14088 num_of_dest,
14089 &sample_res,
14090 &mdest_res,
14091 sample_actions,
14092 action_flags,
14093 error);
14094 if (ret < 0)
14095 return rte_flow_error_set
14096 (error, rte_errno,
14097 RTE_FLOW_ERROR_TYPE_ACTION,
14098 NULL,
14099 "cannot create sample action");
14100 if (num_of_dest > 1) {
14101 dev_flow->dv.actions[sample_act_pos] =
14102 dev_flow->dv.dest_array_res->action;
14103 } else {
14104 dev_flow->dv.actions[sample_act_pos] =
14105 dev_flow->dv.sample_res->verbs_action;
14106 }
14107 }
14108
14109
14110
14111
14112
14113
14114 if (num_of_dest > 1 &&
14115 (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
14116 int i;
14117 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
14118
14119 for (i = 0; i < actions_n; i++) {
14120 if ((sample_act->dr_encap_action &&
14121 sample_act->dr_encap_action ==
14122 dev_flow->dv.actions[i]) ||
14123 (sample_act->dr_port_id_action &&
14124 sample_act->dr_port_id_action ==
14125 dev_flow->dv.actions[i]) ||
14126 (sample_act->dr_jump_action &&
14127 sample_act->dr_jump_action ==
14128 dev_flow->dv.actions[i]))
14129 continue;
14130 temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
14131 }
14132 memcpy((void *)dev_flow->dv.actions,
14133 (void *)temp_actions,
14134 tmp_actions_n * sizeof(void *));
14135 actions_n = tmp_actions_n;
14136 }
14137 dev_flow->dv.actions_n = actions_n;
14138 dev_flow->act_flags = action_flags;
14139 if (wks->skip_matcher_reg)
14140 return 0;
14141
14142 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
14143 matcher.mask.size);
14144 matcher.priority = mlx5_get_matcher_priority(dev, attr,
14145 matcher.priority,
14146 dev_flow->external);
14147
14148
14149
14150
14151
14152 if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
14153 dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
14154 matcher.priority <= MLX5_REG_BITS)
14155 matcher.priority += MLX5_REG_BITS;
14156
14157 tbl_key.is_fdb = attr->transfer;
14158 tbl_key.is_egress = attr->egress;
14159 tbl_key.level = dev_flow->dv.group;
14160 tbl_key.id = dev_flow->dv.table_id;
14161 if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
14162 tunnel, attr->group, error))
14163 return -rte_errno;
14164 return 0;
14165}
14166
14167
14168
14169
14170
14171
14172
14173
14174
14175
14176
14177
14178
14179
14180
14181
14182
14183static int
14184__flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
14185 const uint64_t hash_fields,
14186 uint32_t hrxq_idx)
14187{
14188 uint32_t *hrxqs = action->hrxq;
14189
14190 switch (hash_fields & ~IBV_RX_HASH_INNER) {
14191 case MLX5_RSS_HASH_IPV4:
14192
14193 case MLX5_RSS_HASH_IPV4_DST_ONLY:
14194
14195 case MLX5_RSS_HASH_IPV4_SRC_ONLY:
14196 hrxqs[0] = hrxq_idx;
14197 return 0;
14198 case MLX5_RSS_HASH_IPV4_TCP:
14199
14200 case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
14201
14202 case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
14203 hrxqs[1] = hrxq_idx;
14204 return 0;
14205 case MLX5_RSS_HASH_IPV4_UDP:
14206
14207 case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
14208
14209 case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
14210 hrxqs[2] = hrxq_idx;
14211 return 0;
14212 case MLX5_RSS_HASH_IPV6:
14213
14214 case MLX5_RSS_HASH_IPV6_DST_ONLY:
14215
14216 case MLX5_RSS_HASH_IPV6_SRC_ONLY:
14217 hrxqs[3] = hrxq_idx;
14218 return 0;
14219 case MLX5_RSS_HASH_IPV6_TCP:
14220
14221 case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
14222
14223 case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
14224 hrxqs[4] = hrxq_idx;
14225 return 0;
14226 case MLX5_RSS_HASH_IPV6_UDP:
14227
14228 case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
14229
14230 case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
14231 hrxqs[5] = hrxq_idx;
14232 return 0;
14233 case MLX5_RSS_HASH_NONE:
14234 hrxqs[6] = hrxq_idx;
14235 return 0;
14236 case MLX5_RSS_HASH_IPV4_ESP:
14237 hrxqs[7] = hrxq_idx;
14238 return 0;
14239 case MLX5_RSS_HASH_IPV6_ESP:
14240 hrxqs[8] = hrxq_idx;
14241 return 0;
14242 case MLX5_RSS_HASH_ESP_SPI:
14243 hrxqs[9] = hrxq_idx;
14244 return 0;
14245 default:
14246 return -1;
14247 }
14248}
14249
14250
14251
14252
14253
14254
14255
14256
14257
14258
14259
14260
14261
14262
14263
14264
14265
14266uint32_t
14267flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
14268 const uint64_t hash_fields)
14269{
14270 struct mlx5_priv *priv = dev->data->dev_private;
14271 struct mlx5_shared_action_rss *shared_rss =
14272 mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14273 const uint32_t *hrxqs = shared_rss->hrxq;
14274
14275 switch (hash_fields & ~IBV_RX_HASH_INNER) {
14276 case MLX5_RSS_HASH_IPV4:
14277
14278 case MLX5_RSS_HASH_IPV4_DST_ONLY:
14279
14280 case MLX5_RSS_HASH_IPV4_SRC_ONLY:
14281 return hrxqs[0];
14282 case MLX5_RSS_HASH_IPV4_TCP:
14283
14284 case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
14285
14286 case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
14287 return hrxqs[1];
14288 case MLX5_RSS_HASH_IPV4_UDP:
14289
14290 case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
14291
14292 case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
14293 return hrxqs[2];
14294 case MLX5_RSS_HASH_IPV6:
14295
14296 case MLX5_RSS_HASH_IPV6_DST_ONLY:
14297
14298 case MLX5_RSS_HASH_IPV6_SRC_ONLY:
14299 return hrxqs[3];
14300 case MLX5_RSS_HASH_IPV6_TCP:
14301
14302 case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
14303
14304 case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
14305 return hrxqs[4];
14306 case MLX5_RSS_HASH_IPV6_UDP:
14307
14308 case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
14309
14310 case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
14311 return hrxqs[5];
14312 case MLX5_RSS_HASH_NONE:
14313 return hrxqs[6];
14314 case MLX5_RSS_HASH_IPV4_ESP:
14315 return hrxqs[7];
14316 case MLX5_RSS_HASH_IPV6_ESP:
14317 return hrxqs[8];
14318 case MLX5_RSS_HASH_ESP_SPI:
14319 return hrxqs[9];
14320 default:
14321 return 0;
14322 }
14323
14324}
14325
14326
14327
14328
14329
14330
14331
14332
14333
14334
14335
14336
14337
14338
14339
/**
 * Apply all device flows accumulated in the per-thread workspace to the
 * hardware: append the fate action (drop/queue/shared RSS/default miss)
 * to each flow's DR action list and create the steering rule.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to the flow structure (used for cleanup on failure).
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	struct mlx5_flow_dv_workspace *dv;
	struct mlx5_flow_handle *dh;
	struct mlx5_flow_handle_dv *dv_h;
	struct mlx5_flow *dev_flow;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	int n;
	int err;
	int idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
	uint8_t misc_mask;

	MLX5_ASSERT(wks);
	/* Walk workspace flows in reverse creation order. */
	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
		dev_flow = &wks->flows[idx];
		dv = &dev_flow->dv;
		dh = dev_flow->handle;
		dv_h = &dh->dvh;
		n = dv->actions_n;
		if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
			if (dv->transfer) {
				MLX5_ASSERT(priv->sh->dr_drop_action);
				dv->actions[n++] = priv->sh->dr_drop_action;
			} else {
#ifdef HAVE_MLX5DV_DR
				/* DR build: root tables use a dedicated drop action. */
				MLX5_ASSERT(priv->sh->dr_drop_action);
				dv->actions[n++] = dv->group ?
						priv->sh->dr_drop_action :
						priv->root_drop_action;
#else
				/* Non-DR build: drop via the dedicated drop hRxQ. */
				MLX5_ASSERT(priv->drop_queue.hrxq);
				dv->actions[n++] =
						priv->drop_queue.hrxq->action;
#endif
			}
		} else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
			   !dv_h->rix_sample && !dv_h->rix_dest_array)) {
			struct mlx5_hrxq *hrxq;
			uint32_t hrxq_idx;

			/* Allocate/lookup a hash Rx queue for this flow. */
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
						    &hrxq_idx);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			dh->rix_hrxq = hrxq_idx;
			dv->actions[n++] = hrxq->action;
		} else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
			struct mlx5_hrxq *hrxq = NULL;
			uint32_t hrxq_idx;

			/* Shared RSS: reuse the hRxQ owned by the shared action. */
			hrxq_idx = flow_dv_action_rss_hrxq_lookup(dev,
						rss_desc->shared_rss,
						dev_flow->hash_fields);
			if (hrxq_idx)
				hrxq = mlx5_ipool_get
					(priv->sh->ipool[MLX5_IPOOL_HRXQ],
					 hrxq_idx);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			dh->rix_srss = rss_desc->shared_rss;
			dv->actions[n++] = hrxq->action;
		} else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
			if (!priv->sh->default_miss_action) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "default miss action not be created.");
				goto error;
			}
			dv->actions[n++] = priv->sh->default_miss_action;
		}
		/* Trim the match value buffer to the enabled misc parts. */
		misc_mask = flow_dv_matcher_enable(dv->value.buf);
		__flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
		err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
					       (void *)&dv->value, n,
					       dv->actions, &dh->drv_flow);
		if (err) {
			rte_flow_error_set
				(error, errno,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL,
				 (!priv->sh->config.allow_duplicate_pattern &&
				 errno == EEXIST) ?
				 "duplicating pattern is not allowed" :
				 "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    dh->vf_vlan.tag && !dh->vf_vlan.created) {
			/*
			 * Acquire a VLAN interface workaround resource for
			 * the VF VLAN tag of this flow (VM workaround
			 * context is active).
			 */
			mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
		}
	}
	return 0;
error:
	/* Preserve rte_errno across the cleanup calls below. */
	err = rte_errno;
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dh, next) {
		/* Release partially acquired hRxQ / shared RSS references. */
		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
			mlx5_hrxq_release(dev, dh->rix_hrxq);
			dh->rix_hrxq = 0;
		} else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
			dh->rix_srss = 0;
		}
		if (dh->vf_vlan.tag && dh->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
	}
	rte_errno = err;
	return -rte_errno;
}
14473
14474void
14475flow_dv_matcher_remove_cb(void *tool_ctx __rte_unused,
14476 struct mlx5_list_entry *entry)
14477{
14478 struct mlx5_flow_dv_matcher *resource = container_of(entry,
14479 typeof(*resource),
14480 entry);
14481
14482 claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
14483 mlx5_free(resource);
14484}
14485
14486
14487
14488
14489
14490
14491
14492
14493
14494
14495
14496
14497static int
14498flow_dv_matcher_release(struct rte_eth_dev *dev,
14499 struct mlx5_flow_handle *handle)
14500{
14501 struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
14502 struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
14503 typeof(*tbl), tbl);
14504 int ret;
14505
14506 MLX5_ASSERT(matcher->matcher_object);
14507 ret = mlx5_list_unregister(tbl->matchers, &matcher->entry);
14508 flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
14509 return ret;
14510}
14511
14512void
14513flow_dv_encap_decap_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14514{
14515 struct mlx5_dev_ctx_shared *sh = tool_ctx;
14516 struct mlx5_flow_dv_encap_decap_resource *res =
14517 container_of(entry, typeof(*res), entry);
14518
14519 claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14520 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
14521}
14522
14523
14524
14525
14526
14527
14528
14529
14530
14531
14532
14533
14534static int
14535flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
14536 uint32_t encap_decap_idx)
14537{
14538 struct mlx5_priv *priv = dev->data->dev_private;
14539 struct mlx5_flow_dv_encap_decap_resource *resource;
14540
14541 resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
14542 encap_decap_idx);
14543 if (!resource)
14544 return 0;
14545 MLX5_ASSERT(resource->action);
14546 return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
14547}
14548
14549
14550
14551
14552
14553
14554
14555
14556
14557
14558
14559
14560static int
14561flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
14562 uint32_t rix_jump)
14563{
14564 struct mlx5_priv *priv = dev->data->dev_private;
14565 struct mlx5_flow_tbl_data_entry *tbl_data;
14566
14567 tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
14568 rix_jump);
14569 if (!tbl_data)
14570 return 0;
14571 return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
14572}
14573
14574void
14575flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14576{
14577 struct mlx5_flow_dv_modify_hdr_resource *res =
14578 container_of(entry, typeof(*res), entry);
14579 struct mlx5_dev_ctx_shared *sh = tool_ctx;
14580
14581 claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14582 mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
14583}
14584
14585
14586
14587
14588
14589
14590
14591
14592
14593
14594
14595
14596static int
14597flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
14598 struct mlx5_flow_handle *handle)
14599{
14600 struct mlx5_priv *priv = dev->data->dev_private;
14601 struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
14602
14603 MLX5_ASSERT(entry->action);
14604 return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
14605}
14606
14607void
14608flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14609{
14610 struct mlx5_dev_ctx_shared *sh = tool_ctx;
14611 struct mlx5_flow_dv_port_id_action_resource *resource =
14612 container_of(entry, typeof(*resource), entry);
14613
14614 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14615 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
14616}
14617
14618
14619
14620
14621
14622
14623
14624
14625
14626
14627
14628
14629static int
14630flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
14631 uint32_t port_id)
14632{
14633 struct mlx5_priv *priv = dev->data->dev_private;
14634 struct mlx5_flow_dv_port_id_action_resource *resource;
14635
14636 resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
14637 if (!resource)
14638 return 0;
14639 MLX5_ASSERT(resource->action);
14640 return mlx5_list_unregister(priv->sh->port_id_action_list,
14641 &resource->entry);
14642}
14643
14644
14645
14646
14647
14648
14649
14650
14651
14652static void
14653flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
14654{
14655 struct mlx5_priv *priv = dev->data->dev_private;
14656 struct mlx5_shared_action_rss *shared_rss;
14657
14658 shared_rss = mlx5_ipool_get
14659 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
14660 __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14661}
14662
14663void
14664flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14665{
14666 struct mlx5_dev_ctx_shared *sh = tool_ctx;
14667 struct mlx5_flow_dv_push_vlan_action_resource *resource =
14668 container_of(entry, typeof(*resource), entry);
14669
14670 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14671 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
14672}
14673
14674
14675
14676
14677
14678
14679
14680
14681
14682
14683
14684
14685static int
14686flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
14687 struct mlx5_flow_handle *handle)
14688{
14689 struct mlx5_priv *priv = dev->data->dev_private;
14690 struct mlx5_flow_dv_push_vlan_action_resource *resource;
14691 uint32_t idx = handle->dvh.rix_push_vlan;
14692
14693 resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
14694 if (!resource)
14695 return 0;
14696 MLX5_ASSERT(resource->action);
14697 return mlx5_list_unregister(priv->sh->push_vlan_action_list,
14698 &resource->entry);
14699}
14700
14701
14702
14703
14704
14705
14706
14707
14708
14709static void
14710flow_dv_fate_resource_release(struct rte_eth_dev *dev,
14711 struct mlx5_flow_handle *handle)
14712{
14713 if (!handle->rix_fate)
14714 return;
14715 switch (handle->fate_action) {
14716 case MLX5_FLOW_FATE_QUEUE:
14717 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
14718 mlx5_hrxq_release(dev, handle->rix_hrxq);
14719 break;
14720 case MLX5_FLOW_FATE_JUMP:
14721 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
14722 break;
14723 case MLX5_FLOW_FATE_PORT_ID:
14724 flow_dv_port_id_action_resource_release(dev,
14725 handle->rix_port_id_action);
14726 break;
14727 default:
14728 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
14729 break;
14730 }
14731 handle->rix_fate = 0;
14732}
14733
14734void
14735flow_dv_sample_remove_cb(void *tool_ctx __rte_unused,
14736 struct mlx5_list_entry *entry)
14737{
14738 struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
14739 typeof(*resource),
14740 entry);
14741 struct rte_eth_dev *dev = resource->dev;
14742 struct mlx5_priv *priv = dev->data->dev_private;
14743
14744 if (resource->verbs_action)
14745 claim_zero(mlx5_flow_os_destroy_flow_action
14746 (resource->verbs_action));
14747 if (resource->normal_path_tbl)
14748 flow_dv_tbl_resource_release(MLX5_SH(dev),
14749 resource->normal_path_tbl);
14750 flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
14751 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
14752 DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
14753}
14754
14755
14756
14757
14758
14759
14760
14761
14762
14763
14764
14765
14766static int
14767flow_dv_sample_resource_release(struct rte_eth_dev *dev,
14768 struct mlx5_flow_handle *handle)
14769{
14770 struct mlx5_priv *priv = dev->data->dev_private;
14771 struct mlx5_flow_dv_sample_resource *resource;
14772
14773 resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
14774 handle->dvh.rix_sample);
14775 if (!resource)
14776 return 0;
14777 MLX5_ASSERT(resource->verbs_action);
14778 return mlx5_list_unregister(priv->sh->sample_action_list,
14779 &resource->entry);
14780}
14781
14782void
14783flow_dv_dest_array_remove_cb(void *tool_ctx __rte_unused,
14784 struct mlx5_list_entry *entry)
14785{
14786 struct mlx5_flow_dv_dest_array_resource *resource =
14787 container_of(entry, typeof(*resource), entry);
14788 struct rte_eth_dev *dev = resource->dev;
14789 struct mlx5_priv *priv = dev->data->dev_private;
14790 uint32_t i = 0;
14791
14792 MLX5_ASSERT(resource->action);
14793 if (resource->action)
14794 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14795 for (; i < resource->num_of_dest; i++)
14796 flow_dv_sample_sub_actions_release(dev,
14797 &resource->sample_idx[i]);
14798 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
14799 DRV_LOG(DEBUG, "destination array resource %p: removed",
14800 (void *)resource);
14801}
14802
14803
14804
14805
14806
14807
14808
14809
14810
14811
14812
14813
14814static int
14815flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
14816 struct mlx5_flow_handle *handle)
14817{
14818 struct mlx5_priv *priv = dev->data->dev_private;
14819 struct mlx5_flow_dv_dest_array_resource *resource;
14820
14821 resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
14822 handle->dvh.rix_dest_array);
14823 if (!resource)
14824 return 0;
14825 MLX5_ASSERT(resource->action);
14826 return mlx5_list_unregister(priv->sh->dest_array_list,
14827 &resource->entry);
14828}
14829
14830static void
14831flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
14832{
14833 struct mlx5_priv *priv = dev->data->dev_private;
14834 struct mlx5_dev_ctx_shared *sh = priv->sh;
14835 struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
14836 sh->geneve_tlv_option_resource;
14837 rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
14838 if (geneve_opt_resource) {
14839 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
14840 __ATOMIC_RELAXED))) {
14841 claim_zero(mlx5_devx_cmd_destroy
14842 (geneve_opt_resource->obj));
14843 mlx5_free(sh->geneve_tlv_option_resource);
14844 sh->geneve_tlv_option_resource = NULL;
14845 }
14846 }
14847 rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
14848}
14849
14850
14851
14852
14853
14854
14855
14856
14857
14858
14859static void
14860flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
14861{
14862 struct mlx5_flow_handle *dh;
14863 uint32_t handle_idx;
14864 struct mlx5_priv *priv = dev->data->dev_private;
14865
14866 if (!flow)
14867 return;
14868 handle_idx = flow->dev_handles;
14869 while (handle_idx) {
14870 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14871 handle_idx);
14872 if (!dh)
14873 return;
14874 if (dh->drv_flow) {
14875 claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
14876 dh->drv_flow = NULL;
14877 }
14878 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
14879 flow_dv_fate_resource_release(dev, dh);
14880 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14881 mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14882 handle_idx = dh->next.next;
14883 }
14884}
14885
14886
14887
14888
14889
14890
14891
14892
14893
14894
/**
 * Fully destroy a flow: remove it from hardware, then release every
 * software resource it references (counter, meter, CT/age objects,
 * GENEVE TLV option, and all device handles with their sub-resources).
 *
 * @param dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Flow to destroy, may be NULL.
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_info *fm = NULL;
	uint32_t srss = 0;

	if (!flow)
		return;
	/* Remove the rules from hardware first. */
	flow_dv_remove(dev, flow);
	if (flow->counter) {
		flow_dv_counter_free(dev, flow->counter);
		flow->counter = 0;
	}
	if (flow->meter) {
		/* Keep fm for the split-flow-id release in the loop below. */
		fm = flow_dv_meter_find_by_idx(priv, flow->meter);
		if (fm)
			mlx5_flow_meter_detach(priv, fm);
		flow->meter = 0;
	}
	/* CT and age share storage; CT takes precedence when indicated. */
	if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
		flow_dv_aso_ct_release(dev, flow->ct, NULL);
	else if (flow->age)
		flow_dv_aso_age_release(dev, flow->age);
	if (flow->geneve_tlv_option) {
		flow_dv_geneve_tlv_option_resource_release(dev);
		flow->geneve_tlv_option = 0;
	}
	/* Release every device handle and its referenced resources. */
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		dev_handle = mlx5_ipool_get(priv->sh->ipool
					    [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
		if (!dev_handle)
			return;
		flow->dev_handles = dev_handle->next.next;
		/* Release each flex item index set in the bitmap. */
		while (dev_handle->flex_item) {
			int index = rte_bsf32(dev_handle->flex_item);

			mlx5_flex_release_index(dev, index);
			dev_handle->flex_item &= ~(uint8_t)RTE_BIT32(index);
		}
		if (dev_handle->dvh.matcher)
			flow_dv_matcher_release(dev, dev_handle);
		if (dev_handle->dvh.rix_sample)
			flow_dv_sample_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_dest_array)
			flow_dv_dest_array_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_encap_decap)
			flow_dv_encap_decap_resource_release(dev,
				dev_handle->dvh.rix_encap_decap);
		if (dev_handle->dvh.modify_hdr)
			flow_dv_modify_hdr_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_push_vlan)
			flow_dv_push_vlan_action_resource_release(dev,
								  dev_handle);
		if (dev_handle->dvh.rix_tag)
			flow_dv_tag_release(dev,
					    dev_handle->dvh.rix_tag);
		if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
			flow_dv_fate_resource_release(dev, dev_handle);
		else if (!srss)
			/* Defer the shared RSS release to after the loop. */
			srss = dev_handle->rix_srss;
		/* Split flow id belongs either to the meter or RSS pool. */
		if (fm && dev_handle->is_meter_flow_id &&
		    dev_handle->split_flow_id)
			mlx5_ipool_free(fm->flow_ipool,
					dev_handle->split_flow_id);
		else if (dev_handle->split_flow_id &&
		    !dev_handle->is_meter_flow_id)
			mlx5_ipool_free(priv->sh->ipool
					[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
					dev_handle->split_flow_id);
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				tmp_idx);
	}
	if (srss)
		flow_dv_shared_rss_action_release(dev, srss);
}
14975
14976
14977
14978
14979
14980
14981
14982
14983
14984
14985
14986
14987
14988
14989static int
14990__flow_dv_hrxqs_release(struct rte_eth_dev *dev,
14991 uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
14992{
14993 size_t i;
14994 int remaining = 0;
14995
14996 for (i = 0; i < RTE_DIM(*hrxqs); i++) {
14997 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
14998
14999 if (!ret)
15000 (*hrxqs)[i] = 0;
15001 remaining += ret;
15002 }
15003 return remaining;
15004}
15005
15006
15007
15008
15009
15010
15011
15012
15013
15014
15015
15016
15017
15018
/**
 * Release all hash Rx queue objects owned by a shared RSS action.
 *
 * Thin wrapper around __flow_dv_hrxqs_release() for the action's hrxq set.
 *
 * @param dev
 *   Pointer to the Ethernet device structure.
 * @param shared_rss
 *   Pointer to the shared RSS action whose hash Rx queues are released.
 *
 * @return
 *   Total number of references remaining on the hash Rx queues,
 *   0 when all of them have been released.
 */
static int
__flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
				 struct mlx5_shared_action_rss *shared_rss)
{
	return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
}
15025
15026
15027
15028
15029
15030
15031
15032
15033
15034
15035
15036
15037
15038
15039
15040
15041
15042
15043
/**
 * Adjust the L3/L4 Verbs hash fields according to the requested RSS types.
 *
 * Narrows the default bidirectional hash to source-only or destination-only
 * when RTE_ETH_RSS_L3_SRC_ONLY/L3_DST_ONLY (for IP) or
 * RTE_ETH_RSS_L4_SRC_ONLY/L4_DST_ONLY (for TCP/UDP) is set; otherwise the
 * full src+dst hash of that layer is kept. Hash field sets whose layer is
 * not enabled in @p orig_rss_types are left untouched.
 *
 * @param[in] orig_rss_types
 *   RSS type flags requested by the application (RTE_ETH_RSS_*).
 * @param[in, out] hash_field
 *   Verbs hash fields (IBV_RX_HASH_*) adjusted in place.
 */
void
flow_dv_action_rss_l34_hash_adjust(uint64_t orig_rss_types,
				   uint64_t *hash_field)
{
	/* Refine the application flags to their canonical form first. */
	uint64_t rss_types = rte_eth_rss_hf_refine(orig_rss_types);

	/* The inner-packet bit does not change the L3/L4 field selection. */
	switch (*hash_field & ~IBV_RX_HASH_INNER) {
	case MLX5_RSS_HASH_IPV4:
		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
			*hash_field &= ~MLX5_RSS_HASH_IPV4;
			if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
				*hash_field |= IBV_RX_HASH_DST_IPV4;
			else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
				*hash_field |= IBV_RX_HASH_SRC_IPV4;
			else
				*hash_field |= MLX5_RSS_HASH_IPV4;
		}
		return;
	case MLX5_RSS_HASH_IPV6:
		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
			*hash_field &= ~MLX5_RSS_HASH_IPV6;
			if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
				*hash_field |= IBV_RX_HASH_DST_IPV6;
			else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
				*hash_field |= IBV_RX_HASH_SRC_IPV6;
			else
				*hash_field |= MLX5_RSS_HASH_IPV6;
		}
		return;
	case MLX5_RSS_HASH_IPV4_UDP:
		/* fall-through: same UDP port handling for IPv4 and IPv6. */
	case MLX5_RSS_HASH_IPV6_UDP:
		if (rss_types & RTE_ETH_RSS_UDP) {
			*hash_field &= ~MLX5_UDP_IBV_RX_HASH;
			if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
				*hash_field |= IBV_RX_HASH_DST_PORT_UDP;
			else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
				*hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
			else
				*hash_field |= MLX5_UDP_IBV_RX_HASH;
		}
		return;
	case MLX5_RSS_HASH_IPV4_TCP:
		/* fall-through: same TCP port handling for IPv4 and IPv6. */
	case MLX5_RSS_HASH_IPV6_TCP:
		if (rss_types & RTE_ETH_RSS_TCP) {
			*hash_field &= ~MLX5_TCP_IBV_RX_HASH;
			if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
				*hash_field |= IBV_RX_HASH_DST_PORT_TCP;
			else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
				*hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
			else
				*hash_field |= MLX5_TCP_IBV_RX_HASH;
		}
		return;
	default:
		return;
	}
}
15103
15104
15105
15106
15107
15108
15109
15110
15111
15112
15113
15114
15115
15116
15117
15118
15119
15120
15121
/**
 * Set up the device objects backing a shared RSS action.
 *
 * Creates the indirection table for the configured queue set, then one
 * hash Rx queue per supported hash-field combination, recording each hrxq
 * index inside the shared action. On failure all objects created so far
 * are rolled back.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] action_idx
 *   Shared RSS action ipool index, stored into the RSS descriptor so the
 *   created hrxqs are bound to this shared action.
 * @param[in, out] shared_rss
 *   Shared RSS action whose ind_tbl and hrxq set are populated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
__flow_dv_action_rss_setup(struct rte_eth_dev *dev,
			   uint32_t action_idx,
			   struct mlx5_shared_action_rss *shared_rss,
			   struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_rss_desc rss_desc = { 0 };
	size_t i;
	int err;

	shared_rss->ind_tbl = mlx5_ind_table_obj_new
			      (dev, shared_rss->origin.queue,
			       shared_rss->origin.queue_num,
			       true,
			       !!dev->data->dev_started);
	if (!shared_rss->ind_tbl)
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot setup indirection table");
	memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
	rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
	rss_desc.const_q = shared_rss->origin.queue;
	rss_desc.queue_num = shared_rss->origin.queue_num;
	/* Non-zero shared_rss marks the descriptor as a shared action. */
	rss_desc.shared_rss = action_idx;
	rss_desc.ind_tbl = shared_rss->ind_tbl;
	if (priv->sh->config.dv_flow_en == 2)
		rss_desc.hws_flags = MLX5DR_ACTION_FLAG_HWS_RX;
	/* One hrxq per predefined hash-field combination. */
	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
		struct mlx5_hrxq *hrxq;
		uint64_t hash_fields = mlx5_rss_hash_fields[i];
		int tunnel = 0;

		flow_dv_action_rss_l34_hash_adjust(shared_rss->origin.types,
						   &hash_fields);
		/* Level > 1 selects hashing on the inner (tunneled) packet. */
		if (shared_rss->origin.level > 1) {
			hash_fields |= IBV_RX_HASH_INNER;
			tunnel = 1;
		}
		rss_desc.tunnel = tunnel;
		rss_desc.hash_fields = hash_fields;
		hrxq = mlx5_hrxq_get(dev, &rss_desc);
		if (!hrxq) {
			rte_flow_error_set
				(error, rte_errno,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "cannot get hash queue");
			goto error_hrxq_new;
		}
		err = __flow_dv_action_rss_hrxq_set
			(shared_rss, hash_fields, hrxq->idx);
		/* Every hash_fields combination has a dedicated slot. */
		MLX5_ASSERT(!err);
	}
	return 0;
error_hrxq_new:
	/* Preserve the original failure code across the cleanup calls. */
	err = rte_errno;
	__flow_dv_action_rss_hrxqs_release(dev, shared_rss);
	if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
		shared_rss->ind_tbl = NULL;
	rte_errno = err;
	return -rte_errno;
}
15185
15186
15187
15188
15189
15190
15191
15192
15193
15194
15195
15196
15197
15198
15199
15200
15201
15202
/**
 * Create a shared RSS action and register it in the per-port list.
 *
 * Allocates the action from the shared-RSS ipool, copies the RSS
 * configuration into it, builds the backing device objects via
 * __flow_dv_action_rss_setup() and links the action into
 * priv->rss_shared_actions under the shared-action spinlock.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] conf
 *   Shared action configuration (currently unused).
 * @param[in] rss
 *   RSS action configuration to clone into the shared action.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A valid (non-zero) shared action ipool index on success, 0 otherwise
 *   and @p error is set.
 */
static uint32_t
__flow_dv_action_rss_create(struct rte_eth_dev *dev,
			    const struct rte_flow_indir_action_conf *conf,
			    const struct rte_flow_action_rss *rss,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss = NULL;
	struct rte_flow_action_rss *origin;
	const uint8_t *rss_key;
	uint32_t idx;

	RTE_SET_USED(conf);
	shared_rss = mlx5_ipool_zmalloc
		     (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
	if (!shared_rss) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate resource memory");
		goto error_rss_init;
	}
	/*
	 * The index must fit below the action-type offset so it can be
	 * encoded into the indirect action handle.
	 */
	if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
		rte_flow_error_set(error, E2BIG,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "rss action number out of range");
		goto error_rss_init;
	}
	origin = &shared_rss->origin;
	origin->func = rss->func;
	origin->level = rss->level;
	/* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
	origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
	/* NULL RSS key indicates default RSS key. */
	rss_key = !rss->key ? rss_hash_default_key : rss->key;
	memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	origin->key = &shared_rss->key[0];
	origin->key_len = MLX5_RSS_HASH_KEY_LEN;
	origin->queue = rss->queue;
	origin->queue_num = rss->queue_num;
	if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
		goto error_rss_init;
	/* Re-point the queue list at the copy owned by the ind. table. */
	origin->queue = shared_rss->ind_tbl->queues;
	rte_spinlock_init(&shared_rss->action_rss_sl);
	__atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
	rte_spinlock_lock(&priv->shared_act_sl);
	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
		     &priv->rss_shared_actions, idx, shared_rss, next);
	rte_spinlock_unlock(&priv->shared_act_sl);
	return idx;
error_rss_init:
	if (shared_rss) {
		if (shared_rss->ind_tbl)
			mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl,
						   !!dev->data->dev_started);
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
				idx);
	}
	return 0;
}
15263
15264
15265
15266
15267
15268
15269
15270
15271
15272
15273
15274
15275
15276
15277
15278
/**
 * Destroy a shared RSS action and release all its device objects.
 *
 * The action can only be destroyed when its reference count is exactly 1
 * (no flows attached): the count is atomically swapped from 1 to 0, then
 * the hash Rx queues and the indirection table are released and the action
 * is unlinked from the per-port list and freed back to the ipool.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   Shared RSS action ipool index.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise (EBUSY when the action
 *   or any of its objects still has references).
 */
static int
__flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss =
	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
	uint32_t old_refcnt = 1;
	int remaining;

	if (!shared_rss)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid shared action");
	/* Only the last (creation) reference may tear the action down. */
	if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
					 0, 0, __ATOMIC_ACQUIRE,
					 __ATOMIC_RELAXED))
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "shared rss has references");
	remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
	if (remaining)
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "shared rss hrxq has references");
	remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl,
					       !!dev->data->dev_started);
	if (remaining)
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "shared rss indirection table has"
					  " references");
	rte_spinlock_lock(&priv->shared_act_sl);
	ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
		     &priv->rss_shared_actions, idx, shared_rss, next);
	rte_spinlock_unlock(&priv->shared_act_sl);
	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
			idx);
	return 0;
}
15322
15323
15324
15325
15326
15327
15328
15329
15330
15331
15332
15333
15334
15335
15336
15337
15338
15339
15340
15341
/**
 * Create an indirect (shared) action of the requested type.
 *
 * The returned handle encodes the action type in the bits above
 * MLX5_INDIRECT_ACTION_TYPE_OFFSET and the per-type object index in the
 * bits below it (CT handles additionally encode the owner port).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] conf
 *   Shared action configuration.
 * @param[in] action
 *   Action specification used to create the indirect action.
 * @param[out] err
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A valid handle on success, NULL otherwise and rte_errno is set.
 */
struct rte_flow_action_handle *
flow_dv_action_create(struct rte_eth_dev *dev,
		      const struct rte_flow_indir_action_conf *conf,
		      const struct rte_flow_action *action,
		      struct rte_flow_error *err)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t age_idx = 0;
	uint32_t idx = 0;
	uint32_t ret = 0;

	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_RSS:
		ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
		idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
		       MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
		break;
	case RTE_FLOW_ACTION_TYPE_AGE:
		age_idx = flow_dv_aso_age_alloc(dev, err);
		if (!age_idx) {
			ret = -rte_errno;
			break;
		}
		idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
		       MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
		/* No user context defaults to the encoded handle value. */
		flow_dv_aso_age_params_init(dev, age_idx,
					((const struct rte_flow_action_age *)
						action->conf)->context ?
					((const struct rte_flow_action_age *)
						action->conf)->context :
					(void *)(uintptr_t)idx,
					((const struct rte_flow_action_age *)
						action->conf)->timeout);
		ret = age_idx;
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
		idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
		       MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
		break;
	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
		ret = flow_dv_translate_create_conntrack(dev, action->conf,
							 err);
		/* CT handles also carry the owner port id. */
		idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
		break;
	default:
		rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, "action type not supported");
		break;
	}
	/* ret == 0 means the per-type creation failed. */
	return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
}
15394
15395
15396
15397
15398
15399
15400
15401
15402
15403
15404
15405
15406
15407
15408
15409
15410
15411
/**
 * Destroy an indirect action previously created by flow_dv_action_create().
 *
 * Decodes the action type and object index from the handle and dispatches
 * to the matching per-type release routine.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] handle
 *   Indirect action handle (encodes type and index).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
flow_dv_action_destroy(struct rte_eth_dev *dev,
		       struct rte_flow_action_handle *handle,
		       struct rte_flow_error *error)
{
	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
	struct mlx5_flow_counter *cnt;
	uint32_t no_flow_refcnt = 1;
	int ret;

	switch (type) {
	case MLX5_INDIRECT_ACTION_TYPE_RSS:
		return __flow_dv_action_rss_release(dev, idx, error);
	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
		cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
		/* Only a counter unused by any flow (refcnt == 1) is freed. */
		if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
						 &no_flow_refcnt, 1, false,
						 __ATOMIC_ACQUIRE,
						 __ATOMIC_RELAXED))
			return rte_flow_error_set(error, EBUSY,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "Indirect count action has references");
		flow_dv_counter_free(dev, idx);
		return 0;
	case MLX5_INDIRECT_ACTION_TYPE_AGE:
		ret = flow_dv_aso_age_release(dev, idx);
		if (ret)
			/*
			 * A non-zero return only reports remaining
			 * references; it is not treated as an error here.
			 */
			DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
				" released with references %d.", idx, ret);
		return 0;
	case MLX5_INDIRECT_ACTION_TYPE_CT:
		ret = flow_dv_aso_ct_release(dev, idx, error);
		if (ret < 0)
			return ret;
		if (ret > 0)
			DRV_LOG(DEBUG, "Connection tracking object %u still "
				"has references %d.", idx, ret);
		return 0;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "action type not supported");
	}
}
15464
15465
15466
15467
15468
15469
15470
15471
15472
15473
15474
15475
15476
15477
15478
15479
15480
15481
/**
 * Update the queue list of an existing shared RSS action.
 *
 * Copies the new queue list into a scratch buffer, modifies the hardware
 * indirection table under the per-action spinlock and, on success, writes
 * the new queues back into the action's own queue array (so the pointer
 * published in origin.queue stays valid).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   Shared RSS action ipool index.
 * @param[in] action_conf
 *   New RSS action configuration (only the queue list is applied).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
__flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
			    const struct rte_flow_action_rss *action_conf,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss =
	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
	int ret = 0;
	void *queue = NULL;
	void *queue_i = NULL;
	uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
	bool dev_started = !!dev->data->dev_started;

	if (!shared_rss)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid shared action to update");
	if (priv->obj_ops.ind_table_modify == NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "cannot modify indirection table");
	queue = mlx5_malloc(MLX5_MEM_ZERO,
			    RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
			    0, SOCKET_ID_ANY);
	if (!queue)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
	memcpy(queue, action_conf->queue, queue_size);
	MLX5_ASSERT(shared_rss->ind_tbl);
	rte_spinlock_lock(&shared_rss->action_rss_sl);
	/* Keep the action's own queue array; refresh it only on success. */
	queue_i = shared_rss->ind_tbl->queues;
	ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
					queue, action_conf->queue_num,
					true /* standalone */,
					dev_started /* ref_new_qs */,
					dev_started /* deref_old_qs */);
	if (ret) {
		ret = rte_flow_error_set(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					 "cannot update indirection table");
	} else {
		/* Restore the queue list into the persistent storage. */
		memcpy(queue_i, queue, queue_size);
		shared_rss->ind_tbl->queues = queue_i;
		shared_rss->origin.queue_num = action_conf->queue_num;
	}
	mlx5_free(queue);
	rte_spinlock_unlock(&shared_rss->action_rss_sl);
	return ret;
}
15535
15536
15537
15538
15539
15540
15541
15542
15543
15544
15545
15546
15547
15548
15549
15550
15551
15552
/**
 * Update an ASO connection tracking object via an update WQE.
 *
 * Only the owner port may update the object. The direction flag is applied
 * directly; a state update validates the new profile, posts an update WQE
 * and then waits for the object to become available again.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   Indirect CT action index (encodes owner port and object index).
 * @param[in] update
 *   Conntrack modification specification (direction and/or state).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
__flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
			   const struct rte_flow_modify_conntrack *update,
			   struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_action *ct;
	const struct rte_flow_action_conntrack *new_prf;
	int ret = 0;
	uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
	uint32_t dev_idx;

	if (PORT_ID(priv) != owner)
		return rte_flow_error_set(error, EACCES,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "CT object owned by another port");
	dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
	ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
	/* refcnt == 0 means the object was never created or already freed. */
	if (!ct->refcnt)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "CT object is inactive");
	new_prf = &update->new_ct;
	if (update->direction)
		ct->is_original = !!new_prf->is_original_dir;
	if (update->state) {
		/* Only validate the profile when it needs to be applied. */
		ret = mlx5_validate_action_ct(dev, new_prf, error);
		if (ret)
			return ret;
		ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
		if (ret)
			return rte_flow_error_set(error, EIO,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"Failed to send CT context update WQE");
		/* Wait until the posted update WQE has taken effect. */
		ret = mlx5_aso_ct_available(priv->sh, ct);
		if (ret)
			rte_flow_error_set(error, rte_errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "Timeout to get the CT update");
	}
	return ret;
}
15601
15602
15603
15604
15605
15606
15607
15608
15609
15610
15611
15612
15613
15614
15615
15616
15617
15618
15619
15620
15621
15622int
15623flow_dv_action_update(struct rte_eth_dev *dev,
15624 struct rte_flow_action_handle *handle,
15625 const void *update,
15626 struct rte_flow_error *err)
15627{
15628 uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15629 uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15630 uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15631 const void *action_conf;
15632
15633 switch (type) {
15634 case MLX5_INDIRECT_ACTION_TYPE_RSS:
15635 action_conf = ((const struct rte_flow_action *)update)->conf;
15636 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
15637 case MLX5_INDIRECT_ACTION_TYPE_CT:
15638 return __flow_dv_action_ct_update(dev, idx, update, err);
15639 default:
15640 return rte_flow_error_set(err, ENOTSUP,
15641 RTE_FLOW_ERROR_TYPE_ACTION,
15642 NULL,
15643 "action type update not supported");
15644 }
15645}
15646
15647
15648
15649
15650
15651
15652
15653
15654
15655
/**
 * Destroy all rules and resources of one meter sub-policy.
 *
 * For every color: destroys each color rule's flow and matcher, detaching
 * the next-level meter when the color's fate is another meter (hierarchy).
 * Then releases the per-color hash Rx queues and jump tables and finally
 * the sub-policy's own table resource.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] sub_policy
 *   Pointer to the meter sub-policy to clean up.
 */
static void
__flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
				   struct mlx5_flow_meter_sub_policy *sub_policy)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_tbl_data_entry *tbl;
	struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
	struct mlx5_flow_meter_info *next_fm;
	struct mlx5_sub_policy_color_rule *color_rule;
	void *tmp;
	uint32_t i;

	for (i = 0; i < RTE_COLORS; i++) {
		next_fm = NULL;
		/* Hierarchical policy: this color jumps to another meter. */
		if (i <= RTE_COLOR_YELLOW && policy &&
		    policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
			next_fm = mlx5_flow_meter_find(priv,
					policy->act_cnt[i].next_mtr_id, NULL);
		/* Safe iteration: entries are removed while walking. */
		RTE_TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
				       next_port, tmp) {
			claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
			tbl = container_of(color_rule->matcher->tbl,
					   typeof(*tbl), tbl);
			mlx5_list_unregister(tbl->matchers,
					     &color_rule->matcher->entry);
			TAILQ_REMOVE(&sub_policy->color_rules[i],
				     color_rule, next_port);
			mlx5_free(color_rule);
			if (next_fm)
				mlx5_flow_meter_detach(priv, next_fm);
		}
	}
	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
		if (sub_policy->rix_hrxq[i]) {
			/* Hierarchy policies share hrxqs; do not release. */
			if (policy && !policy->is_hierarchy)
				mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
			sub_policy->rix_hrxq[i] = 0;
		}
		if (sub_policy->jump_tbl[i]) {
			flow_dv_tbl_resource_release(MLX5_SH(dev),
						     sub_policy->jump_tbl[i]);
			sub_policy->jump_tbl[i] = NULL;
		}
	}
	if (sub_policy->tbl_rsc) {
		flow_dv_tbl_resource_release(MLX5_SH(dev),
					     sub_policy->tbl_rsc);
		sub_policy->tbl_rsc = NULL;
	}
}
15706
15707
15708
15709
15710
15711
15712
15713
15714
15715
15716
15717static void
15718flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
15719 struct mlx5_flow_meter_policy *mtr_policy)
15720{
15721 uint32_t i, j;
15722 struct mlx5_flow_meter_sub_policy *sub_policy;
15723 uint16_t sub_policy_num;
15724
15725 for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15726 sub_policy_num = (mtr_policy->sub_policy_num >>
15727 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15728 MLX5_MTR_SUB_POLICY_NUM_MASK;
15729 for (j = 0; j < sub_policy_num; j++) {
15730 sub_policy = mtr_policy->sub_policys[i][j];
15731 if (sub_policy)
15732 __flow_dv_destroy_sub_policy_rules(dev,
15733 sub_policy);
15734 }
15735 }
15736}
15737
15738
15739
15740
15741
15742
15743
15744
15745
15746
15747
/**
 * Release all per-color action resources held by a meter policy.
 *
 * For every color: releases the mark tag and modify-header resources, then
 * the fate-specific resource (saved RSS action copy, port-id action, or
 * cached jump pointers). Finally clears the per-domain drop action
 * pointers.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] mtr_policy
 *   Pointer to the meter policy whose actions are destroyed.
 */
static void
flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
				struct mlx5_flow_meter_policy *mtr_policy)
{
	struct rte_flow_action *rss_action;
	/* Scratch handle used only to carry the modify_hdr for release. */
	struct mlx5_flow_handle dev_handle;
	uint32_t i, j;

	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
		if (mtr_policy->act_cnt[i].rix_mark) {
			flow_dv_tag_release(dev,
				mtr_policy->act_cnt[i].rix_mark);
			mtr_policy->act_cnt[i].rix_mark = 0;
		}
		if (mtr_policy->act_cnt[i].modify_hdr) {
			dev_handle.dvh.modify_hdr =
				mtr_policy->act_cnt[i].modify_hdr;
			flow_dv_modify_hdr_resource_release(dev, &dev_handle);
		}
		switch (mtr_policy->act_cnt[i].fate_action) {
		case MLX5_FLOW_FATE_SHARED_RSS:
			/* The stored RSS action is a private malloc'd copy. */
			rss_action = mtr_policy->act_cnt[i].rss;
			mlx5_free(rss_action);
			break;
		case MLX5_FLOW_FATE_PORT_ID:
			if (mtr_policy->act_cnt[i].rix_port_id_action) {
				flow_dv_port_id_action_resource_release(dev,
				mtr_policy->act_cnt[i].rix_port_id_action);
				mtr_policy->act_cnt[i].rix_port_id_action = 0;
			}
			break;
		case MLX5_FLOW_FATE_DROP:
		case MLX5_FLOW_FATE_JUMP:
			/* Jump/drop actions are owned by their tables. */
			for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
				mtr_policy->act_cnt[i].dr_jump_action[j] =
							NULL;
			break;
		default:
			/* No resource to release for other fate actions. */
			break;
		}
	}
	for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
		mtr_policy->dr_drop_action[j] = NULL;
}
15793
15794
15795
15796
15797
15798
15799
15800
15801
15802
15803
15804
15805
15806
15807
/**
 * Create the ASO flow action used for the meter's yellow color.
 *
 * Builds an ASO flow action on the Rx domain that matches the yellow
 * meter color of this ASO meter object and stores it in
 * fm->meter_action_y.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] fm
 *   Meter information; meter_action_y is set on success.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and @p error is set.
 */
static int
__flow_dv_create_mtr_yellow_action(struct rte_eth_dev *dev,
				   struct mlx5_flow_meter_info *fm,
				   struct rte_mtr_error *error)
{
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_error flow_err;
	struct mlx5_aso_mtr *aso_mtr;
	struct mlx5_aso_mtr_pool *pool;
	uint8_t reg_id;

	aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm);
	pool = container_of(aso_mtr, struct mlx5_aso_mtr_pool, mtrs[aso_mtr->offset]);
	/* The meter color register offset is relative to REG_C_0. */
	reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
	fm->meter_action_y =
		mlx5_glue->dv_create_flow_action_aso(priv->sh->rx_domain,
						     pool->devx_obj->obj,
						     aso_mtr->offset,
						     (1 << MLX5_FLOW_COLOR_YELLOW),
						     reg_id - REG_C_0);
#else
	/*
	 * NOTE(review): without ASO support meter_action_y is never
	 * assigned here, so the error path below fires whenever it was
	 * not set earlier — confirm this is the intended behavior.
	 */
	RTE_SET_USED(dev);
#endif
	if (!fm->meter_action_y) {
		return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
					  "Fail to create yellow meter action.");
	}
	return 0;
}
15838
15839
15840
15841
15842
15843
15844
15845
15846
15847
15848
15849
15850
15851
15852
15853
15854
15855
15856
15857
15858
15859static int
15860__flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
15861 struct mlx5_flow_meter_policy *mtr_policy,
15862 const struct rte_flow_action *actions[RTE_COLORS],
15863 struct rte_flow_attr *attr,
15864 enum mlx5_meter_domain domain,
15865 struct rte_mtr_error *error)
15866{
15867 struct mlx5_priv *priv = dev->data->dev_private;
15868 struct rte_flow_error flow_err;
15869 const struct rte_flow_action *act;
15870 uint64_t action_flags;
15871 struct mlx5_flow_handle dh;
15872 struct mlx5_flow dev_flow;
15873 struct mlx5_flow_dv_port_id_action_resource port_id_action;
15874 int i, ret;
15875 uint8_t egress, transfer;
15876 struct mlx5_meter_policy_action_container *act_cnt = NULL;
15877 union {
15878 struct mlx5_flow_dv_modify_hdr_resource res;
15879 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
15880 sizeof(struct mlx5_modification_cmd) *
15881 (MLX5_MAX_MODIFY_NUM + 1)];
15882 } mhdr_dummy;
15883 struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
15884
15885 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15886 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15887 memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15888 memset(&dev_flow, 0, sizeof(struct mlx5_flow));
15889 memset(&port_id_action, 0,
15890 sizeof(struct mlx5_flow_dv_port_id_action_resource));
15891 memset(mhdr_res, 0, sizeof(*mhdr_res));
15892 mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
15893 (egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
15894 MLX5DV_FLOW_TABLE_TYPE_NIC_RX);
15895 dev_flow.handle = &dh;
15896 dev_flow.dv.port_id_action = &port_id_action;
15897 dev_flow.external = true;
15898 for (i = 0; i < RTE_COLORS; i++) {
15899 if (i < MLX5_MTR_RTE_COLORS)
15900 act_cnt = &mtr_policy->act_cnt[i];
15901
15902 if ((i == RTE_COLOR_YELLOW && mtr_policy->skip_y) ||
15903 (i == RTE_COLOR_GREEN && mtr_policy->skip_g))
15904 continue;
15905 action_flags = 0;
15906 for (act = actions[i];
15907 act && act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
15908 switch (act->type) {
15909 case RTE_FLOW_ACTION_TYPE_MARK:
15910 {
15911 uint32_t tag_be = mlx5_flow_mark_set
15912 (((const struct rte_flow_action_mark *)
15913 (act->conf))->id);
15914
15915 if (i >= MLX5_MTR_RTE_COLORS)
15916 return -rte_mtr_error_set(error,
15917 ENOTSUP,
15918 RTE_MTR_ERROR_TYPE_METER_POLICY,
15919 NULL,
15920 "cannot create policy "
15921 "mark action for this color");
15922 if (flow_dv_tag_resource_register(dev, tag_be,
15923 &dev_flow, &flow_err))
15924 return -rte_mtr_error_set(error,
15925 ENOTSUP,
15926 RTE_MTR_ERROR_TYPE_METER_POLICY,
15927 NULL,
15928 "cannot setup policy mark action");
15929 MLX5_ASSERT(dev_flow.dv.tag_resource);
15930 act_cnt->rix_mark =
15931 dev_flow.handle->dvh.rix_tag;
15932 action_flags |= MLX5_FLOW_ACTION_MARK;
15933 mtr_policy->mark = 1;
15934 break;
15935 }
15936 case RTE_FLOW_ACTION_TYPE_SET_TAG:
15937 if (i >= MLX5_MTR_RTE_COLORS)
15938 return -rte_mtr_error_set(error,
15939 ENOTSUP,
15940 RTE_MTR_ERROR_TYPE_METER_POLICY,
15941 NULL,
15942 "cannot create policy "
15943 "set tag action for this color");
15944 if (flow_dv_convert_action_set_tag
15945 (dev, mhdr_res,
15946 (const struct rte_flow_action_set_tag *)
15947 act->conf, &flow_err))
15948 return -rte_mtr_error_set(error,
15949 ENOTSUP,
15950 RTE_MTR_ERROR_TYPE_METER_POLICY,
15951 NULL, "cannot convert policy "
15952 "set tag action");
15953 if (!mhdr_res->actions_num)
15954 return -rte_mtr_error_set(error,
15955 ENOTSUP,
15956 RTE_MTR_ERROR_TYPE_METER_POLICY,
15957 NULL, "cannot find policy "
15958 "set tag action");
15959 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15960 break;
15961 case RTE_FLOW_ACTION_TYPE_DROP:
15962 {
15963 struct mlx5_flow_mtr_mng *mtrmng =
15964 priv->sh->mtrmng;
15965 struct mlx5_flow_tbl_data_entry *tbl_data;
15966
15967
15968
15969
15970
15971 if (!mtrmng->drop_tbl[domain]) {
15972 mtrmng->drop_tbl[domain] =
15973 flow_dv_tbl_resource_get(dev,
15974 MLX5_FLOW_TABLE_LEVEL_METER,
15975 egress, transfer, false, NULL, 0,
15976 0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
15977 if (!mtrmng->drop_tbl[domain])
15978 return -rte_mtr_error_set
15979 (error, ENOTSUP,
15980 RTE_MTR_ERROR_TYPE_METER_POLICY,
15981 NULL,
15982 "Failed to create meter drop table");
15983 }
15984 tbl_data = container_of
15985 (mtrmng->drop_tbl[domain],
15986 struct mlx5_flow_tbl_data_entry, tbl);
15987 if (i < MLX5_MTR_RTE_COLORS) {
15988 act_cnt->dr_jump_action[domain] =
15989 tbl_data->jump.action;
15990 act_cnt->fate_action =
15991 MLX5_FLOW_FATE_DROP;
15992 }
15993 if (i == RTE_COLOR_RED)
15994 mtr_policy->dr_drop_action[domain] =
15995 tbl_data->jump.action;
15996 action_flags |= MLX5_FLOW_ACTION_DROP;
15997 break;
15998 }
15999 case RTE_FLOW_ACTION_TYPE_QUEUE:
16000 {
16001 if (i >= MLX5_MTR_RTE_COLORS)
16002 return -rte_mtr_error_set(error,
16003 ENOTSUP,
16004 RTE_MTR_ERROR_TYPE_METER_POLICY,
16005 NULL, "cannot create policy "
16006 "fate queue for this color");
16007 act_cnt->queue =
16008 ((const struct rte_flow_action_queue *)
16009 (act->conf))->index;
16010 act_cnt->fate_action =
16011 MLX5_FLOW_FATE_QUEUE;
16012 dev_flow.handle->fate_action =
16013 MLX5_FLOW_FATE_QUEUE;
16014 mtr_policy->is_queue = 1;
16015 action_flags |= MLX5_FLOW_ACTION_QUEUE;
16016 break;
16017 }
16018 case RTE_FLOW_ACTION_TYPE_RSS:
16019 {
16020 int rss_size;
16021
16022 if (i >= MLX5_MTR_RTE_COLORS)
16023 return -rte_mtr_error_set(error,
16024 ENOTSUP,
16025 RTE_MTR_ERROR_TYPE_METER_POLICY,
16026 NULL,
16027 "cannot create policy "
16028 "rss action for this color");
16029
16030
16031
16032
16033 rss_size = (int)rte_flow_conv
16034 (RTE_FLOW_CONV_OP_ACTION,
16035 NULL, 0, act, &flow_err);
16036 if (rss_size <= 0)
16037 return -rte_mtr_error_set(error,
16038 ENOTSUP,
16039 RTE_MTR_ERROR_TYPE_METER_POLICY,
16040 NULL, "Get the wrong "
16041 "rss action struct size");
16042 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
16043 rss_size, 0, SOCKET_ID_ANY);
16044 if (!act_cnt->rss)
16045 return -rte_mtr_error_set(error,
16046 ENOTSUP,
16047 RTE_MTR_ERROR_TYPE_METER_POLICY,
16048 NULL,
16049 "Fail to malloc rss action memory");
16050 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
16051 act_cnt->rss, rss_size,
16052 act, &flow_err);
16053 if (ret < 0)
16054 return -rte_mtr_error_set(error,
16055 ENOTSUP,
16056 RTE_MTR_ERROR_TYPE_METER_POLICY,
16057 NULL, "Fail to save "
16058 "rss action into policy struct");
16059 act_cnt->fate_action =
16060 MLX5_FLOW_FATE_SHARED_RSS;
16061 action_flags |= MLX5_FLOW_ACTION_RSS;
16062 break;
16063 }
16064 case RTE_FLOW_ACTION_TYPE_PORT_ID:
16065 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
16066 {
16067 struct mlx5_flow_dv_port_id_action_resource
16068 port_id_resource;
16069 uint32_t port_id = 0;
16070
16071 if (i >= MLX5_MTR_RTE_COLORS)
16072 return -rte_mtr_error_set(error,
16073 ENOTSUP,
16074 RTE_MTR_ERROR_TYPE_METER_POLICY,
16075 NULL, "cannot create policy "
16076 "port action for this color");
16077 memset(&port_id_resource, 0,
16078 sizeof(port_id_resource));
16079 if (flow_dv_translate_action_port_id(dev, act,
16080 &port_id, &flow_err))
16081 return -rte_mtr_error_set(error,
16082 ENOTSUP,
16083 RTE_MTR_ERROR_TYPE_METER_POLICY,
16084 NULL, "cannot translate "
16085 "policy port action");
16086 port_id_resource.port_id = port_id;
16087 if (flow_dv_port_id_action_resource_register
16088 (dev, &port_id_resource,
16089 &dev_flow, &flow_err))
16090 return -rte_mtr_error_set(error,
16091 ENOTSUP,
16092 RTE_MTR_ERROR_TYPE_METER_POLICY,
16093 NULL, "cannot setup "
16094 "policy port action");
16095 act_cnt->rix_port_id_action =
16096 dev_flow.handle->rix_port_id_action;
16097 act_cnt->fate_action =
16098 MLX5_FLOW_FATE_PORT_ID;
16099 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
16100 break;
16101 }
16102 case RTE_FLOW_ACTION_TYPE_JUMP:
16103 {
16104 uint32_t jump_group = 0;
16105 uint32_t table = 0;
16106 struct mlx5_flow_tbl_data_entry *tbl_data;
16107 struct flow_grp_info grp_info = {
16108 .external = !!dev_flow.external,
16109 .transfer = !!transfer,
16110 .fdb_def_rule = !!priv->fdb_def_rule,
16111 .std_tbl_fix = 0,
16112 .skip_scale = dev_flow.skip_scale &
16113 (1 << MLX5_SCALE_FLOW_GROUP_BIT),
16114 };
16115 struct mlx5_flow_meter_sub_policy *sub_policy =
16116 mtr_policy->sub_policys[domain][0];
16117
16118 if (i >= MLX5_MTR_RTE_COLORS)
16119 return -rte_mtr_error_set(error,
16120 ENOTSUP,
16121 RTE_MTR_ERROR_TYPE_METER_POLICY,
16122 NULL,
16123 "cannot create policy "
16124 "jump action for this color");
16125 jump_group =
16126 ((const struct rte_flow_action_jump *)
16127 act->conf)->group;
16128 if (mlx5_flow_group_to_table(dev, NULL,
16129 jump_group,
16130 &table,
16131 &grp_info, &flow_err))
16132 return -rte_mtr_error_set(error,
16133 ENOTSUP,
16134 RTE_MTR_ERROR_TYPE_METER_POLICY,
16135 NULL, "cannot setup "
16136 "policy jump action");
16137 sub_policy->jump_tbl[i] =
16138 flow_dv_tbl_resource_get(dev,
16139 table, egress,
16140 transfer,
16141 !!dev_flow.external,
16142 NULL, jump_group, 0,
16143 0, &flow_err);
16144 if
16145 (!sub_policy->jump_tbl[i])
16146 return -rte_mtr_error_set(error,
16147 ENOTSUP,
16148 RTE_MTR_ERROR_TYPE_METER_POLICY,
16149 NULL, "cannot create jump action.");
16150 tbl_data = container_of
16151 (sub_policy->jump_tbl[i],
16152 struct mlx5_flow_tbl_data_entry, tbl);
16153 act_cnt->dr_jump_action[domain] =
16154 tbl_data->jump.action;
16155 act_cnt->fate_action =
16156 MLX5_FLOW_FATE_JUMP;
16157 action_flags |= MLX5_FLOW_ACTION_JUMP;
16158 break;
16159 }
16160 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
16161 {
16162 if (i >= MLX5_MTR_RTE_COLORS)
16163 return -rte_mtr_error_set(error,
16164 ENOTSUP,
16165 RTE_MTR_ERROR_TYPE_METER_POLICY,
16166 NULL,
16167 "cannot create policy modify field for this color");
16168 if (flow_dv_convert_action_modify_field
16169 (dev, mhdr_res, act, attr, &flow_err))
16170 return -rte_mtr_error_set(error,
16171 ENOTSUP,
16172 RTE_MTR_ERROR_TYPE_METER_POLICY,
16173 NULL, "cannot setup policy modify field action");
16174 if (!mhdr_res->actions_num)
16175 return -rte_mtr_error_set(error,
16176 ENOTSUP,
16177 RTE_MTR_ERROR_TYPE_METER_POLICY,
16178 NULL, "cannot find policy modify field action");
16179 action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
16180 break;
16181 }
16182
16183
16184
16185
16186 case RTE_FLOW_ACTION_TYPE_METER:
16187 {
16188 const struct rte_flow_action_meter *mtr;
16189 struct mlx5_flow_meter_info *next_fm;
16190 struct mlx5_flow_meter_policy *next_policy;
16191 struct rte_flow_action tag_action;
16192 struct mlx5_rte_flow_action_set_tag set_tag;
16193 uint32_t next_mtr_idx = 0;
16194
16195 mtr = act->conf;
16196 next_fm = mlx5_flow_meter_find(priv,
16197 mtr->mtr_id,
16198 &next_mtr_idx);
16199 if (!next_fm)
16200 return -rte_mtr_error_set(error, EINVAL,
16201 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
16202 "Fail to find next meter.");
16203 if (next_fm->def_policy)
16204 return -rte_mtr_error_set(error, EINVAL,
16205 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
16206 "Hierarchy only supports termination meter.");
16207 next_policy = mlx5_flow_meter_policy_find(dev,
16208 next_fm->policy_id, NULL);
16209 MLX5_ASSERT(next_policy);
16210 if (next_fm->drop_cnt) {
16211 set_tag.id =
16212 (enum modify_reg)
16213 mlx5_flow_get_reg_id(dev,
16214 MLX5_MTR_ID,
16215 0,
16216 (struct rte_flow_error *)error);
16217 set_tag.offset = (priv->mtr_reg_share ?
16218 MLX5_MTR_COLOR_BITS : 0);
16219 set_tag.length = (priv->mtr_reg_share ?
16220 MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
16221 MLX5_REG_BITS);
16222 set_tag.data = next_mtr_idx;
16223 tag_action.type =
16224 (enum rte_flow_action_type)
16225 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
16226 tag_action.conf = &set_tag;
16227 if (flow_dv_convert_action_set_reg
16228 (mhdr_res, &tag_action,
16229 (struct rte_flow_error *)error))
16230 return -rte_errno;
16231 action_flags |=
16232 MLX5_FLOW_ACTION_SET_TAG;
16233 }
16234 if (i == RTE_COLOR_YELLOW && next_fm->color_aware &&
16235 !next_fm->meter_action_y)
16236 if (__flow_dv_create_mtr_yellow_action(dev, next_fm, error))
16237 return -rte_errno;
16238 act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
16239 act_cnt->next_mtr_id = next_fm->meter_id;
16240 act_cnt->next_sub_policy = NULL;
16241 mtr_policy->is_hierarchy = 1;
16242 mtr_policy->dev = next_policy->dev;
16243 if (next_policy->mark)
16244 mtr_policy->mark = 1;
16245 action_flags |=
16246 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
16247 break;
16248 }
16249 default:
16250 return -rte_mtr_error_set(error, ENOTSUP,
16251 RTE_MTR_ERROR_TYPE_METER_POLICY,
16252 NULL, "action type not supported");
16253 }
16254 if ((action_flags & MLX5_FLOW_ACTION_SET_TAG) ||
16255 (action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD)) {
16256
16257 dev_flow.dv.group = 1;
16258 if (flow_dv_modify_hdr_resource_register
16259 (dev, mhdr_res, &dev_flow, &flow_err))
16260 return -rte_mtr_error_set(error,
16261 ENOTSUP,
16262 RTE_MTR_ERROR_TYPE_METER_POLICY,
16263 NULL, "cannot register policy set tag/modify field action");
16264 act_cnt->modify_hdr =
16265 dev_flow.handle->dvh.modify_hdr;
16266 }
16267 }
16268 }
16269 return 0;
16270}
16271
16272
16273
16274
16275
16276
16277
16278
16279
16280
16281
16282
16283
16284
16285
16286
16287
16288
16289
16290
16291
16292static int
16293flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
16294 struct mlx5_flow_meter_policy *mtr_policy,
16295 const struct rte_flow_action *actions[RTE_COLORS],
16296 struct rte_flow_attr *attr,
16297 struct rte_mtr_error *error)
16298{
16299 int ret, i;
16300 uint16_t sub_policy_num;
16301
16302 for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16303 sub_policy_num = (mtr_policy->sub_policy_num >>
16304 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
16305 MLX5_MTR_SUB_POLICY_NUM_MASK;
16306 if (sub_policy_num) {
16307 ret = __flow_dv_create_domain_policy_acts(dev,
16308 mtr_policy, actions, attr,
16309 (enum mlx5_meter_domain)i, error);
16310
16311 if (ret)
16312 return ret;
16313 }
16314 }
16315 return 0;
16316}
16317
16318
16319
16320
16321
16322
16323
16324
16325
16326
16327
16328
16329
16330
16331
16332
16333static int
16334flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
16335 struct rte_flow_error *error)
16336{
16337 struct mlx5_priv *priv = dev->data->dev_private;
16338 struct rte_flow_query_count *qc = data;
16339
16340 if (!priv->sh->cdev->config.devx)
16341 return rte_flow_error_set(error, ENOTSUP,
16342 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16343 NULL,
16344 "counters are not supported");
16345 if (cnt_idx) {
16346 uint64_t pkts, bytes;
16347 struct mlx5_flow_counter *cnt;
16348 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
16349
16350 if (err)
16351 return rte_flow_error_set(error, -err,
16352 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16353 NULL, "cannot read counters");
16354 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
16355 qc->hits_set = 1;
16356 qc->bytes_set = 1;
16357 qc->hits = pkts - cnt->hits;
16358 qc->bytes = bytes - cnt->bytes;
16359 if (qc->reset) {
16360 cnt->hits = pkts;
16361 cnt->bytes = bytes;
16362 }
16363 return 0;
16364 }
16365 return rte_flow_error_set(error, EINVAL,
16366 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16367 NULL,
16368 "counters are not available");
16369}
16370
/**
 * Query an indirect (shared) action through the DV engine.
 *
 * The action type and the object index are both packed into the opaque
 * handle value: the type lives in the bits above
 * MLX5_INDIRECT_ACTION_TYPE_OFFSET, the index in the bits below it.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] handle
 *   Indirect action handle (encoded type + index, not a real pointer).
 * @param[out] data
 *   Caller-provided result buffer; its concrete type depends on the
 *   action type (rte_flow_query_age, rte_flow_query_count or
 *   rte_flow_action_conntrack).
 * @param[out] error
 *   Flow error structure filled on failure.
 *
 * @return
 *   0 on success, a negative errno value otherwise (error is set).
 */
int
flow_dv_action_query(struct rte_eth_dev *dev,
		     const struct rte_flow_action_handle *handle, void *data,
		     struct rte_flow_error *error)
{
	struct mlx5_age_param *age_param;
	struct rte_flow_query_age *resp;
	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
	/* Action type is stored in the handle's upper bits. */
	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	/* Object index is stored in the handle's lower bits. */
	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_action *ct;
	uint16_t owner;
	uint32_t dev_idx;

	switch (type) {
	case MLX5_INDIRECT_ACTION_TYPE_AGE:
		age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
		resp = data;
		/* Aged out iff the ASO age state already hit the timeout. */
		resp->aged = __atomic_load_n(&age_param->state,
					     __ATOMIC_RELAXED) == AGE_TMOUT ?
									1 : 0;
		/* The last-hit time is only meaningful while not aged. */
		resp->sec_since_last_hit_valid = !resp->aged;
		if (resp->sec_since_last_hit_valid)
			resp->sec_since_last_hit = __atomic_load_n
			(&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
		return 0;
	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
		return flow_dv_query_count(dev, idx, data, error);
	case MLX5_INDIRECT_ACTION_TYPE_CT:
		/* CT objects may be shared; only the owning port may query. */
		owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
		if (owner != PORT_ID(priv))
			return rte_flow_error_set(error, EACCES,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"CT object owned by another port");
		dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
		ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
		MLX5_ASSERT(ct);
		if (!ct->refcnt)
			return rte_flow_error_set(error, EFAULT,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"CT object is inactive");
		/* Fill the software-tracked fields before the WQE query. */
		((struct rte_flow_action_conntrack *)data)->peer_port =
							ct->peer;
		((struct rte_flow_action_conntrack *)data)->is_original_dir =
							ct->is_original;
		/* Hardware state is fetched via an ASO query WQE. */
		if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
			return rte_flow_error_set(error, EIO,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"Failed to query CT context");
		return 0;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "action type query not supported");
	}
}
16431
16432
16433
16434
16435
16436
16437
16438
16439
16440
16441
16442
16443
16444
16445
16446
16447static int
16448flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
16449 void *data, struct rte_flow_error *error)
16450{
16451 struct rte_flow_query_age *resp = data;
16452 struct mlx5_age_param *age_param;
16453
16454 if (flow->age) {
16455 struct mlx5_aso_age_action *act =
16456 flow_aso_age_get_by_idx(dev, flow->age);
16457
16458 age_param = &act->age_params;
16459 } else if (flow->counter) {
16460 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
16461
16462 if (!age_param || !age_param->timeout)
16463 return rte_flow_error_set
16464 (error, EINVAL,
16465 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16466 NULL, "cannot read age data");
16467 } else {
16468 return rte_flow_error_set(error, EINVAL,
16469 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16470 NULL, "age data not available");
16471 }
16472 resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
16473 AGE_TMOUT ? 1 : 0;
16474 resp->sec_since_last_hit_valid = !resp->aged;
16475 if (resp->sec_since_last_hit_valid)
16476 resp->sec_since_last_hit = __atomic_load_n
16477 (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
16478 return 0;
16479}
16480
16481
16482
16483
16484
16485
16486
16487static int
16488flow_dv_query(struct rte_eth_dev *dev,
16489 struct rte_flow *flow __rte_unused,
16490 const struct rte_flow_action *actions __rte_unused,
16491 void *data __rte_unused,
16492 struct rte_flow_error *error __rte_unused)
16493{
16494 int ret = -EINVAL;
16495
16496 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
16497 switch (actions->type) {
16498 case RTE_FLOW_ACTION_TYPE_VOID:
16499 break;
16500 case RTE_FLOW_ACTION_TYPE_COUNT:
16501 ret = flow_dv_query_count(dev, flow->counter, data,
16502 error);
16503 break;
16504 case RTE_FLOW_ACTION_TYPE_AGE:
16505 ret = flow_dv_query_age(dev, flow, data, error);
16506 break;
16507 default:
16508 return rte_flow_error_set(error, ENOTSUP,
16509 RTE_FLOW_ERROR_TYPE_ACTION,
16510 actions,
16511 "action not supported");
16512 }
16513 }
16514 return ret;
16515}
16516
16517
16518
16519
16520
16521
16522
16523
16524
16525
16526static void
16527flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
16528 struct mlx5_flow_meter_info *fm)
16529{
16530 struct mlx5_priv *priv = dev->data->dev_private;
16531 int i;
16532
16533 if (!fm || !priv->sh->config.dv_flow_en)
16534 return;
16535 for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16536 if (fm->drop_rule[i]) {
16537 claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
16538 fm->drop_rule[i] = NULL;
16539 }
16540 }
16541}
16542
16543static void
16544flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
16545{
16546 struct mlx5_priv *priv = dev->data->dev_private;
16547 struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16548 struct mlx5_flow_tbl_data_entry *tbl;
16549 int i, j;
16550
16551 for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16552 if (mtrmng->def_rule[i]) {
16553 claim_zero(mlx5_flow_os_destroy_flow
16554 (mtrmng->def_rule[i]));
16555 mtrmng->def_rule[i] = NULL;
16556 }
16557 if (mtrmng->def_matcher[i]) {
16558 tbl = container_of(mtrmng->def_matcher[i]->tbl,
16559 struct mlx5_flow_tbl_data_entry, tbl);
16560 mlx5_list_unregister(tbl->matchers,
16561 &mtrmng->def_matcher[i]->entry);
16562 mtrmng->def_matcher[i] = NULL;
16563 }
16564 for (j = 0; j < MLX5_REG_BITS; j++) {
16565 if (mtrmng->drop_matcher[i][j]) {
16566 tbl =
16567 container_of(mtrmng->drop_matcher[i][j]->tbl,
16568 struct mlx5_flow_tbl_data_entry,
16569 tbl);
16570 mlx5_list_unregister(tbl->matchers,
16571 &mtrmng->drop_matcher[i][j]->entry);
16572 mtrmng->drop_matcher[i][j] = NULL;
16573 }
16574 }
16575 if (mtrmng->drop_tbl[i]) {
16576 flow_dv_tbl_resource_release(MLX5_SH(dev),
16577 mtrmng->drop_tbl[i]);
16578 mtrmng->drop_tbl[i] = NULL;
16579 }
16580 }
16581}
16582
16583
16584#define METER_ACTIONS 2
16585
16586static void
16587__flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
16588 enum mlx5_meter_domain domain)
16589{
16590 struct mlx5_priv *priv = dev->data->dev_private;
16591 struct mlx5_flow_meter_def_policy *def_policy =
16592 priv->sh->mtrmng->def_policy[domain];
16593
16594 __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
16595 mlx5_free(def_policy);
16596 priv->sh->mtrmng->def_policy[domain] = NULL;
16597}
16598
16599
16600
16601
16602
16603
16604
16605static void
16606flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
16607{
16608 struct mlx5_priv *priv = dev->data->dev_private;
16609 int i;
16610
16611 for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
16612 if (priv->sh->mtrmng->def_policy[i])
16613 __flow_dv_destroy_domain_def_policy(dev,
16614 (enum mlx5_meter_domain)i);
16615 priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
16616}
16617
/**
 * Create one meter policy flow rule matching a given policy color.
 *
 * Builds the matcher mask and match value buffers (optionally including
 * the source port in E-Switch mode), matches the color value in the
 * policy color register, and creates the flow with the supplied actions.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] color_reg_c_idx
 *   Index of the REG_C register holding the policy color.
 * @param[in] color
 *   Policy color to match on.
 * @param[in] matcher_object
 *   Pre-created matcher object for this rule.
 * @param[in] actions_n
 *   Number of actions in @p actions.
 * @param[in] actions
 *   Action array for the rule.
 * @param[in] match_src_port
 *   Whether the rule must also match the source port.
 * @param[in] item
 *   Port item used for the source port match; may be NULL.
 * @param[out] rule
 *   Created flow rule on success.
 * @param[in] attr
 *   Flow attributes used by the port item translation.
 *
 * @return
 *   0 on success, -1 otherwise (errors are logged, not set).
 */
static int
__flow_dv_create_policy_flow(struct rte_eth_dev *dev,
			uint32_t color_reg_c_idx,
			enum rte_color color, void *matcher_object,
			int actions_n, void *actions,
			bool match_src_port, const struct rte_flow_item *item,
			void **rule, const struct rte_flow_attr *attr)
{
	int ret;
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5_flow_dv_match_params matcher = {
		.size = sizeof(matcher.buf),
	};
	struct mlx5_priv *priv = dev->data->dev_private;
	uint8_t misc_mask;

	/* Source port match is only meaningful in E-Switch mode. */
	if (match_src_port && priv->sh->esw_mode) {
		if (flow_dv_translate_item_port_id(dev, matcher.buf,
						   value.buf, item, attr)) {
			DRV_LOG(ERR, "Failed to create meter policy%d flow's"
				" value with port.", color);
			return -1;
		}
	}
	/* Match the full color value in the policy color register. */
	flow_dv_match_meta_reg(matcher.buf, value.buf,
			       (enum modify_reg)color_reg_c_idx,
			       rte_col_2_mlx5_col(color), UINT32_MAX);
	/* Shrink the match value buffer to the enabled misc parts. */
	misc_mask = flow_dv_matcher_enable(value.buf);
	__flow_dv_adjust_buf_size(&value.size, misc_mask);
	ret = mlx5_flow_os_create_flow(matcher_object, (void *)&value,
				       actions_n, actions, rule);
	if (ret) {
		DRV_LOG(ERR, "Failed to create meter policy%d flow.", color);
		return -1;
	}
	return 0;
}
16657
/**
 * Register (or reuse) a matcher for a meter policy color rule.
 *
 * Builds the matcher mask (optionally including the source port in
 * E-Switch mode and, for non-RED priorities, the color register bits)
 * and registers it on the sub-policy table's matcher list, which
 * deduplicates identical matchers by CRC/priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] color_reg_c_idx
 *   Index of the REG_C register holding the policy color.
 * @param[in] priority
 *   Matcher priority; also encodes the color index by convention
 *   (values below RTE_COLOR_RED match the color register).
 * @param[in] sub_policy
 *   Sub-policy providing the table the matcher lives in.
 * @param[in] attr
 *   Flow attributes used by the port item translation.
 * @param[in] match_src_port
 *   Whether the matcher must include the source port.
 * @param[in] item
 *   Port item used for the source port match; may be NULL.
 * @param[out] policy_matcher
 *   Registered matcher on success.
 * @param[out] error
 *   Flow error structure filled by the registration callback.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
__flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
			uint32_t color_reg_c_idx,
			uint16_t priority,
			struct mlx5_flow_meter_sub_policy *sub_policy,
			const struct rte_flow_attr *attr,
			bool match_src_port,
			const struct rte_flow_item *item,
			struct mlx5_flow_dv_matcher **policy_matcher,
			struct rte_flow_error *error)
{
	struct mlx5_list_entry *entry;
	struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
	struct mlx5_flow_dv_matcher matcher = {
		.mask = {
			.size = sizeof(matcher.mask.buf),
		},
		.tbl = tbl_rsc,
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = &matcher,
	};
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_priv *priv = dev->data->dev_private;
	const uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;

	/* Source port match is only meaningful in E-Switch mode. */
	if (match_src_port && priv->sh->esw_mode) {
		if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
						   value.buf, item, attr)) {
			DRV_LOG(ERR, "Failed to register meter policy%d matcher"
				" with port.", priority);
			return -1;
		}
	}
	tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
	/* Only non-RED rules match on the color register bits. */
	if (priority < RTE_COLOR_RED)
		flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
			(enum modify_reg)color_reg_c_idx, 0, color_mask);
	matcher.priority = priority;
	/* CRC of the mask is the deduplication key for the matcher list. */
	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
				    matcher.mask.size);
	entry = mlx5_list_register(tbl_data->matchers, &ctx);
	if (!entry) {
		DRV_LOG(ERR, "Failed to register meter drop matcher.");
		return -1;
	}
	*policy_matcher =
		container_of(entry, struct mlx5_flow_dv_matcher, entry);
	return 0;
}
16712
16713
16714
16715
16716
16717
16718
16719
16720
16721
16722
16723
16724
16725
16726
16727
16728
16729
/**
 * Create the per-color policy rules for one meter domain.
 *
 * For every color that has actions, a matcher and a flow rule are
 * created in the sub-policy table (the table itself is created lazily
 * on first use). On any failure every rule/matcher created so far is
 * unwound in reverse color order.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in,out] sub_policy
 *   Sub-policy the rules are attached to.
 * @param[in] egress
 *   Non-zero for the egress domain.
 * @param[in] transfer
 *   Non-zero for the transfer (FDB) domain.
 * @param[in] match_src_port
 *   Whether non-RED rules must match the source port.
 * @param[in] acts
 *   Per-color action arrays; colors with actions_n == 0 are skipped.
 *
 * @return
 *   0 on success, -1 otherwise (errors are logged).
 */
static int
__flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
		struct mlx5_flow_meter_sub_policy *sub_policy,
		uint8_t egress, uint8_t transfer, bool match_src_port,
		struct mlx5_meter_policy_acts acts[RTE_COLORS])
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_error flow_err;
	uint32_t color_reg_c_idx;
	struct rte_flow_attr attr = {
		.group = MLX5_FLOW_TABLE_LEVEL_POLICY,
		.priority = 0,
		.ingress = 0,
		.egress = !!egress,
		.transfer = !!transfer,
		.reserved = 0,
	};
	int i;
	/* Resolve which REG_C register carries the meter color. */
	int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
	struct mlx5_sub_policy_color_rule *color_rule;
	bool svport_match;
	/* Track rules created in this call for error unwinding. */
	struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL};

	if (ret < 0)
		return -1;
	/* Create the policy table lazily on first use. */
	if (!sub_policy->tbl_rsc)
		sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
				MLX5_FLOW_TABLE_LEVEL_POLICY,
				egress, transfer, false, NULL, 0, 0,
				sub_policy->idx, &flow_err);
	if (!sub_policy->tbl_rsc) {
		DRV_LOG(ERR,
			"Failed to create meter sub policy table.");
		return -1;
	}
	/* Prepare matchers. */
	color_reg_c_idx = ret;
	for (i = 0; i < RTE_COLORS; i++) {
		TAILQ_INIT(&sub_policy->color_rules[i]);
		if (!acts[i].actions_n)
			continue;
		color_rule = mlx5_malloc(MLX5_MEM_ZERO,
				sizeof(struct mlx5_sub_policy_color_rule),
				0, SOCKET_ID_ANY);
		if (!color_rule) {
			DRV_LOG(ERR, "No memory to create color rule.");
			goto err_exit;
		}
		tmp_rules[i] = color_rule;
		TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
				  color_rule, next_port);
		color_rule->src_port = priv->representor_id;
		/* The color index doubles as the rule priority. */
		attr.priority = i;
		/* RED never matches on the source port. */
		svport_match = (i != RTE_COLOR_RED) ? match_src_port : false;
		/* Create matcher. */
		if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
				MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
				&attr, svport_match, NULL,
				&color_rule->matcher, &flow_err)) {
			DRV_LOG(ERR, "Failed to create color%u matcher.", i);
			goto err_exit;
		}
		/* Create flow, matching color. */
		if (__flow_dv_create_policy_flow(dev,
				color_reg_c_idx, (enum rte_color)i,
				color_rule->matcher->matcher_object,
				acts[i].actions_n, acts[i].dv_actions,
				svport_match, NULL, &color_rule->rule,
				&attr)) {
			DRV_LOG(ERR, "Failed to create color%u rule.", i);
			goto err_exit;
		}
	}
	return 0;
err_exit:
	/* Unwind in reverse: i is the failing color, walk back to 0. */
	do {
		color_rule = tmp_rules[i];
		if (color_rule) {
			if (color_rule->rule)
				mlx5_flow_os_destroy_flow(color_rule->rule);
			if (color_rule->matcher) {
				struct mlx5_flow_tbl_data_entry *tbl =
					container_of(color_rule->matcher->tbl,
						     typeof(*tbl), tbl);
				mlx5_list_unregister(tbl->matchers,
						&color_rule->matcher->entry);
			}
			TAILQ_REMOVE(&sub_policy->color_rules[i],
				     color_rule, next_port);
			mlx5_free(color_rule);
		}
	} while (i--);
	return -1;
}
16827
/**
 * Assemble the per-color DV action arrays for a sub-policy and create
 * the policy rules for one meter domain.
 *
 * RED always gets the policy drop action. For the other colors the
 * array is built in order: optional hierarchy-meter action (placed
 * first when mtr_first holds), mark/tag action, modify-header action,
 * then the fate action (port, jump/drop, queue/RSS or next-meter
 * jump). On failure every hierarchy meter attached in this call is
 * detached again.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in,out] mtr_policy
 *   Meter policy providing the per-color action counters.
 * @param[in,out] sub_policy
 *   Sub-policy the rules are created for.
 * @param[in] domain
 *   Meter domain (ingress/egress/transfer) index.
 *
 * @return
 *   0 on success, -1 otherwise (errors are logged).
 */
static int
__flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
			struct mlx5_flow_meter_policy *mtr_policy,
			struct mlx5_flow_meter_sub_policy *sub_policy,
			uint32_t domain)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_meter_policy_acts acts[RTE_COLORS];
	struct mlx5_flow_dv_tag_resource *tag;
	struct mlx5_flow_dv_port_id_action_resource *port_action;
	struct mlx5_hrxq *hrxq;
	/* Hierarchy meters attached per color, for error rollback. */
	struct mlx5_flow_meter_info *next_fm[RTE_COLORS] = {NULL};
	struct mlx5_flow_meter_policy *next_policy;
	struct mlx5_flow_meter_sub_policy *next_sub_policy;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct rte_flow_error error;
	uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
	uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
	/* Whether the next-meter action must precede the other actions. */
	bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
	bool match_src_port = false;
	int i;

	/* Build the DV action list for every color. */
	for (i = 0; i < RTE_COLORS; i++) {
		acts[i].actions_n = 0;
		if (i == RTE_COLOR_RED) {
			/* RED color only uses the pre-created drop action. */
			acts[i].dv_actions[0] =
				mtr_policy->dr_drop_action[domain];
			acts[i].actions_n = 1;
			continue;
		}
		if (mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
			struct rte_flow_attr attr = {
				.transfer = transfer
			};

			next_fm[i] = mlx5_flow_meter_find(priv,
					mtr_policy->act_cnt[i].next_mtr_id,
					NULL);
			if (!next_fm[i]) {
				DRV_LOG(ERR,
					"Failed to get next hierarchy meter.");
				goto err_exit;
			}
			if (mlx5_flow_meter_attach(priv, next_fm[i],
						   &attr, &error)) {
				DRV_LOG(ERR, "%s", error.message);
				next_fm[i] = NULL;
				goto err_exit;
			}
			/* Meter action must be first for the TX direction. */
			if (mtr_first) {
				acts[i].dv_actions[acts[i].actions_n] =
					(next_fm[i]->color_aware && i == RTE_COLOR_YELLOW) ?
						next_fm[i]->meter_action_y :
						next_fm[i]->meter_action_g;
				acts[i].actions_n++;
			}
		}
		if (mtr_policy->act_cnt[i].rix_mark) {
			tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
					mtr_policy->act_cnt[i].rix_mark);
			if (!tag) {
				DRV_LOG(ERR, "Failed to find "
				"mark action for policy.");
				goto err_exit;
			}
			acts[i].dv_actions[acts[i].actions_n] = tag->action;
			acts[i].actions_n++;
		}
		if (mtr_policy->act_cnt[i].modify_hdr) {
			acts[i].dv_actions[acts[i].actions_n] =
				mtr_policy->act_cnt[i].modify_hdr->action;
			acts[i].actions_n++;
		}
		if (mtr_policy->act_cnt[i].fate_action) {
			switch (mtr_policy->act_cnt[i].fate_action) {
			case MLX5_FLOW_FATE_PORT_ID:
				port_action = mlx5_ipool_get
					(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
				mtr_policy->act_cnt[i].rix_port_id_action);
				if (!port_action) {
					DRV_LOG(ERR, "Failed to find "
						"port action for policy.");
					goto err_exit;
				}
				acts[i].dv_actions[acts[i].actions_n] =
					port_action->action;
				acts[i].actions_n++;
				mtr_policy->dev = dev;
				match_src_port = true;
				break;
			case MLX5_FLOW_FATE_DROP:
			case MLX5_FLOW_FATE_JUMP:
				/* Drop and jump share a pre-built action. */
				acts[i].dv_actions[acts[i].actions_n] =
				mtr_policy->act_cnt[i].dr_jump_action[domain];
				acts[i].actions_n++;
				break;
			case MLX5_FLOW_FATE_SHARED_RSS:
			case MLX5_FLOW_FATE_QUEUE:
				hrxq = mlx5_ipool_get
				(priv->sh->ipool[MLX5_IPOOL_HRXQ],
				 sub_policy->rix_hrxq[i]);
				if (!hrxq) {
					DRV_LOG(ERR, "Failed to find "
						"queue action for policy.");
					goto err_exit;
				}
				acts[i].dv_actions[acts[i].actions_n] =
					hrxq->action;
				acts[i].actions_n++;
				break;
			case MLX5_FLOW_FATE_MTR:
				if (!next_fm[i]) {
					DRV_LOG(ERR,
						"No next hierarchy meter.");
					goto err_exit;
				}
				/* Non-TX direction: meter action goes last. */
				if (!mtr_first) {
					acts[i].dv_actions[acts[i].actions_n] =
						(next_fm[i]->color_aware && i == RTE_COLOR_YELLOW) ?
							next_fm[i]->meter_action_y :
							next_fm[i]->meter_action_g;
					acts[i].actions_n++;
				}
				if (mtr_policy->act_cnt[i].next_sub_policy) {
					next_sub_policy =
					mtr_policy->act_cnt[i].next_sub_policy;
				} else {
					next_policy =
						mlx5_flow_meter_policy_find(dev,
						next_fm[i]->policy_id, NULL);
					MLX5_ASSERT(next_policy);
					next_sub_policy =
					next_policy->sub_policys[domain][0];
				}
				/* Jump into the next meter's policy table. */
				tbl_data =
					container_of(next_sub_policy->tbl_rsc,
					struct mlx5_flow_tbl_data_entry, tbl);
				acts[i].dv_actions[acts[i].actions_n++] =
					tbl_data->jump.action;
				if (mtr_policy->act_cnt[i].modify_hdr)
					match_src_port = !!transfer;
				break;
			default:
				/* Other fate types need no extra action. */
				break;
			}
		}
	}
	if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
				egress, transfer, match_src_port, acts)) {
		DRV_LOG(ERR,
			"Failed to create policy rules per domain.");
		goto err_exit;
	}
	return 0;
err_exit:
	/* Detach every hierarchy meter attached above. */
	for (i = 0; i < RTE_COLORS; i++)
		if (next_fm[i])
			mlx5_flow_meter_detach(priv, next_fm[i]);
	return -1;
}
16992
16993
16994
16995
16996
16997
16998
16999
17000
17001
17002
17003
17004static int
17005flow_dv_create_policy_rules(struct rte_eth_dev *dev,
17006 struct mlx5_flow_meter_policy *mtr_policy)
17007{
17008 int i;
17009 uint16_t sub_policy_num;
17010
17011 for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
17012 sub_policy_num = (mtr_policy->sub_policy_num >>
17013 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
17014 MLX5_MTR_SUB_POLICY_NUM_MASK;
17015 if (!sub_policy_num)
17016 continue;
17017
17018 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
17019 mtr_policy->sub_policys[i][0], i)) {
17020 DRV_LOG(ERR, "Failed to create policy action "
17021 "list per domain.");
17022 return -1;
17023 }
17024 }
17025 return 0;
17026}
17027
/**
 * Create the default meter policy for one domain.
 *
 * The default policy jumps GREEN and YELLOW traffic to the meter
 * suffix table and sends RED traffic to the meter drop table, then
 * installs the corresponding color rules. All resources are created
 * only if not already present; any failure rolls the domain's default
 * policy back via __flow_dv_destroy_domain_def_policy().
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] domain
 *   Meter domain (ingress/egress/transfer) index.
 *
 * @return
 *   0 on success, -1 otherwise (errors are logged).
 */
static int
__flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
	struct mlx5_flow_meter_def_policy *def_policy;
	struct mlx5_flow_tbl_resource *jump_tbl;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	uint8_t egress, transfer;
	struct rte_flow_error error;
	struct mlx5_meter_policy_acts acts[RTE_COLORS];
	int ret;

	egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
	transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
	def_policy = mtrmng->def_policy[domain];
	if (!def_policy) {
		def_policy = mlx5_malloc(MLX5_MEM_ZERO,
				sizeof(struct mlx5_flow_meter_def_policy),
				RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!def_policy) {
			DRV_LOG(ERR, "Failed to alloc default policy table.");
			goto def_policy_error;
		}
		mtrmng->def_policy[domain] = def_policy;
		/* Create the meter suffix table with SUFFIX level. */
		jump_tbl = flow_dv_tbl_resource_get(dev,
				MLX5_FLOW_TABLE_LEVEL_METER,
				egress, transfer, false, NULL, 0,
				0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
		if (!jump_tbl) {
			DRV_LOG(ERR,
				"Failed to create meter suffix table.");
			goto def_policy_error;
		}
		def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
		tbl_data = container_of(jump_tbl,
					struct mlx5_flow_tbl_data_entry, tbl);
		def_policy->dr_jump_action[RTE_COLOR_GREEN] =
						tbl_data->jump.action;
		/* GREEN jumps to the suffix table. */
		acts[RTE_COLOR_GREEN].dv_actions[0] = tbl_data->jump.action;
		acts[RTE_COLOR_GREEN].actions_n = 1;
		/*
		 * YELLOW gets its own reference on the same suffix table
		 * (a second resource_get takes a second reference).
		 */
		jump_tbl = flow_dv_tbl_resource_get(dev,
				MLX5_FLOW_TABLE_LEVEL_METER,
				egress, transfer, false, NULL, 0,
				0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
		if (!jump_tbl) {
			DRV_LOG(ERR,
				"Failed to get meter suffix table.");
			goto def_policy_error;
		}
		def_policy->sub_policy.jump_tbl[RTE_COLOR_YELLOW] = jump_tbl;
		tbl_data = container_of(jump_tbl,
					struct mlx5_flow_tbl_data_entry, tbl);
		def_policy->dr_jump_action[RTE_COLOR_YELLOW] =
						tbl_data->jump.action;
		acts[RTE_COLOR_YELLOW].dv_actions[0] = tbl_data->jump.action;
		acts[RTE_COLOR_YELLOW].actions_n = 1;
		/* RED jumps to the shared meter drop table. */
		if (!mtrmng->drop_tbl[domain]) {
			mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
				(dev, MLX5_FLOW_TABLE_LEVEL_METER,
				egress, transfer, false, NULL, 0,
				0, MLX5_MTR_TABLE_ID_DROP, &error);
			if (!mtrmng->drop_tbl[domain]) {
				DRV_LOG(ERR, "Failed to create meter "
					"drop table for default policy.");
				goto def_policy_error;
			}
		}
		/* all RED: unique Drop table for jump action. */
		tbl_data = container_of(mtrmng->drop_tbl[domain],
					struct mlx5_flow_tbl_data_entry, tbl);
		def_policy->dr_jump_action[RTE_COLOR_RED] =
						tbl_data->jump.action;
		acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
		acts[RTE_COLOR_RED].actions_n = 1;
		/* Create default policy rules. */
		ret = __flow_dv_create_domain_policy_rules(dev,
					&def_policy->sub_policy,
					egress, transfer, false, acts);
		if (ret) {
			DRV_LOG(ERR, "Failed to create default policy rules.");
			goto def_policy_error;
		}
	}
	return 0;
def_policy_error:
	__flow_dv_destroy_domain_def_policy(dev,
					    (enum mlx5_meter_domain)domain);
	return -1;
}
17126
17127
17128
17129
17130
17131
17132
17133
17134
17135static int
17136flow_dv_create_def_policy(struct rte_eth_dev *dev)
17137{
17138 struct mlx5_priv *priv = dev->data->dev_private;
17139 int i;
17140
17141
17142 for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
17143 if (!priv->sh->config.dv_esw_en &&
17144 i == MLX5_MTR_DOMAIN_TRANSFER)
17145 continue;
17146 if (__flow_dv_create_domain_def_policy(dev, i)) {
17147 DRV_LOG(ERR, "Failed to create default policy");
17148
17149 flow_dv_destroy_def_policy(dev);
17150 return -1;
17151 }
17152 }
17153 return 0;
17154}
17155
17156
17157
17158
17159
17160
17161
17162
17163
17164
17165
17166
17167
17168
17169
17170
17171static int
17172flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
17173 struct mlx5_flow_meter_info *fm,
17174 uint32_t mtr_idx,
17175 uint8_t domain_bitmap)
17176{
17177 struct mlx5_priv *priv = dev->data->dev_private;
17178 struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
17179 struct rte_flow_error error;
17180 struct mlx5_flow_tbl_data_entry *tbl_data;
17181 uint8_t egress, transfer;
17182 void *actions[METER_ACTIONS];
17183 int domain, ret, i;
17184 struct mlx5_flow_counter *cnt;
17185 struct mlx5_flow_dv_match_params value = {
17186 .size = sizeof(value.buf),
17187 };
17188 struct mlx5_flow_dv_match_params matcher_para = {
17189 .size = sizeof(matcher_para.buf),
17190 };
17191 int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
17192 0, &error);
17193 uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
17194 uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
17195 struct mlx5_list_entry *entry;
17196 struct mlx5_flow_dv_matcher matcher = {
17197 .mask = {
17198 .size = sizeof(matcher.mask.buf),
17199 },
17200 };
17201 struct mlx5_flow_dv_matcher *drop_matcher;
17202 struct mlx5_flow_cb_ctx ctx = {
17203 .error = &error,
17204 .data = &matcher,
17205 };
17206 uint8_t misc_mask;
17207
17208 if (!priv->mtr_en || mtr_id_reg_c < 0) {
17209 rte_errno = ENOTSUP;
17210 return -1;
17211 }
17212 for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
17213 if (!(domain_bitmap & (1 << domain)) ||
17214 (mtrmng->def_rule[domain] && !fm->drop_cnt))
17215 continue;
17216 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
17217 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
17218
17219 if (!mtrmng->drop_tbl[domain]) {
17220 mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
17221 MLX5_FLOW_TABLE_LEVEL_METER,
17222 egress, transfer, false, NULL, 0,
17223 0, MLX5_MTR_TABLE_ID_DROP, &error);
17224 if (!mtrmng->drop_tbl[domain]) {
17225 DRV_LOG(ERR, "Failed to create meter drop table.");
17226 goto policy_error;
17227 }
17228 }
17229
17230 matcher.tbl = mtrmng->drop_tbl[domain],
17231 tbl_data = container_of(mtrmng->drop_tbl[domain],
17232 struct mlx5_flow_tbl_data_entry, tbl);
17233 if (!mtrmng->def_matcher[domain]) {
17234 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
17235 (enum modify_reg)mtr_id_reg_c,
17236 0, 0);
17237 matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
17238 matcher.crc = rte_raw_cksum
17239 ((const void *)matcher.mask.buf,
17240 matcher.mask.size);
17241 entry = mlx5_list_register(tbl_data->matchers, &ctx);
17242 if (!entry) {
17243 DRV_LOG(ERR, "Failed to register meter "
17244 "drop default matcher.");
17245 goto policy_error;
17246 }
17247 mtrmng->def_matcher[domain] = container_of(entry,
17248 struct mlx5_flow_dv_matcher, entry);
17249 }
17250
17251 if (!mtrmng->def_rule[domain]) {
17252 i = 0;
17253 actions[i++] = priv->sh->dr_drop_action;
17254 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
17255 (enum modify_reg)mtr_id_reg_c, 0, 0);
17256 misc_mask = flow_dv_matcher_enable(value.buf);
17257 __flow_dv_adjust_buf_size(&value.size, misc_mask);
17258 ret = mlx5_flow_os_create_flow
17259 (mtrmng->def_matcher[domain]->matcher_object,
17260 (void *)&value, i, actions,
17261 &mtrmng->def_rule[domain]);
17262 if (ret) {
17263 DRV_LOG(ERR, "Failed to create meter "
17264 "default drop rule for drop table.");
17265 goto policy_error;
17266 }
17267 }
17268 if (!fm->drop_cnt)
17269 continue;
17270 MLX5_ASSERT(mtrmng->max_mtr_bits);
17271 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
17272
17273 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
17274 (enum modify_reg)mtr_id_reg_c, 0,
17275 (mtr_id_mask << mtr_id_offset));
17276 matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
17277 matcher.crc = rte_raw_cksum
17278 ((const void *)matcher.mask.buf,
17279 matcher.mask.size);
17280 entry = mlx5_list_register(tbl_data->matchers, &ctx);
17281 if (!entry) {
17282 DRV_LOG(ERR,
17283 "Failed to register meter drop matcher.");
17284 goto policy_error;
17285 }
17286 mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
17287 container_of(entry, struct mlx5_flow_dv_matcher,
17288 entry);
17289 }
17290 drop_matcher =
17291 mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
17292
17293 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
17294 (enum modify_reg)mtr_id_reg_c,
17295 (mtr_idx << mtr_id_offset), UINT32_MAX);
17296 i = 0;
17297 cnt = flow_dv_counter_get_by_idx(dev,
17298 fm->drop_cnt, NULL);
17299 actions[i++] = cnt->action;
17300 actions[i++] = priv->sh->dr_drop_action;
17301 misc_mask = flow_dv_matcher_enable(value.buf);
17302 __flow_dv_adjust_buf_size(&value.size, misc_mask);
17303 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
17304 (void *)&value, i, actions,
17305 &fm->drop_rule[domain]);
17306 if (ret) {
17307 DRV_LOG(ERR, "Failed to create meter "
17308 "drop rule for drop table.");
17309 goto policy_error;
17310 }
17311 }
17312 return 0;
17313policy_error:
17314 for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
17315 if (fm->drop_rule[i]) {
17316 claim_zero(mlx5_flow_os_destroy_flow
17317 (fm->drop_rule[i]));
17318 fm->drop_rule[i] = NULL;
17319 }
17320 }
17321 return -1;
17322}
17323
/**
 * Find or create an ingress meter sub-policy matching the given RSS
 * descriptors.
 *
 * Under the policy spinlock: resolve one hash Rx queue per color, reuse an
 * existing sub-policy whose hrxq set matches, otherwise allocate (or reuse
 * the pristine slot 0) and build the policy rules for it.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] mtr_policy
 *   Meter policy to search/extend.
 * @param[in] rss_desc
 *   Per-color RSS descriptors; NULL entries are skipped.
 * @param[in] next_sub_policy
 *   Next-level sub-policy when this policy is hierarchical.
 * @param[out] is_reuse
 *   Set to true when an existing sub-policy was reused.
 *
 * @return
 *   The sub-policy on success, NULL on failure.
 */
static struct mlx5_flow_meter_sub_policy *
__flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
		struct mlx5_flow_meter_policy *mtr_policy,
		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
		struct mlx5_flow_meter_sub_policy *next_sub_policy,
		bool *is_reuse)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
	uint32_t sub_policy_idx = 0;
	uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
	uint32_t i, j;
	struct mlx5_hrxq *hrxq;
	struct mlx5_flow_handle dh;
	struct mlx5_meter_policy_action_container *act_cnt;
	uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
	uint16_t sub_policy_num;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	rte_spinlock_lock(&mtr_policy->sl);
	/* Get (and reference) one hrxq per colored RSS descriptor. */
	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
		if (!rss_desc[i])
			continue;
		hrxq = mlx5_hrxq_get(dev, rss_desc[i]);
		if (!hrxq) {
			rte_spinlock_unlock(&mtr_policy->sl);
			return NULL;
		}
		hrxq_idx[i] = hrxq->idx;
	}
	sub_policy_num = (mtr_policy->sub_policy_num >>
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
			MLX5_MTR_SUB_POLICY_NUM_MASK;
	for (j = 0; j < sub_policy_num; j++) {
		for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
			if (rss_desc[i] &&
			    hrxq_idx[i] !=
			    mtr_policy->sub_policys[domain][j]->rix_hrxq[i])
				break;
		}
		if (i >= MLX5_MTR_RTE_COLORS) {
			/*
			 * Found an existing sub-policy with the same hrxq
			 * per color: drop the extra references taken above
			 * and reuse it.
			 */
			rte_spinlock_unlock(&mtr_policy->sl);
			for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
				mlx5_hrxq_release(dev, hrxq_idx[i]);
			*is_reuse = true;
			return mtr_policy->sub_policys[domain][j];
		}
	}
	/* Slot 0 is pre-created; use it if it has no hrxq bound yet. */
	if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[RTE_COLOR_GREEN] &&
	    !mtr_policy->sub_policys[domain][0]->rix_hrxq[RTE_COLOR_YELLOW]) {
		sub_policy = mtr_policy->sub_policys[domain][0];
		sub_policy_idx = sub_policy->idx;
	} else {
		sub_policy = mlx5_ipool_zmalloc
				(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
				 &sub_policy_idx);
		if (!sub_policy ||
		    sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
			for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
				mlx5_hrxq_release(dev, hrxq_idx[i]);
			goto rss_sub_policy_error;
		}
		sub_policy->idx = sub_policy_idx;
		sub_policy->main_policy = mtr_policy;
	}
	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
		if (!rss_desc[i])
			continue;
		sub_policy->rix_hrxq[i] = hrxq_idx[i];
		if (mtr_policy->is_hierarchy) {
			act_cnt = &mtr_policy->act_cnt[i];
			act_cnt->next_sub_policy = next_sub_policy;
			mlx5_hrxq_release(dev, hrxq_idx[i]);
		} else {
			/*
			 * Overwrite the last action from RSS to queue: the
			 * hrxq reference is now owned by the sub-policy,
			 * and the Rx queue flags must reflect mark/flag
			 * usage for this fate.
			 */
			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
					      hrxq_idx[i]);
			if (!hrxq) {
				DRV_LOG(ERR, "Failed to get policy hrxq");
				goto rss_sub_policy_error;
			}
			act_cnt = &mtr_policy->act_cnt[i];
			if (act_cnt->rix_mark || act_cnt->modify_hdr) {
				memset(&dh, 0, sizeof(struct mlx5_flow_handle));
				if (act_cnt->rix_mark)
					wks->mark = 1;
				dh.fate_action = MLX5_FLOW_FATE_QUEUE;
				dh.rix_hrxq = hrxq_idx[i];
				flow_drv_rxq_flags_set(dev, &dh);
			}
		}
	}
	if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
					       sub_policy, domain)) {
		DRV_LOG(ERR, "Failed to create policy "
			"rules for ingress domain.");
		goto rss_sub_policy_error;
	}
	if (sub_policy != mtr_policy->sub_policys[domain][0]) {
		/* A new sub-policy was allocated; record it in the array
		 * and bump the per-domain count stored in the packed
		 * sub_policy_num bitfield.
		 */
		i = (mtr_policy->sub_policy_num >>
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
			MLX5_MTR_SUB_POLICY_NUM_MASK;
		if (i >= MLX5_MTR_RSS_MAX_SUB_POLICY) {
			DRV_LOG(ERR, "No free sub-policy slot.");
			goto rss_sub_policy_error;
		}
		mtr_policy->sub_policys[domain][i] = sub_policy;
		i++;
		mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
		mtr_policy->sub_policy_num |=
			(i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
	}
	rte_spinlock_unlock(&mtr_policy->sl);
	*is_reuse = false;
	return sub_policy;
rss_sub_policy_error:
	if (sub_policy) {
		__flow_dv_destroy_sub_policy_rules(dev, sub_policy);
		if (sub_policy != mtr_policy->sub_policys[domain][0]) {
			/* NOTE(review): 'i' indexes past the stored count
			 * here only when the slot was registered above;
			 * otherwise the entry cleared is the current
			 * count position — confirm intended.
			 */
			i = (mtr_policy->sub_policy_num >>
				(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
				MLX5_MTR_SUB_POLICY_NUM_MASK;
			mtr_policy->sub_policys[domain][i] = NULL;
			mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
					sub_policy->idx);
		}
	}
	rte_spinlock_unlock(&mtr_policy->sl);
	return NULL;
}
17466
17467
17468
17469
17470
17471
17472
17473
17474
17475
17476
17477
17478
17479static struct mlx5_flow_meter_sub_policy *
17480flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
17481 struct mlx5_flow_meter_policy *mtr_policy,
17482 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
17483{
17484 struct mlx5_priv *priv = dev->data->dev_private;
17485 struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17486 struct mlx5_flow_meter_info *next_fm;
17487 struct mlx5_flow_meter_policy *next_policy;
17488 struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
17489 struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
17490 struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
17491 uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17492 bool reuse_sub_policy;
17493 uint32_t i = 0;
17494 uint32_t j = 0;
17495
17496 while (true) {
17497
17498 policies[i++] = mtr_policy;
17499 if (!mtr_policy->is_hierarchy)
17500 break;
17501 if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
17502 DRV_LOG(ERR, "Exceed max meter number in hierarchy.");
17503 return NULL;
17504 }
17505 rte_spinlock_lock(&mtr_policy->sl);
17506 next_fm = mlx5_flow_meter_hierarchy_next_meter(priv, mtr_policy, NULL);
17507 rte_spinlock_unlock(&mtr_policy->sl);
17508 if (!next_fm) {
17509 DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
17510 return NULL;
17511 }
17512 next_policy =
17513 mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
17514 NULL);
17515 MLX5_ASSERT(next_policy);
17516 mtr_policy = next_policy;
17517 }
17518 while (i) {
17519
17520
17521
17522
17523 sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
17524 policies[--i],
17525 rss_desc,
17526 next_sub_policy,
17527 &reuse_sub_policy);
17528 if (!sub_policy) {
17529 DRV_LOG(ERR, "Failed to get the sub policy.");
17530 goto err_exit;
17531 }
17532 if (!reuse_sub_policy)
17533 sub_policies[j++] = sub_policy;
17534 next_sub_policy = sub_policy;
17535 }
17536 return sub_policy;
17537err_exit:
17538 while (j) {
17539 uint16_t sub_policy_num;
17540
17541 sub_policy = sub_policies[--j];
17542 mtr_policy = sub_policy->main_policy;
17543 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
17544 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
17545 sub_policy_num = (mtr_policy->sub_policy_num >>
17546 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17547 MLX5_MTR_SUB_POLICY_NUM_MASK;
17548 mtr_policy->sub_policys[domain][sub_policy_num - 1] =
17549 NULL;
17550 sub_policy_num--;
17551 mtr_policy->sub_policy_num &=
17552 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17553 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i));
17554 mtr_policy->sub_policy_num |=
17555 (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17556 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i);
17557 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17558 sub_policy->idx);
17559 }
17560 }
17561 return NULL;
17562}
17563
17564
17565
17566
17567
17568
17569
17570
17571
17572
17573
17574
17575
17576
17577
17578
17579
17580
17581
17582static int
17583mlx5_meter_hierarchy_skip_tag_rule(struct mlx5_priv *priv,
17584 struct mlx5_flow_meter_policy *mtr_policy,
17585 int32_t src_port,
17586 struct mlx5_flow_meter_info **next_fm,
17587 bool *skip,
17588 struct rte_flow_error *error)
17589{
17590 struct mlx5_flow_meter_sub_policy *sub_policy;
17591 struct mlx5_sub_policy_color_rule *color_rule;
17592 uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
17593 int ret = 0;
17594 int i;
17595
17596 *next_fm = NULL;
17597 *skip = false;
17598 rte_spinlock_lock(&mtr_policy->sl);
17599 if (!mtr_policy->is_hierarchy)
17600 goto exit;
17601 *next_fm = mlx5_flow_meter_hierarchy_next_meter(priv, mtr_policy, NULL);
17602 if (!*next_fm) {
17603 ret = rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
17604 NULL, "Failed to find next meter in hierarchy.");
17605 goto exit;
17606 }
17607 if (!(*next_fm)->drop_cnt) {
17608 *skip = true;
17609 goto exit;
17610 }
17611 sub_policy = mtr_policy->sub_policys[domain][0];
17612 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17613 if (mtr_policy->act_cnt[i].fate_action != MLX5_FLOW_FATE_MTR)
17614 continue;
17615 TAILQ_FOREACH(color_rule, &sub_policy->color_rules[i], next_port)
17616 if (color_rule->src_port == src_port) {
17617 *skip = true;
17618 goto exit;
17619 }
17620 }
17621exit:
17622 rte_spinlock_unlock(&mtr_policy->sl);
17623 return ret;
17624}
17625
17626
17627
17628
17629
17630
17631
17632
17633
17634
17635
17636
17637
17638
17639
17640
17641
/**
 * Create the tag (color) rules for every level of a meter hierarchy.
 *
 * First walks the hierarchy collecting each policy whose tag rule for
 * @p src_port is still missing, then for every collected level and every
 * color with a meter fate creates a matcher and a rule chaining:
 * next-meter action + modify-header (order depends on mtr_first) + jump to
 * the next policy table. On any failure everything created here is undone.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Meter information table (head of the hierarchy).
 * @param[in] src_port
 *   The src port this extra rule should use; UINT16_MAX means no source
 *   port match, and places the meter action first.
 * @param[in] item
 *   The src port match item.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, -rte_errno otherwise.
 */
static int
flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
				struct mlx5_flow_meter_info *fm,
				int32_t src_port,
				const struct rte_flow_item *item,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_policy *mtr_policy;
	struct mlx5_flow_meter_sub_policy *sub_policy;
	struct mlx5_flow_meter_info *next_fm = NULL;
	struct mlx5_flow_meter_policy *next_policy;
	struct mlx5_flow_meter_sub_policy *next_sub_policy;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_sub_policy_color_rule *color_rule;
	struct mlx5_meter_policy_acts acts;
	uint32_t color_reg_c_idx;
	bool mtr_first = (src_port != UINT16_MAX) ? true : false;
	struct rte_flow_attr attr = {
		.group = MLX5_FLOW_TABLE_LEVEL_POLICY,
		.priority = 0,
		.ingress = 0,
		.egress = 0,
		.transfer = 1,
		.reserved = 0,
	};
	uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
	/* Per-level bookkeeping so the error path can undo exactly what
	 * was created.
	 */
	struct {
		struct mlx5_flow_meter_policy *fm_policy;
		struct mlx5_flow_meter_info *next_fm;
		struct mlx5_sub_policy_color_rule *tag_rule[MLX5_MTR_RTE_COLORS];
	} fm_info[MLX5_MTR_CHAIN_MAX_NUM] = { {0} };
	uint32_t fm_cnt = 0;
	uint32_t i, j;

	color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
	/* Get all fms who need to create the tag color rule. */
	do {
		bool skip = false;

		mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
		MLX5_ASSERT(mtr_policy);
		if (mlx5_meter_hierarchy_skip_tag_rule(priv, mtr_policy, src_port,
						       &next_fm, &skip, error))
			goto err_exit;
		if (next_fm && !skip) {
			fm_info[fm_cnt].fm_policy = mtr_policy;
			fm_info[fm_cnt].next_fm = next_fm;
			if (++fm_cnt >= MLX5_MTR_CHAIN_MAX_NUM) {
				rte_flow_error_set(error, errno,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					"Exceed max meter number in hierarchy.");
				goto err_exit;
			}
		}
		fm = next_fm;
	} while (fm);
	/* Create tag color rules for all levels collected above. */
	for (i = 0; i < fm_cnt; i++) {
		void *mtr_action;

		mtr_policy = fm_info[i].fm_policy;
		rte_spinlock_lock(&mtr_policy->sl);
		sub_policy = mtr_policy->sub_policys[domain][0];
		for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
			if (mtr_policy->act_cnt[j].fate_action != MLX5_FLOW_FATE_MTR)
				continue;
			color_rule = mlx5_malloc(MLX5_MEM_ZERO,
				sizeof(struct mlx5_sub_policy_color_rule),
				0, SOCKET_ID_ANY);
			if (!color_rule) {
				rte_spinlock_unlock(&mtr_policy->sl);
				rte_flow_error_set(error, ENOMEM,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"No memory to create tag color rule.");
				goto err_exit;
			}
			color_rule->src_port = src_port;
			next_fm = fm_info[i].next_fm;
			if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
				mlx5_free(color_rule);
				rte_spinlock_unlock(&mtr_policy->sl);
				goto err_exit;
			}
			fm_info[i].tag_rule[j] = color_rule;
			TAILQ_INSERT_TAIL(&sub_policy->color_rules[j], color_rule, next_port);
			/* Prepare to create the flow: meter action, then
			 * modify-header (or the reverse order when a source
			 * port is matched), then jump to next policy table.
			 */
			mtr_action = (next_fm->color_aware && j == RTE_COLOR_YELLOW) ?
						next_fm->meter_action_y :
						next_fm->meter_action_g;
			next_policy = mlx5_flow_meter_policy_find(dev, next_fm->policy_id, NULL);
			MLX5_ASSERT(next_policy);
			next_sub_policy = next_policy->sub_policys[domain][0];
			tbl_data = container_of(next_sub_policy->tbl_rsc,
						struct mlx5_flow_tbl_data_entry, tbl);
			if (mtr_first) {
				acts.dv_actions[0] = mtr_action;
				acts.dv_actions[1] = mtr_policy->act_cnt[j].modify_hdr->action;
			} else {
				acts.dv_actions[0] = mtr_policy->act_cnt[j].modify_hdr->action;
				acts.dv_actions[1] = mtr_action;
			}
			acts.dv_actions[2] = tbl_data->jump.action;
			acts.actions_n = 3;
			if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
						MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
						&attr, true, item, &color_rule->matcher, error)) {
				rte_spinlock_unlock(&mtr_policy->sl);
				rte_flow_error_set(error, errno,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					"Failed to create hierarchy meter matcher.");
				goto err_exit;
			}
			if (__flow_dv_create_policy_flow(dev, color_reg_c_idx, (enum rte_color)j,
						color_rule->matcher->matcher_object,
						acts.actions_n, acts.dv_actions,
						true, item, &color_rule->rule, &attr)) {
				rte_spinlock_unlock(&mtr_policy->sl);
				rte_flow_error_set(error, errno,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					"Failed to create hierarchy meter rule.");
				goto err_exit;
			}
		}
		rte_spinlock_unlock(&mtr_policy->sl);
	}
	return 0;
err_exit:
	/* Undo every tag rule recorded in fm_info, in creation order. */
	for (i = 0; i < fm_cnt; i++) {
		mtr_policy = fm_info[i].fm_policy;
		rte_spinlock_lock(&mtr_policy->sl);
		sub_policy = mtr_policy->sub_policys[domain][0];
		for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
			color_rule = fm_info[i].tag_rule[j];
			if (!color_rule)
				continue;
			if (color_rule->rule)
				mlx5_flow_os_destroy_flow(color_rule->rule);
			if (color_rule->matcher) {
				struct mlx5_flow_tbl_data_entry *tbl =
					container_of(color_rule->matcher->tbl, typeof(*tbl), tbl);
				mlx5_list_unregister(tbl->matchers, &color_rule->matcher->entry);
			}
			/* NOTE(review): detach is balanced against the
			 * attach done per created color rule above —
			 * confirm next_fm ref-counting stays symmetric.
			 */
			if (fm_info[i].next_fm)
				mlx5_flow_meter_detach(priv, fm_info[i].next_fm);
			TAILQ_REMOVE(&sub_policy->color_rules[j], color_rule, next_port);
			mlx5_free(color_rule);
		}
		rte_spinlock_unlock(&mtr_policy->sl);
	}
	return -rte_errno;
}
17794
17795
17796
17797
17798
17799
17800
17801
17802
/**
 * Destroy the sub-policy rules bound to Rx queues for one meter policy.
 *
 * For shared-RSS fates all ingress sub-policies are torn down and the
 * dynamically allocated ones are freed (slot 0 is kept); for queue fates
 * only the rules of sub-policy 0 are destroyed. Runs under the policy
 * spinlock.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] mtr_policy
 *   Meter policy whose ingress sub-policies are destroyed.
 */
static void
flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
		struct mlx5_flow_meter_policy *mtr_policy)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
	uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
	uint32_t i, j;
	uint16_t sub_policy_num, new_policy_num;

	rte_spinlock_lock(&mtr_policy->sl);
	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
		switch (mtr_policy->act_cnt[i].fate_action) {
		case MLX5_FLOW_FATE_SHARED_RSS:
			/* Extract the ingress count from the packed
			 * per-domain sub_policy_num bitfield.
			 */
			sub_policy_num = (mtr_policy->sub_policy_num >>
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
			MLX5_MTR_SUB_POLICY_NUM_MASK;
			new_policy_num = sub_policy_num;
			for (j = 0; j < sub_policy_num; j++) {
				sub_policy =
					mtr_policy->sub_policys[domain][j];
				if (sub_policy) {
					__flow_dv_destroy_sub_policy_rules(dev,
						sub_policy);
				if (sub_policy !=
					mtr_policy->sub_policys[domain][0]) {
					/* Slot 0 is pre-allocated with the
					 * policy and must not be freed.
					 */
					mtr_policy->sub_policys[domain][j] =
								NULL;
					mlx5_ipool_free
				(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
						sub_policy->idx);
						new_policy_num--;
					}
				}
			}
			if (new_policy_num != sub_policy_num) {
				/* Write the reduced count back into the
				 * bitfield for this domain.
				 */
				mtr_policy->sub_policy_num &=
				~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
				(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
				mtr_policy->sub_policy_num |=
				(new_policy_num &
					MLX5_MTR_SUB_POLICY_NUM_MASK) <<
				(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
			}
			break;
		case MLX5_FLOW_FATE_QUEUE:
			sub_policy = mtr_policy->sub_policys[domain][0];
			__flow_dv_destroy_sub_policy_rules(dev,
							   sub_policy);
			break;
		default:
			/* Other fates (jump, drop, ...) hold no Rx queue. */
			break;
		}
	}
	rte_spinlock_unlock(&mtr_policy->sl);
}
17860
17861
17862
17863
17864
17865
17866
17867
17868
17869
17870
17871
/**
 * Discover whether the DR drop action works in the root table.
 *
 * Probes by creating a match-all matcher in the root table and attaching a
 * flow with only the shared DR drop action; the created objects are
 * destroyed before returning.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 when the DR drop action is supported in the root table, -1 otherwise
 *   (also on unexpected probe failures).
 */
int
mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_dv_match_params mask = {
		.size = sizeof(mask.buf),
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.priority = 0,
		.match_criteria_enable = 0,
		.match_mask = (void *)&mask,
	};
	struct mlx5_flow_tbl_resource *tbl = NULL;
	void *matcher = NULL;
	void *flow = NULL;
	int ret = -1;

	/* Root (level 0) ingress table, no tunnel, no group. */
	tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
					0, 0, 0, NULL);
	if (!tbl)
		goto err;
	dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
	__flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
	ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
					       tbl->obj, &matcher);
	if (ret)
		goto err;
	__flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
	ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
				       &sh->dr_drop_action, &flow);
err:
	/*
	 * If the drop flow was created, the DR drop action is supported in
	 * root table and ret is already 0; otherwise distinguish the
	 * expected "not supported" errno from a genuine probe failure.
	 */
	if (!flow) {
		if (matcher &&
		    (errno == EPROTONOSUPPORT || errno == EOPNOTSUPP))
			DRV_LOG(INFO, "DR drop action is not supported in root table.");
		else
			DRV_LOG(ERR, "Unexpected error in DR drop action support detection");
		ret = -1;
	} else {
		claim_zero(mlx5_flow_os_destroy_flow(flow));
	}
	/* Tear down all probe objects in reverse creation order. */
	if (matcher)
		claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
	if (tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
	return ret;
}
17928
17929
17930
17931
17932
17933
17934
17935
17936
17937
17938
17939
17940
17941int
17942mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
17943{
17944 struct mlx5_priv *priv = dev->data->dev_private;
17945 struct mlx5_dev_ctx_shared *sh = priv->sh;
17946 struct mlx5_flow_dv_match_params mask = {
17947 .size = sizeof(mask.buf),
17948 };
17949 struct mlx5_flow_dv_match_params value = {
17950 .size = sizeof(value.buf),
17951 };
17952 struct mlx5dv_flow_matcher_attr dv_attr = {
17953 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
17954 .priority = 0,
17955 .match_criteria_enable = 0,
17956 .match_mask = (void *)&mask,
17957 };
17958 void *actions[2] = { 0 };
17959 struct mlx5_flow_tbl_resource *tbl = NULL;
17960 struct mlx5_devx_obj *dcs = NULL;
17961 void *matcher = NULL;
17962 void *flow = NULL;
17963 int ret = -1;
17964
17965 tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
17966 0, 0, 0, NULL);
17967 if (!tbl)
17968 goto err;
17969 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
17970 if (!dcs)
17971 goto err;
17972 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
17973 &actions[0]);
17974 if (ret)
17975 goto err;
17976 dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17977 __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17978 ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17979 tbl->obj, &matcher);
17980 if (ret)
17981 goto err;
17982 __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17983 ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17984 actions, &flow);
17985err:
17986
17987
17988
17989
17990
17991
17992
17993 if (flow) {
17994 DRV_LOG(INFO, "Batch counter is not supported in root "
17995 "table. Switch to fallback mode.");
17996 rte_errno = ENOTSUP;
17997 ret = -rte_errno;
17998 claim_zero(mlx5_flow_os_destroy_flow(flow));
17999 } else {
18000
18001 if (!matcher || (matcher && errno != EINVAL))
18002 DRV_LOG(ERR, "Unexpected error in counter offset "
18003 "support detection");
18004 ret = 0;
18005 }
18006 if (actions[0])
18007 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
18008 if (matcher)
18009 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
18010 if (tbl)
18011 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
18012 if (dcs)
18013 claim_zero(mlx5_devx_cmd_destroy(dcs));
18014 return ret;
18015}
18016
18017
18018
18019
18020
18021
18022
18023
18024
18025
18026
18027
18028
18029
18030
18031
18032
18033
18034static int
18035flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
18036 uint64_t *pkts, uint64_t *bytes, void **action)
18037{
18038 struct mlx5_priv *priv = dev->data->dev_private;
18039 struct mlx5_flow_counter *cnt;
18040 uint64_t inn_pkts, inn_bytes;
18041 int ret;
18042
18043 if (!priv->sh->cdev->config.devx)
18044 return -1;
18045
18046 ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
18047 if (ret)
18048 return -1;
18049 cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
18050 if (cnt && action)
18051 *action = cnt->action;
18052
18053 *pkts = inn_pkts - cnt->hits;
18054 *bytes = inn_bytes - cnt->bytes;
18055 if (clear) {
18056 cnt->hits = inn_pkts;
18057 cnt->bytes = inn_bytes;
18058 }
18059 return 0;
18060}
18061
18062
18063
18064
18065
18066
18067
18068
18069
18070
18071
18072
18073
18074
18075
18076
18077
18078
18079
18080
18081
/**
 * Get aged-out flows.
 *
 * Collects contexts from both the ASO-age list and the aged-counter queue
 * under the age-info spinlock. When @p context is provided, at most
 * @p nb_contexts entries are filled; the return value is always the total
 * number of aged flows seen during the traversal (which may exceed the
 * number of contexts written).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] context
 *   The address of an array of pointers to the aged-out flows contexts.
 * @param[in] nb_contexts
 *   The length of context array pointers; 0 means only count.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   How many contexts get in success, otherwise negative errno value.
 */
static int
flow_dv_get_aged_flows(struct rte_eth_dev *dev,
		       void **context,
		       uint32_t nb_contexts,
		       struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_flow_counter *counter;
	struct mlx5_aso_age_action *act;
	int nb_flows = 0;

	if (nb_contexts && !context)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "empty context");
	age_info = GET_PORT_AGE_INFO(priv);
	rte_spinlock_lock(&age_info->aged_sl);
	/* ASO age actions first. */
	LIST_FOREACH(act, &age_info->aged_aso, next) {
		nb_flows++;
		if (nb_contexts) {
			context[nb_flows - 1] =
						act->age_params.context;
			if (!(--nb_contexts))
				break;
		}
	}
	/* Then counter-based aged flows. */
	TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
		nb_flows++;
		if (nb_contexts) {
			age_param = MLX5_CNT_TO_AGE(counter);
			context[nb_flows - 1] = age_param->context;
			if (!(--nb_contexts))
				break;
		}
	}
	rte_spinlock_unlock(&age_info->aged_sl);
	/* Re-arm the aging event trigger for this port. */
	MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	return nb_flows;
}
18123
18124
18125
18126
/*
 * Mutually exclusive with the DevX counter API; thin wrapper that allocates
 * a non-aging counter (age parameter 0).
 */
static uint32_t
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
	return flow_dv_counter_alloc(dev, 0);
}
18132
18133
18134
18135
18136
18137
18138
18139
18140
18141
18142
18143
18144
18145
18146
18147
18148
18149
18150int
18151flow_dv_action_validate(struct rte_eth_dev *dev,
18152 const struct rte_flow_indir_action_conf *conf,
18153 const struct rte_flow_action *action,
18154 struct rte_flow_error *err)
18155{
18156 struct mlx5_priv *priv = dev->data->dev_private;
18157
18158 RTE_SET_USED(conf);
18159 switch (action->type) {
18160 case RTE_FLOW_ACTION_TYPE_RSS:
18161
18162
18163
18164
18165
18166
18167
18168
18169 if (priv->obj_ops.ind_table_modify == NULL)
18170 return rte_flow_error_set
18171 (err, ENOTSUP,
18172 RTE_FLOW_ERROR_TYPE_ACTION,
18173 NULL,
18174 "Indirect RSS action not supported");
18175 return mlx5_validate_action_rss(dev, action, err);
18176 case RTE_FLOW_ACTION_TYPE_AGE:
18177 if (!priv->sh->aso_age_mng)
18178 return rte_flow_error_set(err, ENOTSUP,
18179 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
18180 NULL,
18181 "Indirect age action not supported");
18182 return flow_dv_validate_action_age(0, action, dev, err);
18183 case RTE_FLOW_ACTION_TYPE_COUNT:
18184 return flow_dv_validate_action_count(dev, true, 0, NULL, err);
18185 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
18186 if (!priv->sh->ct_aso_en)
18187 return rte_flow_error_set(err, ENOTSUP,
18188 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
18189 "ASO CT is not supported");
18190 return mlx5_validate_action_ct(dev, action->conf, err);
18191 default:
18192 return rte_flow_error_set(err, ENOTSUP,
18193 RTE_FLOW_ERROR_TYPE_ACTION,
18194 NULL,
18195 "action type not supported");
18196 }
18197}
18198
18199
18200
18201
18202
18203
18204
18205
18206
18207
18208
18209
18210
18211static inline int
18212flow_dv_mtr_policy_rss_compare(const struct rte_flow_action_rss *r1,
18213 const struct rte_flow_action_rss *r2)
18214{
18215 if (r1 == NULL || r2 == NULL)
18216 return 0;
18217 if (!(r1->level <= 1 && r2->level <= 1) &&
18218 !(r1->level > 1 && r2->level > 1))
18219 return 1;
18220 if (r1->types != r2->types &&
18221 !((r1->types == 0 || r1->types == RTE_ETH_RSS_IP) &&
18222 (r2->types == 0 || r2->types == RTE_ETH_RSS_IP)))
18223 return 1;
18224 if (r1->key || r2->key) {
18225 const void *key1 = r1->key ? r1->key : rss_hash_default_key;
18226 const void *key2 = r2->key ? r2->key : rss_hash_default_key;
18227
18228 if (memcmp(key1, key2, MLX5_RSS_HASH_KEY_LEN))
18229 return 1;
18230 }
18231 return 0;
18232}
18233
18234
18235
18236
18237
18238
18239
18240
18241
18242
18243
18244
18245
18246
18247
18248
18249
18250
18251
18252
18253
/**
 * Validate the meter hierarchy reachable from a meter-fate policy action.
 *
 * Walks the chain of meters/policies starting at @p meter_id, checking
 * that every meter exists, is shared and uses a termination policy, and
 * that the chain length stays within MLX5_MTR_CHAIN_MAX_NUM. The domain
 * bits of the first policy and the RSS flag of the terminal policy are
 * reported back.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] meter_id
 *   Meter id of the first meter in the hierarchy.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[out] is_rss
 *   Set when the terminal policy uses RSS.
 * @param[out] hierarchy_domain
 *   The domain bitmap taken from the first policy in the chain.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, otherwise negative errno value with error set.
 */
static int
flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
				      uint32_t meter_id,
				      uint64_t action_flags,
				      bool *is_rss,
				      uint8_t *hierarchy_domain,
				      struct rte_mtr_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_info *fm;
	struct mlx5_flow_meter_policy *policy;
	uint8_t cnt = 1;

	/* A meter fate cannot be combined with another fate action. */
	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
		return -rte_mtr_error_set(error, EINVAL,
					RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
					NULL,
					"Multiple fate actions not supported.");
	*hierarchy_domain = 0;
	fm = mlx5_flow_meter_find(priv, meter_id, NULL);
	while (true) {
		if (!fm)
			return -rte_mtr_error_set(error, EINVAL,
					RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
					"Meter not found in meter hierarchy.");
		if (fm->def_policy)
			return -rte_mtr_error_set(error, EINVAL,
					RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
				"Non termination meter not supported in hierarchy.");
		if (!fm->shared)
			return -rte_mtr_error_set(error, EINVAL,
					RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
					"Only shared meter supported in hierarchy.");
		policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
		MLX5_ASSERT(policy);
		/*
		 * Only record the domains of the first policy; subsequent
		 * levels must be creatable inside those domains.
		 */
		if (!*hierarchy_domain) {
			if (policy->transfer)
				*hierarchy_domain |=
						MLX5_MTR_DOMAIN_TRANSFER_BIT;
			if (policy->ingress)
				*hierarchy_domain |=
						MLX5_MTR_DOMAIN_INGRESS_BIT;
			if (policy->egress)
				*hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
		}
		if (!policy->is_hierarchy) {
			/* Terminal policy reached. */
			*is_rss = policy->is_rss;
			break;
		}
		rte_spinlock_lock(&policy->sl);
		fm = mlx5_flow_meter_hierarchy_next_meter(priv, policy, NULL);
		rte_spinlock_unlock(&policy->sl);
		if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
			return -rte_mtr_error_set(error, EINVAL,
					RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
					"Exceed max hierarchy meter number.");
	}
	return 0;
}
18319
18320
18321
18322
18323
18324
18325
18326
18327
18328
18329
18330
18331
18332
18333
18334
18335
18336
/**
 * Validate the meter policy actions provided for each color.
 *
 * Resolves the policy mode (default/green-only/yellow-only/all), validates
 * each color's action list against the device configuration, and computes
 * the domain bitmap (ingress/egress/transfer) the policy can live in.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] actions
 *   Array of action lists, one entry per RTE color.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[out] is_rss
 *   Set to true when an RSS action (or an RSS-terminated hierarchy) is used.
 * @param[out] domain_bitmap
 *   Bitmap of meter domains supported by this policy.
 * @param[out] policy_mode
 *   Resolved policy mode (MLX5_MTR_POLICY_MODE_*).
 * @param[out] error
 *   Pointer to the meter error structure to fill on failure.
 *
 * @return
 *   0 on success, a negative value (via rte_mtr_error_set) otherwise.
 */
static int
flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
			const struct rte_flow_action *actions[RTE_COLORS],
			struct rte_flow_attr *attr,
			bool *is_rss,
			uint8_t *domain_bitmap,
			uint8_t *policy_mode,
			struct rte_mtr_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_sh_config *dev_conf = &priv->sh->config;
	const struct rte_flow_action *act;
	uint64_t action_flags[RTE_COLORS] = {0};
	int actions_n;
	int i, ret;
	struct rte_flow_error flow_err;
	uint8_t domain_color[RTE_COLORS] = {0};
	uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
	uint8_t hierarchy_domain = 0;
	const struct rte_flow_action_meter *mtr;
	const struct rte_flow_action_meter *next_mtr = NULL;
	bool def_green = false;
	bool def_yellow = false;
	const struct rte_flow_action_rss *rss_color[RTE_COLORS] = {NULL};

	/* Without E-Switch the transfer domain cannot be supported. */
	if (!dev_conf->dv_esw_en)
		def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
	*domain_bitmap = def_domain;
	/* Red color only supports drop action. */
	if (!actions[RTE_COLOR_RED] ||
	    actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
		return -rte_mtr_error_set(error, ENOTSUP,
				RTE_MTR_ERROR_TYPE_METER_POLICY,
				NULL, "Red color only supports drop action.");
	/*
	 * Check default policy actions:
	 * Green / Yellow: no action, Red: drop action
	 * Either G or Y will trigger default policy actions to be created.
	 */
	if (!actions[RTE_COLOR_GREEN] ||
	    actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)
		def_green = true;
	if (!actions[RTE_COLOR_YELLOW] ||
	    actions[RTE_COLOR_YELLOW]->type == RTE_FLOW_ACTION_TYPE_END)
		def_yellow = true;
	if (def_green && def_yellow) {
		*policy_mode = MLX5_MTR_POLICY_MODE_DEF;
		return 0;
	} else if (!def_green && def_yellow) {
		*policy_mode = MLX5_MTR_POLICY_MODE_OG;
	} else if (def_green && !def_yellow) {
		*policy_mode = MLX5_MTR_POLICY_MODE_OY;
	} else {
		*policy_mode = MLX5_MTR_POLICY_MODE_ALL;
	}
	/* Set to empty string in case of NULL pointer access by user. */
	flow_err.message = "";
	for (i = 0; i < RTE_COLORS; i++) {
		act = actions[i];
		for (action_flags[i] = 0, actions_n = 0;
		     act && act->type != RTE_FLOW_ACTION_TYPE_END;
		     act++) {
			if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
				return -rte_mtr_error_set(error, ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "too many actions");
			switch (act->type) {
			case RTE_FLOW_ACTION_TYPE_PORT_ID:
			case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
				if (!dev_conf->dv_esw_en)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "PORT action validate check"
					" fail for ESW disable");
				ret = flow_dv_validate_action_port_id(dev,
						action_flags[i],
						act, attr, &flow_err);
				if (ret)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, flow_err.message ?
					flow_err.message :
					"PORT action validate check fail");
				++actions_n;
				action_flags[i] |= MLX5_FLOW_ACTION_PORT_ID;
				break;
			case RTE_FLOW_ACTION_TYPE_MARK:
				ret = flow_dv_validate_action_mark(dev, act,
							   action_flags[i],
							   attr, &flow_err);
				if (ret < 0)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, flow_err.message ?
					flow_err.message :
					"Mark action validate check fail");
				/* Extended metadata MARK needs register copy,
				 * which is unsupported in meter policies.
				 */
				if (dev_conf->dv_xmeta_en !=
					MLX5_XMETA_MODE_LEGACY)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "Extend MARK action is "
					"not supported. Please try use "
					"default policy for meter.");
				action_flags[i] |= MLX5_FLOW_ACTION_MARK;
				++actions_n;
				break;
			case RTE_FLOW_ACTION_TYPE_SET_TAG:
				ret = flow_dv_validate_action_set_tag(dev,
							act, action_flags[i],
							attr, &flow_err);
				if (ret)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, flow_err.message ?
					flow_err.message :
					"Set tag action validate check fail");
				action_flags[i] |= MLX5_FLOW_ACTION_SET_TAG;
				++actions_n;
				break;
			case RTE_FLOW_ACTION_TYPE_DROP:
				ret = mlx5_flow_validate_action_drop
					(action_flags[i], attr, &flow_err);
				if (ret < 0)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, flow_err.message ?
					flow_err.message :
					"Drop action validate check fail");
				action_flags[i] |= MLX5_FLOW_ACTION_DROP;
				++actions_n;
				break;
			case RTE_FLOW_ACTION_TYPE_QUEUE:
				/*
				 * Check whether extensive
				 * metadata feature is engaged.
				 */
				if (dev_conf->dv_flow_en &&
				    (dev_conf->dv_xmeta_en !=
				     MLX5_XMETA_MODE_LEGACY) &&
				    mlx5_flow_ext_mreg_supported(dev))
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "Queue action with meta "
					  "is not supported. Please try use "
					  "default policy for meter.");
				ret = mlx5_flow_validate_action_queue(act,
							action_flags[i], dev,
							attr, &flow_err);
				if (ret < 0)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, flow_err.message ?
					  flow_err.message :
					  "Queue action validate check fail");
				action_flags[i] |= MLX5_FLOW_ACTION_QUEUE;
				++actions_n;
				break;
			case RTE_FLOW_ACTION_TYPE_RSS:
				/* Same extensive-metadata restriction as for
				 * the QUEUE action above.
				 */
				if (dev_conf->dv_flow_en &&
				    (dev_conf->dv_xmeta_en !=
				     MLX5_XMETA_MODE_LEGACY) &&
				    mlx5_flow_ext_mreg_supported(dev))
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "RSS action with meta "
					  "is not supported. Please try use "
					  "default policy for meter.");
				ret = mlx5_validate_action_rss(dev, act,
							       &flow_err);
				if (ret < 0)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, flow_err.message ?
					  flow_err.message :
					  "RSS action validate check fail");
				action_flags[i] |= MLX5_FLOW_ACTION_RSS;
				++actions_n;
				/* Either G or Y will set the RSS. */
				rss_color[i] = act->conf;
				break;
			case RTE_FLOW_ACTION_TYPE_JUMP:
				ret = flow_dv_validate_action_jump(dev,
					NULL, act, action_flags[i],
					attr, true, &flow_err);
				if (ret)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, flow_err.message ?
					  flow_err.message :
					  "Jump action validate check fail");
				++actions_n;
				action_flags[i] |= MLX5_FLOW_ACTION_JUMP;
				break;
			case RTE_FLOW_ACTION_TYPE_METER:
				/* Meter hierarchy: green and yellow must
				 * chain to the same next meter.
				 */
				mtr = act->conf;
				if (next_mtr && next_mtr->mtr_id != mtr->mtr_id)
					return -rte_mtr_error_set(error, ENOTSUP,
						RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
						"Green and Yellow must use the same meter.");
				ret = flow_dv_validate_policy_mtr_hierarchy(dev,
							mtr->mtr_id,
							action_flags[i],
							is_rss,
							&hierarchy_domain,
							error);
				if (ret)
					return ret;
				++actions_n;
				action_flags[i] |=
				MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
				next_mtr = mtr;
				break;
			case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
				ret = flow_dv_validate_action_modify_field(dev,
					action_flags[i], act, attr, &flow_err);
				if (ret < 0)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, flow_err.message ?
					  flow_err.message :
					  "Modify field action validate check fail");
				++actions_n;
				action_flags[i] |= MLX5_FLOW_ACTION_MODIFY_FIELD;
				break;
			default:
				return -rte_mtr_error_set(error, ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL,
					"Doesn't support optional action");
			}
		}
		/* Derive the domain(s) this color's actions can live in. */
		if (action_flags[i] & MLX5_FLOW_ACTION_PORT_ID) {
			domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
		} else if ((action_flags[i] &
			  (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
			  (action_flags[i] & MLX5_FLOW_ACTION_MARK)) {
			/*
			 * Only support MLX5_XMETA_MODE_LEGACY
			 * so MARK action is only in ingress domain.
			 */
			domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
		} else {
			domain_color[i] = def_domain;
			if (action_flags[i] &&
			    !(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
				domain_color[i] &=
						~MLX5_MTR_DOMAIN_TRANSFER_BIT;
		}
		/* A hierarchy meter restricts to the chained policy domains. */
		if (action_flags[i] &
		    MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
			domain_color[i] &= hierarchy_domain;
		/*
		 * Non-termination actions only support NIC Tx domain.
		 * The adjustion should be skipped when there is no
		 * action or only END is provided. The default domains
		 * bit-mask is set to find the MIN intersection.
		 * The action flags checking should also be skipped.
		 */
		if ((def_green && i == RTE_COLOR_GREEN) ||
		    (def_yellow && i == RTE_COLOR_YELLOW))
			continue;
		/*
		 * Validate the drop action mutual exclusion
		 * with other actions. Drop action is mutually-exclusive
		 * with any other action, except for Count action.
		 */
		if ((action_flags[i] & MLX5_FLOW_ACTION_DROP) &&
		    (action_flags[i] & ~MLX5_FLOW_ACTION_DROP)) {
			return -rte_mtr_error_set(error, ENOTSUP,
				RTE_MTR_ERROR_TYPE_METER_POLICY,
				NULL, "Drop action is mutually-exclusive "
				"with any other action");
		}
		/* Eswitch has few restrictions on using items and actions */
		if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
			if (!mlx5_flow_ext_mreg_supported(dev) &&
			    action_flags[i] & MLX5_FLOW_ACTION_MARK)
				return -rte_mtr_error_set(error, ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "unsupported action MARK");
			if (action_flags[i] & MLX5_FLOW_ACTION_QUEUE)
				return -rte_mtr_error_set(error, ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "unsupported action QUEUE");
			if (action_flags[i] & MLX5_FLOW_ACTION_RSS)
				return -rte_mtr_error_set(error, ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "unsupported action RSS");
			if (!(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
				return -rte_mtr_error_set(error, ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "no fate action is found");
		} else {
			if (!(action_flags[i] & MLX5_FLOW_FATE_ACTIONS) &&
			    (domain_color[i] & MLX5_MTR_DOMAIN_INGRESS_BIT)) {
				if ((domain_color[i] &
				     MLX5_MTR_DOMAIN_EGRESS_BIT))
					domain_color[i] =
					MLX5_MTR_DOMAIN_EGRESS_BIT;
				else
					return -rte_mtr_error_set(error,
						ENOTSUP,
						RTE_MTR_ERROR_TYPE_METER_POLICY,
						NULL,
						"no fate action is found");
			}
		}
	}
	/* In hierarchy mode, both colors must terminate with a meter action. */
	if (next_mtr && *policy_mode == MLX5_MTR_POLICY_MODE_ALL) {
		if (!(action_flags[RTE_COLOR_GREEN] & action_flags[RTE_COLOR_YELLOW] &
		      MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY))
			return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_POLICY,
						  NULL,
						  "Meter hierarchy supports meter action only.");
	}
	/* If both colors have RSS, the attributes should be the same. */
	if (flow_dv_mtr_policy_rss_compare(rss_color[RTE_COLOR_GREEN],
					   rss_color[RTE_COLOR_YELLOW]))
		return -rte_mtr_error_set(error, EINVAL,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "policy RSS attr conflict");
	if (rss_color[RTE_COLOR_GREEN] || rss_color[RTE_COLOR_YELLOW])
		*is_rss = true;
	/* "domain_color[C] & BIT(C)" to check domain C supported. */
	if (!def_green && !def_yellow &&
	    domain_color[RTE_COLOR_GREEN] != domain_color[RTE_COLOR_YELLOW] &&
	    !(action_flags[RTE_COLOR_GREEN] & MLX5_FLOW_ACTION_DROP) &&
	    !(action_flags[RTE_COLOR_YELLOW] & MLX5_FLOW_ACTION_DROP))
		return -rte_mtr_error_set(error, EINVAL,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "policy domains conflict");
	/*
	 * At least one color policy is listed in the actions, the domains
	 * to be supported should be the intersection.
	 */
	*domain_bitmap = domain_color[RTE_COLOR_GREEN] &
			 domain_color[RTE_COLOR_YELLOW];
	return 0;
}
18688
18689static int
18690flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
18691{
18692 struct mlx5_priv *priv = dev->data->dev_private;
18693 int ret = 0;
18694
18695 if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
18696 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
18697 flags);
18698 if (ret != 0)
18699 return ret;
18700 }
18701 if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
18702 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
18703 if (ret != 0)
18704 return ret;
18705 }
18706 if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
18707 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
18708 if (ret != 0)
18709 return ret;
18710 }
18711 return 0;
18712}
18713
18714
18715
18716
18717
18718
18719
18720
18721
18722
18723
18724
18725
18726
18727
18728
/**
 * Discover the number of available flow priorities
 * by trying to create a flow with the highest priority value
 * for each possible number.
 *
 * @param[in] dev
 *   Ethernet device.
 * @param[in] vprio
 *   List of possible number of available priorities.
 * @param[in] vprio_n
 *   Size of @p vprio array.
 * @return
 *   On success, number of available flow priorities.
 *   On failure, a negative errno-style code and rte_errno is set.
 */
static int
flow_dv_discover_priorities(struct rte_eth_dev *dev,
			    const uint16_t *vprio, int vprio_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_indexed_pool *pool = priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW];
	struct rte_flow_item_eth eth;
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &eth,
		.mask = &eth,
	};
	struct mlx5_flow_dv_matcher matcher = {
		.mask = {
			.size = sizeof(matcher.mask.buf),
		},
	};
	union mlx5_flow_tbl_key tbl_key;
	struct mlx5_flow flow;
	void *action;
	struct rte_flow_error error;
	uint8_t misc_mask;
	int i, err, ret = -ENOTSUP;

	/*
	 * Try to apply a flow rule with the lowest priority value,
	 * failing both at the rule creation and at the matcher
	 * registration stage means this priority is unsupported.
	 * The probe rule fate is a drop, so it never affects traffic.
	 */
	action = priv->drop_queue.hrxq->action;
	if (action == NULL) {
		DRV_LOG(ERR, "Priority discovery requires a drop action");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	memset(&flow, 0, sizeof(flow));
	flow.handle = mlx5_ipool_zmalloc(pool, &flow.handle_idx);
	if (flow.handle == NULL) {
		DRV_LOG(ERR, "Cannot create flow handle");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	flow.ingress = true;
	flow.dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
	flow.dv.actions[0] = action;
	flow.dv.actions_n = 1;
	/* Match any Ethernet frame (zero spec and mask). */
	memset(&eth, 0, sizeof(eth));
	flow_dv_translate_item_eth(matcher.mask.buf, flow.dv.value.buf,
				   &item, /* inner */ false, /* group */ 0);
	matcher.crc = rte_raw_cksum(matcher.mask.buf, matcher.mask.size);
	for (i = 0; i < vprio_n; i++) {
		/* Configuration expects user-priorities starting from 1. */
		matcher.priority = vprio[i] - 1;
		memset(&tbl_key, 0, sizeof(tbl_key));
		err = flow_dv_matcher_register(dev, &matcher, &tbl_key, &flow,
					       NULL,
					       0,
					       &error);
		if (err != 0) {
			/* This action is pure SW and must always succeed. */
			DRV_LOG(ERR, "Cannot register matcher");
			ret = -rte_errno;
			break;
		}
		/* Try to apply the probe rule to the HW. */
		misc_mask = flow_dv_matcher_enable(flow.dv.value.buf);
		__flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask);
		err = mlx5_flow_os_create_flow
				(flow.handle->dvh.matcher->matcher_object,
				 (void *)&flow.dv.value, flow.dv.actions_n,
				 flow.dv.actions, &flow.handle->drv_flow);
		if (err == 0) {
			/* Priority is accepted - destroy the probe rule. */
			claim_zero(mlx5_flow_os_destroy_flow
					(flow.handle->drv_flow));
			flow.handle->drv_flow = NULL;
		}
		claim_zero(flow_dv_matcher_release(dev, flow.handle));
		if (err != 0)
			break;
		/* Remember the last priority value the HW accepted. */
		ret = vprio[i];
	}
	mlx5_ipool_free(pool, flow.handle_idx);
	/* Set rte_errno if no expected priority value matched. */
	if (ret < 0)
		rte_errno = -ret;
	return ret;
}
18815
/* Flow driver callback table for the DV (Direct Verbs/Rules) engine. */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	/* Flow life-cycle. */
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
	/* Meter tables, policies and hierarchy. */
	.create_mtr_tbls = flow_dv_create_mtr_tbls,
	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
	.destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
	.create_meter = flow_dv_mtr_alloc,
	.free_meter = flow_dv_aso_mtr_release_to_pool,
	.validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
	.create_mtr_acts = flow_dv_create_mtr_policy_acts,
	.destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
	.create_policy_rules = flow_dv_create_policy_rules,
	.destroy_policy_rules = flow_dv_destroy_policy_rules,
	.create_def_policy = flow_dv_create_def_policy,
	.destroy_def_policy = flow_dv_destroy_def_policy,
	.meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
	.meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
	.destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
	/* Counters and aging. */
	.counter_alloc = flow_dv_counter_allocate,
	.counter_free = flow_dv_counter_free,
	.counter_query = flow_dv_counter_query,
	.get_aged_flows = flow_dv_get_aged_flows,
	/* Indirect (shared) actions. */
	.action_validate = flow_dv_action_validate,
	.action_create = flow_dv_action_create,
	.action_destroy = flow_dv_action_destroy,
	.action_update = flow_dv_action_update,
	.action_query = flow_dv_action_query,
	/* Miscellaneous. */
	.sync_domain = flow_dv_sync_domain,
	.discover_priorities = flow_dv_discover_priorities,
	.item_create = flow_dv_item_create,
	.item_release = flow_dv_item_release,
};
18853
18854#endif
18855