/*
 * rte_flow support for the Wangxun txgbe PMD: parse generic flow
 * patterns and actions into ntuple, ethertype, SYN, L2 tunnel and
 * flow director (fdir) filters.
 */
#include <sys/queue.h>
#include <rte_bus_pci.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "txgbe_ethdev.h"

#define TXGBE_MIN_N_TUPLE_PRIO		1
#define TXGBE_MAX_N_TUPLE_PRIO		7
#define TXGBE_MAX_FLX_SOURCE_OFF	62

struct txgbe_ntuple_filter_ele {
	TAILQ_ENTRY(txgbe_ntuple_filter_ele) entries;
	struct rte_eth_ntuple_filter filter_info;
};

struct txgbe_ethertype_filter_ele {
	TAILQ_ENTRY(txgbe_ethertype_filter_ele) entries;
	struct rte_eth_ethertype_filter filter_info;
};

struct txgbe_eth_syn_filter_ele {
	TAILQ_ENTRY(txgbe_eth_syn_filter_ele) entries;
	struct rte_eth_syn_filter filter_info;
};

struct txgbe_fdir_rule_ele {
	TAILQ_ENTRY(txgbe_fdir_rule_ele) entries;
	struct txgbe_fdir_rule filter_info;
};

struct txgbe_eth_l2_tunnel_conf_ele {
	TAILQ_ENTRY(txgbe_eth_l2_tunnel_conf_ele) entries;
	struct txgbe_l2_tunnel_conf filter_info;
};

struct txgbe_rss_conf_ele {
	TAILQ_ENTRY(txgbe_rss_conf_ele) entries;
	struct txgbe_rte_flow_rss_conf filter_info;
};

struct txgbe_flow_mem {
	TAILQ_ENTRY(txgbe_flow_mem) entries;
	struct rte_flow *flow;
};

TAILQ_HEAD(txgbe_ntuple_filter_list, txgbe_ntuple_filter_ele);
TAILQ_HEAD(txgbe_ethertype_filter_list, txgbe_ethertype_filter_ele);
TAILQ_HEAD(txgbe_syn_filter_list, txgbe_eth_syn_filter_ele);
TAILQ_HEAD(txgbe_fdir_rule_filter_list, txgbe_fdir_rule_ele);
TAILQ_HEAD(txgbe_l2_tunnel_filter_list, txgbe_eth_l2_tunnel_conf_ele);
TAILQ_HEAD(txgbe_rss_filter_list, txgbe_rss_conf_ele);
TAILQ_HEAD(txgbe_flow_mem_list, txgbe_flow_mem);

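/*
 * Each filter type created through rte_flow is remembered on its own
 * list; txgbe_flow_list tracks the allocated rte_flow handles themselves.
 */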
static struct txgbe_ntuple_filter_list filter_ntuple_list;
static struct txgbe_ethertype_filter_list filter_ethertype_list;
static struct txgbe_syn_filter_list filter_syn_list;
static struct txgbe_fdir_rule_filter_list filter_fdir_list;
static struct txgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
static struct txgbe_rss_filter_list filter_rss_list;
static struct txgbe_flow_mem_list txgbe_flow_list;

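/*
 * Pattern items and actions of type VOID carry no matching information.
 * These helpers return the next non-VOID entry, starting from the head
 * of the array when cur is NULL; the terminating END entry is itself
 * non-VOID, so the loops below always stop.
 */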
static inline
const struct rte_flow_item *next_no_void_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];
	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}

static inline
const struct rte_flow_action *next_no_void_action(
		const struct rte_flow_action actions[],
		const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];
	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}
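
/**
 * Parse the rule to see if it is an n-tuple rule, filling *filter on
 * the way.  From the checks below, the accepted pattern is
 *	[ETH (all-zero spec/mask)] [VLAN (all-zero spec/mask)]
 *	IPV4 [TCP | UDP | SCTP] END
 * Only src/dst addresses, next_proto_id and src/dst ports may be
 * masked.  The action list must be QUEUE followed by END, and the
 * attribute must be ingress only; priorities outside
 * [TXGBE_MIN_N_TUPLE_PRIO, TXGBE_MAX_N_TUPLE_PRIO] are clamped to 1.
 */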
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	struct rte_flow_item_eth eth_null;
	struct rte_flow_item_vlan vlan_null;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}
	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
#ifdef RTE_LIB_SECURITY
	/**
	 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
		const void *conf = act->conf;
		/* check if the next not void item is END */
		act = next_no_void_action(actions, act);
		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}

		/* get the IP info from the pattern */
		item = next_no_void_pattern(pattern, NULL);
		while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		       item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			if (item->last ||
			    item->type == RTE_FLOW_ITEM_TYPE_END) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "IP pattern missing.");
				return -rte_errno;
			}
			item = next_no_void_pattern(pattern, item);
		}

		filter->proto = IPPROTO_ESP;
		return txgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
					item->type == RTE_FLOW_ITEM_TYPE_IPV6);
	}
#endif

	/* the first not void item can be ETH or IPV4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		eth_spec = item->spec;
		eth_mask = item->mask;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if ((item->spec && memcmp(eth_spec, &eth_null,
					  sizeof(struct rte_flow_item_eth))) ||
		    (item->mask && memcmp(eth_mask, &eth_null,
					  sizeof(struct rte_flow_item_eth)))) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 or VLAN */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* Skip VLAN */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_spec = item->spec;
		vlan_mask = item->mask;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* the content of the VLAN item should be NULL */
		if ((item->spec && memcmp(vlan_spec, &vlan_null,
					  sizeof(struct rte_flow_item_vlan))) ||
		    (item->mask && memcmp(vlan_mask, &vlan_null,
					  sizeof(struct rte_flow_item_vlan)))) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	if (item->mask) {
		/* get the IPv4 info */
		if (!item->spec || !item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ntuple mask");
			return -rte_errno;
		}

		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		ipv4_mask = item->mask;
		/**
		 * Only support src & dst addresses and protocol;
		 * others should be masked.
		 */
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.hdr_checksum) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		if ((ipv4_mask->hdr.src_addr != 0 &&
		     ipv4_mask->hdr.src_addr != UINT32_MAX) ||
		    (ipv4_mask->hdr.dst_addr != 0 &&
		     ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
		    (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
		     ipv4_mask->hdr.next_proto_id != 0)) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
		filter->src_ip_mask = ipv4_mask->hdr.src_addr;
		filter->proto_mask = ipv4_mask->hdr.next_proto_id;

		ipv4_spec = item->spec;
		filter->dst_ip = ipv4_spec->hdr.dst_addr;
		filter->src_ip = ipv4_spec->hdr.src_addr;
		filter->proto = ipv4_spec->hdr.next_proto_id;
	}

	/* check if the next not void item is TCP, UDP, SCTP or END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
	    item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* L4 wildcard: neither spec nor mask given, jump to the actions */
	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
	    (!item->spec && !item->mask)) {
		goto action;
	}

	/* get the TCP/UDP/SCTP info */
	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
	    (!item->spec || !item->mask)) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

385
386 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
387 tcp_mask = item->mask;
388
389
390
391
392
393 if (tcp_mask->hdr.sent_seq ||
394 tcp_mask->hdr.recv_ack ||
395 tcp_mask->hdr.data_off ||
396 tcp_mask->hdr.rx_win ||
397 tcp_mask->hdr.cksum ||
398 tcp_mask->hdr.tcp_urp) {
399 memset(filter, 0,
400 sizeof(struct rte_eth_ntuple_filter));
401 rte_flow_error_set(error, EINVAL,
402 RTE_FLOW_ERROR_TYPE_ITEM,
403 item, "Not supported by ntuple filter");
404 return -rte_errno;
405 }
406 if ((tcp_mask->hdr.src_port != 0 &&
407 tcp_mask->hdr.src_port != UINT16_MAX) ||
408 (tcp_mask->hdr.dst_port != 0 &&
409 tcp_mask->hdr.dst_port != UINT16_MAX)) {
410 rte_flow_error_set(error,
411 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
412 item, "Not supported by ntuple filter");
413 return -rte_errno;
414 }
415
416 filter->dst_port_mask = tcp_mask->hdr.dst_port;
417 filter->src_port_mask = tcp_mask->hdr.src_port;
418 if (tcp_mask->hdr.tcp_flags == 0xFF) {
419 filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
420 } else if (!tcp_mask->hdr.tcp_flags) {
421 filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
422 } else {
423 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
424 rte_flow_error_set(error, EINVAL,
425 RTE_FLOW_ERROR_TYPE_ITEM,
426 item, "Not supported by ntuple filter");
427 return -rte_errno;
428 }
429
430 tcp_spec = item->spec;
431 filter->dst_port = tcp_spec->hdr.dst_port;
432 filter->src_port = tcp_spec->hdr.src_port;
433 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = item->mask;

		/**
		 * Only support src & dst ports;
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		if ((udp_mask->hdr.src_port != 0 &&
		     udp_mask->hdr.src_port != UINT16_MAX) ||
		    (udp_mask->hdr.dst_port != 0 &&
		     udp_mask->hdr.dst_port != UINT16_MAX)) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		sctp_mask = item->mask;

		/**
		 * Only support src & dst ports;
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	} else {
		goto action;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

action:
	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}

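/*
 * Device-level validation on top of cons_parse_ntuple_filter(): ESP
 * rules go to the security path unchanged; TCP-flag matching is not
 * supported, the priority must lie in [1, 7] and the queue must exist
 * on this port.
 */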
static int
txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

	if (ret)
		return ret;

#ifdef RTE_LIB_SECURITY
	/* ESP flow is handled by the crypto path, not as an ntuple flow */
	if (filter->proto == IPPROTO_ESP)
		return 0;
#endif

	/* txgbe doesn't support tcp flags */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* txgbe doesn't support many priorities */
	if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* fixed value for txgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
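
/**
 * Parse the rule to see if it is an ethertype rule.  From the checks
 * below, the pattern must be a single ETH item whose mask fully covers
 * the ethertype (and optionally the destination MAC), followed by END;
 * the action is QUEUE or DROP followed by END, ingress only, with no
 * priority or group.
 */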
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	item = next_no_void_pattern(pattern, NULL);
	/* The first non-void item should be MAC. */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = item->spec;
	eth_mask = item->mask;

	/**
	 * Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!rte_is_zero_ether_addr(&eth_mask->src) ||
	    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
	     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/**
	 * If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */

	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->transfer) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}

static int
txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
	    filter->ether_type == RTE_ETHER_TYPE_IPV6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
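
/**
 * Parse the rule to see if it is a TCP SYN rule.  From the checks
 * below, the accepted pattern is
 *	[ETH] [IPV4 | IPV6] TCP (tcp_flags == SYN, mask == SYN) END
 * with no spec/mask on the ETH/IP items.  The action is QUEUE then
 * END, ingress only; the priority may be 0 (low) or ~0 (high),
 * nothing else.
 */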
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_eth_syn_filter *filter,
		      struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* the first not void item should be MAC, IPv4, IPv6 or TCP */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only the SYN flag may be matched. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = item->spec;
	tcp_mask = item->mask;
	if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* support two priorities: the lowest (0) or the highest (~0) */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}

static int
txgbe_parse_syn_filter(struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_eth_syn_filter *filter,
		       struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);
	if (ret)
		return ret;

	/* the parser only checks the HW limit; the queue must also
	 * exist on this port.
	 */
	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	return 0;
}
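
/**
 * Parse the rule to see if it is an L2 tunnel (E-tag) rule.  From the
 * checks below, the pattern must be a single E_TAG item matching only
 * GRP and E-CID base (mask 0x3FFF on rsvd_grp_ecid_b), followed by
 * END; the action is VF or PF followed by END, ingress only, with
 * priority 0.
 */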
static int
cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct txgbe_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_vf *act_vf;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* The first not void item should be E-tag. */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	e_tag_spec = item->spec;
	e_tag_mask = item->mask;

	/* Only care about GRP and E-CID base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_ETH_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * GRP and E-CID base are bit fields that
	 * only use 14 bits of rsvd_grp_ecid_b.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is VF or PF */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
	    act->type != RTE_FLOW_ACTION_TYPE_PF) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		filter->pool = act_vf->id;
	} else {
		filter->pool = pci_dev->max_vfs;
	}

	/* check if the next not void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}

static int
txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct txgbe_l2_tunnel_conf *l2_tn_filter,
			 struct rte_flow_error *error)
{
	int ret = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t vf_num;

	ret = cons_parse_l2_tn_filter(dev, attr, pattern,
				actions, l2_tn_filter, error);

	/* the pool is either the PF (== max_vfs) or a valid VF id */
	vf_num = pci_dev->max_vfs;
	if (l2_tn_filter->pool > vf_num)
		return -rte_errno;

	return ret;
}

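/*
 * Parse the attributes and actions of a flow director rule: ingress
 * only, no priority; the action is QUEUE or DROP (DROP only in perfect
 * mode), optionally followed by MARK, then END.
 */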
static int
txgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			  const struct rte_flow_action actions[],
			  struct txgbe_fdir_rule *rule,
			  struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE or DROP */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
	} else {
		/* signature mode does not support drop action */
		if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}
		rule->fdirflags = TXGBE_FDIRPICMD_DROP;
	}

	/* check if the next not void action is MARK */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
	    act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rule->soft_id = 0;

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		act = next_no_void_action(actions, act);
	}

	/* check if the next not void action is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
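
/*
 * Like next_no_void_pattern(), but also skip FUZZY items; FUZZY is
 * only consumed by signature_match() below.
 */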
static inline
const struct rte_flow_item *next_no_fuzzy_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		next_no_void_pattern(pattern, cur);
	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
			return next;
		next = next_no_void_pattern(pattern, next);
	}
}

/* a rule is "signature" if it carries a FUZZY item whose masked
 * threshold range is non-zero and well ordered.
 */
static inline uint8_t signature_match(const struct rte_flow_item pattern[])
{
	const struct rte_flow_item_fuzzy *spec, *last, *mask;
	const struct rte_flow_item *item;
	uint32_t sh, lh, mh;
	int i = 0;

	while (1) {
		item = pattern + i;
		if (item->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
			spec = item->spec;
			last = item->last;
			mask = item->mask;

			if (!spec || !mask)
				return 0;

			sh = spec->thresh;

			if (!last)
				lh = sh;
			else
				lh = last->thresh;

			mh = mask->thresh;
			sh = sh & mh;
			lh = lh & mh;

			if (!sh || sh > lh)
				return 0;

			return 1;
		}

		i++;
	}

	return 0;
}
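
/**
 * Parse the rule to see if it is a flow director rule.
 * From the checks below, the accepted pattern is
 *	[ETH] [IPV4 | IPV6] [TCP | UDP | SCTP] [RAW] END
 * (IPv6 only in signature mode, selected by a FUZZY item).  Only the
 * fields copied into rule->mask may be non-zero in the item masks,
 * and a RAW item selects two flex bytes at an even offset no larger
 * than TXGBE_MAX_FLX_SOURCE_OFF.
 */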
static int
txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
			       const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct txgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	const struct rte_flow_item_ipv6 *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_raw *raw_mask;
	const struct rte_flow_item_raw *raw_spec;
	u32 ptype = 0;
	uint8_t j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may be left wildcarded.  Set the masks to all
	 * ones by default, then clear the ones an item actually uses.
	 */
	memset(rule, 0, sizeof(struct txgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;
	rule->mask.flex_bytes_mask = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
	 */
	item = next_no_fuzzy_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	if (signature_match(pattern))
		rule->mode = RTE_FDIR_MODE_SIGNATURE;
	else
		rule->mode = RTE_FDIR_MODE_PERFECT;

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/**
		 * A spec without a mask cannot be interpreted,
		 * so reject it outright.
		 */
		if (item->spec && !item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		if (item->mask) {
			rule->b_mask = TRUE;
			eth_mask = item->mask;

			/* Ether type should be masked. */
			if (eth_mask->type ||
			    rule->mode == RTE_FDIR_MODE_SIGNATURE) {
				memset(rule, 0, sizeof(struct txgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}

			/* a meaningful ETH item selects MAC VLAN mode */
			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

			/**
			 * The src MAC mask must be all zero and the
			 * dst MAC mask must be all ones.
			 */
			for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
				if (eth_mask->src.addr_bytes[j] ||
				    eth_mask->dst.addr_bytes[j] != 0xFF) {
					memset(rule, 0,
						sizeof(struct txgbe_fdir_rule));
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Not supported by fdir filter");
					return -rte_errno;
				}
			}

			/* when no VLAN, considered as full mask */
			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		}

		/**
		 * Check if the next not void item is VLAN or IPv4.
		 * In MAC VLAN mode only VLAN may follow.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct txgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		} else {
			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
			    item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct txgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}
	}

	/* Get the IPv4 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
		ptype = txgbe_ptype_table[TXGBE_PT_IPV4];

		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst addresses,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		ipv4_mask = item->mask;
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv4_spec = item->spec;
			rule->input.dst_ip[0] =
				ipv4_spec->hdr.dst_addr;
			rule->input.src_ip[0] =
				ipv4_spec->hdr.src_addr;
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or RAW or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the IPv6 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV6;
		ptype = txgbe_ptype_table[TXGBE_PT_IPV6];

		/**
		 * IPv6 is only supported in signature mode; a mask is
		 * mandatory and ranges are not supported.
		 */
		if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
		    item->last ||
		    !item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		rule->b_mask = TRUE;
		ipv6_mask = item->mask;
		if (ipv6_mask->hdr.vtc_flow ||
		    ipv6_mask->hdr.payload_len ||
		    ipv6_mask->hdr.proto ||
		    ipv6_mask->hdr.hop_limits) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* each src address mask byte must be all ones or zero */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
				rule->mask.src_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.src_addr[j] != 0) {
				memset(rule, 0, sizeof(struct txgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		/* each dst address mask byte must be all ones or zero */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
				rule->mask.dst_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.dst_addr[j] != 0) {
				memset(rule, 0, sizeof(struct txgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv6_spec = item->spec;
			rte_memcpy(rule->input.src_ip,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(rule->input.dst_ip,
				   ipv6_spec->hdr.dst_addr, 16);
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or RAW or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->input.flow_type |= TXGBE_ATR_L4TYPE_TCP;
		ptype = txgbe_ptype_table[TXGBE_PT_IPV4_TCP];

		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		tcp_mask = item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			tcp_spec = item->spec;
			rule->input.src_port =
				tcp_spec->hdr.src_port;
			rule->input.dst_port =
				tcp_spec->hdr.dst_port;
		}

		/* check if the next not void item is RAW or END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the UDP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->input.flow_type |= TXGBE_ATR_L4TYPE_UDP;
		ptype = txgbe_ptype_table[TXGBE_PT_IPV4_UDP];

		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		udp_mask = item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			udp_spec = item->spec;
			rule->input.src_port =
				udp_spec->hdr.src_port;
			rule->input.dst_port =
				udp_spec->hdr.dst_port;
		}

		/* check if the next not void item is RAW or END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the SCTP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->input.flow_type |= TXGBE_ATR_L4TYPE_SCTP;
		ptype = txgbe_ptype_table[TXGBE_PT_IPV4_SCTP];

		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		sctp_mask = item->mask;
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = sctp_mask->hdr.src_port;
		rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			sctp_spec = item->spec;
			rule->input.src_port =
				sctp_spec->hdr.src_port;
			rule->input.dst_port =
				sctp_spec->hdr.dst_port;
		}

		/**
		 * In practice only an all-zero SCTP mask passes this
		 * second check, so SCTP port matching is effectively
		 * not supported.
		 */
		sctp_mask = item->mask;
		if (sctp_mask &&
		    (sctp_mask->hdr.src_port ||
		     sctp_mask->hdr.dst_port ||
		     sctp_mask->hdr.tag ||
		     sctp_mask->hdr.cksum)) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check if the next not void item is RAW or END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the flex byte info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* both spec and mask should be given */
		if (!item->mask || !item->spec) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_mask = item->mask;

		/* check mask: every field must be fully masked */
		if (raw_mask->relative != 0x1 ||
		    raw_mask->search != 0x1 ||
		    raw_mask->reserved != 0x0 ||
		    (uint32_t)raw_mask->offset != 0xffffffff ||
		    raw_mask->limit != 0xffff ||
		    raw_mask->length != 0xffff) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_spec = item->spec;

		/* check spec: exactly two pattern bytes at an even offset */
		if (raw_spec->relative != 0 ||
		    raw_spec->search != 0 ||
		    raw_spec->reserved != 0 ||
		    raw_spec->offset > TXGBE_MAX_FLX_SOURCE_OFF ||
		    raw_spec->offset % 2 ||
		    raw_spec->limit != 0 ||
		    raw_spec->length != 2 ||
		    /* pattern can't be 0xffff */
		    (raw_spec->pattern[0] == 0xff &&
		     raw_spec->pattern[1] == 0xff)) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check pattern mask */
		if (raw_mask->pattern[0] != 0xff ||
		    raw_mask->pattern[1] != 0xff) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->mask.flex_bytes_mask = 0xffff;
		rule->input.flex_bytes =
			(((uint16_t)raw_spec->pattern[1]) << 8) |
			raw_spec->pattern[0];
		rule->flex_bytes_offset = raw_spec->offset;
	}

	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* check if the next not void item is END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	rule->input.pkt_type = cpu_to_be16(txgbe_encode_ptype(ptype));

	return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
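
/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
 * From the checks below, the outer headers may only be given as bare
 * items (no spec, no mask):
 *	[ETH] [IPV4 | IPV6] [UDP] VXLAN ... or [ETH] [IPV4 | IPV6] NVGRE ...
 * followed by the inner MAC item, which must carry a mask.  The rule
 * is parsed in perfect-tunnel mode.
 */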
static int
txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct txgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_mask;
	uint32_t j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may be left wildcarded.  Set the masks to all
	 * ones by default, then clear the ones an item actually uses.
	 */
	memset(rule, 0, sizeof(struct txgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
	 */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2243
2244
2245 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2246
2247 if (item->spec || item->mask) {
2248 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2249 rte_flow_error_set(error, EINVAL,
2250 RTE_FLOW_ERROR_TYPE_ITEM,
2251 item, "Not supported by fdir filter");
2252 return -rte_errno;
2253 }
2254
2255 if (item->last) {
2256 rte_flow_error_set(error, EINVAL,
2257 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2258 item, "Not supported last point for range");
2259 return -rte_errno;
2260 }
2261
2262
2263 item = next_no_void_pattern(pattern, item);
2264 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2265 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2266 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2267 rte_flow_error_set(error, EINVAL,
2268 RTE_FLOW_ERROR_TYPE_ITEM,
2269 item, "Not supported by fdir filter");
2270 return -rte_errno;
2271 }
2272 }
2273
2274
2275 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2276 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2277
2278 if (item->spec || item->mask) {
2279 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2280 rte_flow_error_set(error, EINVAL,
2281 RTE_FLOW_ERROR_TYPE_ITEM,
2282 item, "Not supported by fdir filter");
2283 return -rte_errno;
2284 }
2285
2286 if (item->last) {
2287 rte_flow_error_set(error, EINVAL,
2288 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2289 item, "Not supported last point for range");
2290 return -rte_errno;
2291 }
2292
2293
2294 item = next_no_void_pattern(pattern, item);
2295 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2296 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2297 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2298 rte_flow_error_set(error, EINVAL,
2299 RTE_FLOW_ERROR_TYPE_ITEM,
2300 item, "Not supported by fdir filter");
2301 return -rte_errno;
2302 }
2303 }
2304
2305
2306 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2307
2308 if (item->spec || item->mask) {
2309 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2310 rte_flow_error_set(error, EINVAL,
2311 RTE_FLOW_ERROR_TYPE_ITEM,
2312 item, "Not supported by fdir filter");
2313 return -rte_errno;
2314 }
2315
2316 if (item->last) {
2317 rte_flow_error_set(error, EINVAL,
2318 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2319 item, "Not supported last point for range");
2320 return -rte_errno;
2321 }
2322
2323
2324 item = next_no_void_pattern(pattern, item);
2325 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2326 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2327 rte_flow_error_set(error, EINVAL,
2328 RTE_FLOW_ERROR_TYPE_ITEM,
2329 item, "Not supported by fdir filter");
2330 return -rte_errno;
2331 }
2332 }
2333
2334
2335 item = next_no_void_pattern(pattern, item);
2336 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2337 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2338 rte_flow_error_set(error, EINVAL,
2339 RTE_FLOW_ERROR_TYPE_ITEM,
2340 item, "Not supported by fdir filter");
2341 return -rte_errno;
2342 }
2343
2344
2345
2346
2347
2348
2349 if (!item->mask) {
2350 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2351 rte_flow_error_set(error, EINVAL,
2352 RTE_FLOW_ERROR_TYPE_ITEM,
2353 item, "Not supported by fdir filter");
2354 return -rte_errno;
2355 }
2356
2357 if (item->last) {
2358 rte_flow_error_set(error, EINVAL,
2359 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2360 item, "Not supported last point for range");
2361 return -rte_errno;
2362 }
2363 rule->b_mask = TRUE;
2364 eth_mask = item->mask;
2365
2366
2367 if (eth_mask->type) {
2368 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2369 rte_flow_error_set(error, EINVAL,
2370 RTE_FLOW_ERROR_TYPE_ITEM,
2371 item, "Not supported by fdir filter");
2372 return -rte_errno;
2373 }
2374
2375
2376 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
2377 if (eth_mask->src.addr_bytes[j]) {
2378 memset(rule, 0,
2379 sizeof(struct txgbe_fdir_rule));
2380 rte_flow_error_set(error, EINVAL,
2381 RTE_FLOW_ERROR_TYPE_ITEM,
2382 item, "Not supported by fdir filter");
2383 return -rte_errno;
2384 }
2385 }
2386 rule->mask.mac_addr_byte_mask = 0;
2387 for (j = 0; j < ETH_ADDR_LEN; j++) {
2388
2389 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2390 rule->mask.mac_addr_byte_mask |= 0x1 << j;
2391 } else if (eth_mask->dst.addr_bytes[j]) {
2392 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2393 rte_flow_error_set(error, EINVAL,
2394 RTE_FLOW_ERROR_TYPE_ITEM,
2395 item, "Not supported by fdir filter");
2396 return -rte_errno;
2397 }
2398 }
2399
2400
2401 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2402
2403
2404
2405
2406
2407 item = next_no_void_pattern(pattern, item);
2408 if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
2409 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
2410 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2411 rte_flow_error_set(error, EINVAL,
2412 RTE_FLOW_ERROR_TYPE_ITEM,
2413 item, "Not supported by fdir filter");
2414 return -rte_errno;
2415 }
2416
2417 if (item->last) {
2418 rte_flow_error_set(error, EINVAL,
2419 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2420 item, "Not supported last point for range");
2421 return -rte_errno;
2422 }
2423
2424
2425
2426
2427
2428
2429 return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
2430}
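
/*
 * Example (illustrative only): a pattern sequence the tunnel parser above
 * accepts, matching VXLAN traffic by inner destination MAC. All outer
 * items are left fully wildcarded (no spec/mask):
 *
 *	ETH / IPV4 / UDP / VXLAN /
 *	ETH (mask with dst bytes set to 0xFF, src and type zeroed) /
 *	IPV4 / END
 */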

static int
txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct txgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	/* Try the L3/L4 parser first, then fall back to the tunnel parser. */
	ret = txgbe_parse_fdir_filter_normal(dev, attr, pattern,
					     actions, rule, error);
	if (!ret)
		goto step_next;

	ret = txgbe_parse_fdir_filter_tunnel(attr, pattern,
					     actions, rule, error);
	if (ret)
		return ret;

step_next:
	/* Raptor does not support drop rules that match on L4 ports. */
	if (hw->mac.type == txgbe_mac_raptor &&
	    rule->fdirflags == TXGBE_FDIRPICMD_DROP &&
	    (rule->input.src_port != 0 || rule->input.dst_port != 0))
		return -ENOTSUP;

	/* The rule mode must match the flow director mode of the port. */
	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;

	/* The target queue must exist on the port. */
	if (rule->queue >= dev->data->nb_rx_queues)
		return -ENOTSUP;

	return ret;
}

static int
txgbe_parse_rss_filter(struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_action actions[],
		       struct txgbe_rte_flow_rss_conf *rss_conf,
		       struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_rss *rss;
	uint16_t n;

	/**
	 * rss only supports forwarding,
	 * check if the first not void action is RSS.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rss = (const struct rte_flow_action_rss *)act->conf;

	if (!rss || !rss->queue_num) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "no valid queues");
		return -rte_errno;
	}

	for (n = 0; n < rss->queue_num; n++) {
		if (rss->queue[n] >= dev->data->nb_rx_queues) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "queue id > max number of queues");
			return -rte_errno;
		}
	}

	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "non-default RSS hash functions are not supported");
	if (rss->level)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "a nonzero RSS encapsulation level is not supported");
	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "RSS hash key must be exactly 40 bytes");
	if (rss->queue_num > RTE_DIM(rss_conf->queue))
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "too many queues for RSS context");
	if (txgbe_rss_conf_init(rss_conf, rss))
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "RSS context initialization failure");

	/* check if the next not void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}

	return 0;
}
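
/*
 * Example (illustrative only, names are assumptions): an RSS action that
 * passes the checks above on a port with at least four Rx queues. With
 * key_len = 0 the current 40-byte hash key is kept. ETH_RSS_IP is the
 * pre-21.11 name; newer DPDK releases call it RTE_ETH_RSS_IP.
 *
 *	uint16_t rss_queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss_act = {
 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *		.level = 0,
 *		.types = ETH_RSS_IP,
 *		.key_len = 0,
 *		.queue_num = RTE_DIM(rss_queues),
 *		.queue = rss_queues,
 *	};
 */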

/* Remove the active RSS flow rule, if any. */
static void
txgbe_clear_rss_filter(struct rte_eth_dev *dev)
{
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);

	if (filter_info->rss_info.conf.queue_num)
		txgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
}

void
txgbe_filterlist_init(void)
{
	TAILQ_INIT(&filter_ntuple_list);
	TAILQ_INIT(&filter_ethertype_list);
	TAILQ_INIT(&filter_syn_list);
	TAILQ_INIT(&filter_fdir_list);
	TAILQ_INIT(&filter_l2_tunnel_list);
	TAILQ_INIT(&filter_rss_list);
	TAILQ_INIT(&txgbe_flow_list);
}

void
txgbe_filterlist_flush(void)
{
	struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct txgbe_fdir_rule_ele *fdir_rule_ptr;
	struct txgbe_flow_mem *txgbe_flow_mem_ptr;
	struct txgbe_rss_conf_ele *rss_filter_ptr;

	while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
		TAILQ_REMOVE(&filter_ntuple_list,
			     ntuple_filter_ptr, entries);
		rte_free(ntuple_filter_ptr);
	}

	while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
		TAILQ_REMOVE(&filter_ethertype_list,
			     ethertype_filter_ptr, entries);
		rte_free(ethertype_filter_ptr);
	}

	while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
		TAILQ_REMOVE(&filter_syn_list,
			     syn_filter_ptr, entries);
		rte_free(syn_filter_ptr);
	}

	while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
		TAILQ_REMOVE(&filter_l2_tunnel_list,
			     l2_tn_filter_ptr, entries);
		rte_free(l2_tn_filter_ptr);
	}

	while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
		TAILQ_REMOVE(&filter_fdir_list,
			     fdir_rule_ptr, entries);
		rte_free(fdir_rule_ptr);
	}

	while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
		TAILQ_REMOVE(&filter_rss_list,
			     rss_filter_ptr, entries);
		rte_free(rss_filter_ptr);
	}

	/* Flow memory entries own the rte_flow objects; free both. */
	while ((txgbe_flow_mem_ptr = TAILQ_FIRST(&txgbe_flow_list))) {
		TAILQ_REMOVE(&txgbe_flow_list,
			     txgbe_flow_mem_ptr, entries);
		rte_free(txgbe_flow_mem_ptr->flow);
		rte_free(txgbe_flow_mem_ptr);
	}
}
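/**
 * Create a flow rule.
 * Theoretically one rule can match more than one filter type.
 * We will let it use the filter which it hits first.
 * So, the sequence of the parsers below matters.
 */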
static struct rte_flow *
txgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct txgbe_fdir_rule fdir_rule;
	struct txgbe_l2_tunnel_conf l2_tn_filter;
	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
	struct txgbe_rte_flow_rss_conf rss_conf;
	struct rte_flow *flow = NULL;
	struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct txgbe_fdir_rule_ele *fdir_rule_ptr;
	struct txgbe_rss_conf_ele *rss_filter_ptr;
	struct txgbe_flow_mem *txgbe_flow_mem_ptr;
	uint8_t first_mask = FALSE;

	flow = rte_zmalloc("txgbe_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return NULL;
	}
	txgbe_flow_mem_ptr = rte_zmalloc("txgbe_flow_mem",
					 sizeof(struct txgbe_flow_mem), 0);
	if (!txgbe_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		rte_free(flow);
		return NULL;
	}
	txgbe_flow_mem_ptr->flow = flow;
	TAILQ_INSERT_TAIL(&txgbe_flow_list,
			  txgbe_flow_mem_ptr, entries);

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
					actions, &ntuple_filter, error);

#ifdef RTE_LIB_SECURITY
	/* ESP flow not really a flow */
	if (ntuple_filter.proto == IPPROTO_ESP)
		return flow;
#endif

	if (!ret) {
		ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("txgbe_ntuple_filter",
				sizeof(struct txgbe_ntuple_filter_ele), 0);
			if (!ntuple_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&ntuple_filter_ptr->filter_info,
				   &ntuple_filter,
				   sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&filter_ntuple_list,
					  ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
			return flow;
		}
		goto out;
	}

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
					   actions, &ethertype_filter, error);
	if (!ret) {
		ret = txgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, TRUE);
		if (!ret) {
			ethertype_filter_ptr =
				rte_zmalloc("txgbe_ethertype_filter",
				sizeof(struct txgbe_ethertype_filter_ele), 0);
			if (!ethertype_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&ethertype_filter_ptr->filter_info,
				   &ethertype_filter,
				   sizeof(struct rte_eth_ethertype_filter));
			TAILQ_INSERT_TAIL(&filter_ethertype_list,
					  ethertype_filter_ptr, entries);
			flow->rule = ethertype_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
			return flow;
		}
		goto out;
	}

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = txgbe_parse_syn_filter(dev, attr, pattern,
				     actions, &syn_filter, error);
	if (!ret) {
		ret = txgbe_syn_filter_set(dev, &syn_filter, TRUE);
		if (!ret) {
			syn_filter_ptr = rte_zmalloc("txgbe_syn_filter",
				sizeof(struct txgbe_eth_syn_filter_ele), 0);
			if (!syn_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&syn_filter_ptr->filter_info,
				   &syn_filter,
				   sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&filter_syn_list,
					  syn_filter_ptr, entries);
			flow->rule = syn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_SYN;
			return flow;
		}
		goto out;
	}

	memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
	ret = txgbe_parse_fdir_filter(dev, attr, pattern,
				      actions, &fdir_rule, error);
	if (!ret) {
		/* A mask cannot be deleted. */
		if (fdir_rule.b_mask) {
			if (!fdir_info->mask_added) {
				/* It's the first time the mask is set. */
				rte_memcpy(&fdir_info->mask,
					   &fdir_rule.mask,
					   sizeof(struct txgbe_hw_fdir_mask));
				fdir_info->flex_bytes_offset =
					fdir_rule.flex_bytes_offset;

				if (fdir_rule.mask.flex_bytes_mask)
					txgbe_fdir_set_flexbytes_offset(dev,
						fdir_rule.flex_bytes_offset);

				ret = txgbe_fdir_set_input_mask(dev);
				if (ret)
					goto out;

				fdir_info->mask_added = TRUE;
				first_mask = TRUE;
			} else {
				/**
				 * Only support one global mask,
				 * all the masks should be the same.
				 */
				ret = memcmp(&fdir_info->mask,
					     &fdir_rule.mask,
					     sizeof(struct txgbe_hw_fdir_mask));
				if (ret)
					goto out;

				if (fdir_info->flex_bytes_offset !=
				    fdir_rule.flex_bytes_offset)
					goto out;
			}
		}

		if (fdir_rule.b_spec) {
			ret = txgbe_fdir_filter_program(dev, &fdir_rule,
							FALSE, FALSE);
			if (!ret) {
				fdir_rule_ptr = rte_zmalloc("txgbe_fdir_filter",
					sizeof(struct txgbe_fdir_rule_ele), 0);
				if (!fdir_rule_ptr) {
					PMD_DRV_LOG(ERR,
						"failed to allocate memory");
					goto out;
				}
				rte_memcpy(&fdir_rule_ptr->filter_info,
					   &fdir_rule,
					   sizeof(struct txgbe_fdir_rule));
				TAILQ_INSERT_TAIL(&filter_fdir_list,
						  fdir_rule_ptr, entries);
				flow->rule = fdir_rule_ptr;
				flow->filter_type = RTE_ETH_FILTER_FDIR;

				return flow;
			}

			if (ret) {
				/**
				 * clean the mask_added flag if fail to
				 * program
				 */
				if (first_mask)
					fdir_info->mask_added = FALSE;
				goto out;
			}
		}

		goto out;
	}

	memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
	ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
				       actions, &l2_tn_filter, error);
	if (!ret) {
		ret = txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
		if (!ret) {
			l2_tn_filter_ptr = rte_zmalloc("txgbe_l2_tn_filter",
				sizeof(struct txgbe_eth_l2_tunnel_conf_ele), 0);
			if (!l2_tn_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&l2_tn_filter_ptr->filter_info,
				   &l2_tn_filter,
				   sizeof(struct txgbe_l2_tunnel_conf));
			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
					  l2_tn_filter_ptr, entries);
			flow->rule = l2_tn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
			return flow;
		}
	}

	memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
	ret = txgbe_parse_rss_filter(dev, attr,
				     actions, &rss_conf, error);
	if (!ret) {
		ret = txgbe_config_rss_filter(dev, &rss_conf, TRUE);
		if (!ret) {
			rss_filter_ptr = rte_zmalloc("txgbe_rss_filter",
				sizeof(struct txgbe_rss_conf_ele), 0);
			if (!rss_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			txgbe_rss_conf_init(&rss_filter_ptr->filter_info,
					    &rss_conf.conf);
			TAILQ_INSERT_TAIL(&filter_rss_list,
					  rss_filter_ptr, entries);
			flow->rule = rss_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_HASH;
			return flow;
		}
	}

out:
	TAILQ_REMOVE(&txgbe_flow_list,
		     txgbe_flow_mem_ptr, entries);
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(txgbe_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}
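/**
 * Check whether the flow rule is supported by txgbe.
 * It only checks the format; it doesn't guarantee the rule can be
 * programmed into the HW, since there may not be enough room for it.
 */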
static int
txgbe_flow_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item pattern[],
		    const struct rte_flow_action actions[],
		    struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct txgbe_l2_tunnel_conf l2_tn_filter;
	struct txgbe_fdir_rule fdir_rule;
	struct txgbe_rte_flow_rss_conf rss_conf;
	int ret = 0;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
					actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
					   actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = txgbe_parse_syn_filter(dev, attr, pattern,
				     actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
	ret = txgbe_parse_fdir_filter(dev, attr, pattern,
				      actions, &fdir_rule, error);
	if (!ret)
		return 0;

	memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
	ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
				       actions, &l2_tn_filter, error);
	if (!ret)
		return 0;

	memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
	ret = txgbe_parse_rss_filter(dev, attr,
				     actions, &rss_conf, error);

	return ret;
}
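/* Destroy a flow rule on txgbe. */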
static int
txgbe_flow_destroy(struct rte_eth_dev *dev,
		   struct rte_flow *flow,
		   struct rte_flow_error *error)
{
	int ret = 0;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct txgbe_fdir_rule fdir_rule;
	struct txgbe_l2_tunnel_conf l2_tn_filter;
	struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct txgbe_fdir_rule_ele *fdir_rule_ptr;
	struct txgbe_flow_mem *txgbe_flow_mem_ptr;
	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
	struct txgbe_rss_conf_ele *rss_filter_ptr;

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct txgbe_ntuple_filter_ele *)
			pmd_flow->rule;
		rte_memcpy(&ntuple_filter,
			   &ntuple_filter_ptr->filter_info,
			   sizeof(struct rte_eth_ntuple_filter));
		ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ntuple_list,
				     ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ethertype_filter_ptr = (struct txgbe_ethertype_filter_ele *)
			pmd_flow->rule;
		rte_memcpy(&ethertype_filter,
			   &ethertype_filter_ptr->filter_info,
			   sizeof(struct rte_eth_ethertype_filter));
		ret = txgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ethertype_list,
				     ethertype_filter_ptr, entries);
			rte_free(ethertype_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_SYN:
		syn_filter_ptr = (struct txgbe_eth_syn_filter_ele *)
			pmd_flow->rule;
		rte_memcpy(&syn_filter,
			   &syn_filter_ptr->filter_info,
			   sizeof(struct rte_eth_syn_filter));
		ret = txgbe_syn_filter_set(dev, &syn_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_syn_list,
				     syn_filter_ptr, entries);
			rte_free(syn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		fdir_rule_ptr = (struct txgbe_fdir_rule_ele *)pmd_flow->rule;
		rte_memcpy(&fdir_rule,
			   &fdir_rule_ptr->filter_info,
			   sizeof(struct txgbe_fdir_rule));
		ret = txgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_fdir_list,
				     fdir_rule_ptr, entries);
			rte_free(fdir_rule_ptr);
			/* The global mask can be reset once no rule uses it. */
			if (TAILQ_EMPTY(&filter_fdir_list))
				fdir_info->mask_added = false;
		}
		break;
	case RTE_ETH_FILTER_L2_TUNNEL:
		l2_tn_filter_ptr = (struct txgbe_eth_l2_tunnel_conf_ele *)
			pmd_flow->rule;
		rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
			   sizeof(struct txgbe_l2_tunnel_conf));
		ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
		if (!ret) {
			TAILQ_REMOVE(&filter_l2_tunnel_list,
				     l2_tn_filter_ptr, entries);
			rte_free(l2_tn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_HASH:
		rss_filter_ptr = (struct txgbe_rss_conf_ele *)
			pmd_flow->rule;
		ret = txgbe_config_rss_filter(dev,
				&rss_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_rss_list,
				     rss_filter_ptr, entries);
			rte_free(rss_filter_ptr);
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Failed to destroy flow");
		return ret;
	}

	TAILQ_FOREACH(txgbe_flow_mem_ptr, &txgbe_flow_list, entries) {
		if (txgbe_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&txgbe_flow_list,
				     txgbe_flow_mem_ptr, entries);
			rte_free(txgbe_flow_mem_ptr);
		}
	}
	rte_free(flow);

	return ret;
}
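/* Destroy all flow rules associated with a port on txgbe. */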
static int
txgbe_flow_flush(struct rte_eth_dev *dev,
		 struct rte_flow_error *error)
{
	int ret = 0;

	txgbe_clear_all_ntuple_filter(dev);
	txgbe_clear_all_ethertype_filter(dev);
	txgbe_clear_syn_filter(dev);

	ret = txgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Failed to flush rule");
		return ret;
	}

	ret = txgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Failed to flush rule");
		return ret;
	}

	txgbe_clear_rss_filter(dev);

	txgbe_filterlist_flush();

	return 0;
}

const struct rte_flow_ops txgbe_flow_ops = {
	.validate = txgbe_flow_validate,
	.create = txgbe_flow_create,
	.destroy = txgbe_flow_destroy,
	.flush = txgbe_flow_flush,
};
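
/*
 * Usage sketch (illustrative only, not part of the driver): an application
 * reaches the ops above through the generic rte_flow API. port_id and the
 * queue index are assumptions for the example.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */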