#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"

#define NEXT_ITEM_OF_PATTERN(item, pattern, index) \
	do { \
		item = (pattern) + (index); \
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
			(index)++; \
			item = (pattern) + (index); \
		} \
	} while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index) \
	do { \
		act = (actions) + (index); \
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
			(index)++; \
			act = (actions) + (index); \
		} \
	} while (0)
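
/*
 * Both helpers skip VOID entries transparently: a pattern such as
 * ETH / VOID / IPV4 / END is walked exactly like ETH / IPV4 / END,
 * since the index is advanced past every VOID item (or action)
 * before it is returned.
 */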

#define IGB_FLEX_RAW_NUM	12

struct igb_flow_mem_list igb_flow_list;
struct igb_ntuple_filter_list igb_filter_ntuple_list;
struct igb_ethertype_filter_list igb_filter_ethertype_list;
struct igb_syn_filter_list igb_filter_syn_list;
struct igb_flex_filter_list igb_filter_flex_list;
struct igb_rss_filter_list igb_filter_rss_list;
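
/**
 * Parse the rule to see if it is an n-tuple rule, and fill the n-tuple
 * filter info on the way. Note that rte_flow_item uses big endian while
 * rte_flow_attr and rte_flow_action use CPU order.
 * The pattern must be ETH (optional, empty) / IPV4 / TCP, UDP or SCTP /
 * END; only src & dst addresses, protocol and src & dst ports may be
 * masked. The only supported action is QUEUE, and the attribute must be
 * ingress.
 */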
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}
	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = item->mask;
	/**
	 * Only support src & dst addresses and protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP or SCTP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* get the TCP/UDP/SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		if (item->spec && item->mask) {
			tcp_mask = item->mask;
			/**
			 * Only support src & dst ports and tcp flags,
			 * others should be masked.
			 */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			filter->dst_port_mask = tcp_mask->hdr.dst_port;
			filter->src_port_mask = tcp_mask->hdr.src_port;
			if (tcp_mask->hdr.tcp_flags == 0xFF) {
				filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
			} else if (!tcp_mask->hdr.tcp_flags) {
				filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
			} else {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			tcp_spec = item->spec;
			filter->dst_port = tcp_spec->hdr.dst_port;
			filter->src_port = tcp_spec->hdr.src_port;
			filter->tcp_flags = tcp_spec->hdr.tcp_flags;
		}
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		if (item->spec && item->mask) {
			udp_mask = item->mask;
			/**
			 * Only support src & dst ports,
			 * others should be masked.
			 */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			filter->dst_port_mask = udp_mask->hdr.dst_port;
			filter->src_port_mask = udp_mask->hdr.src_port;

			udp_spec = item->spec;
			filter->dst_port = udp_spec->hdr.dst_port;
			filter->src_port = udp_spec->hdr.src_port;
		}
	} else {
		if (item->spec && item->mask) {
			sctp_mask = item->mask;
			/**
			 * Only support src & dst ports,
			 * others should be masked.
			 */
			if (sctp_mask->hdr.tag ||
			    sctp_mask->hdr.cksum) {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			filter->dst_port_mask = sctp_mask->hdr.dst_port;
			filter->src_port_mask = sctp_mask->hdr.src_port;

			sctp_spec = (const struct rte_flow_item_sctp *)
					item->spec;
			filter->dst_port = sctp_spec->hdr.dst_port;
			filter->src_port = sctp_spec->hdr.src_port;
		}
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;

	return 0;
}
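
/**
 * igb-specific wrapper: validates the priority and the per-MAC queue
 * limit, then marks the filter as 5-tuple (82576) or 2-tuple (others).
 */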
static int
igb_parse_ntuple_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
	if (ret)
		return ret;

	/* Igb doesn't support many priorities. */
	if (filter->priority > E1000_2TUPLE_MAX_PRI) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (hw->mac.type == e1000_82576) {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
					"supported by ntuple filter");
			return -rte_errno;
		}
		filter->flags |= RTE_5TUPLE_FLAGS;
	} else {
		if (filter->src_ip_mask || filter->dst_ip_mask ||
			filter->src_port_mask) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "only two tuple are "
					"supported by this filter");
			return -rte_errno;
		}
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
					"supported by ntuple filter");
			return -rte_errno;
		}
		filter->flags |= RTE_2TUPLE_FLAGS;
	}

	return 0;
}
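
/**
 * Parse the rule to see if it is an ethertype rule, and fill the
 * ethertype filter info on the way.
 * The pattern must be a single ETH item: the source MAC mask must be
 * all zeroes, the destination MAC mask all zeroes or all ones, and the
 * ethertype mask all ones. The action must be QUEUE or DROP; the
 * attribute must be ingress with zero priority and group.
 */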
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item *pattern,
			const struct rte_flow_action *actions,
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item should be MAC */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = item->spec;
	eth_mask = item->mask;

	/**
	 * Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!rte_is_zero_ether_addr(&eth_mask->src) ||
	    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
	     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next not void item is END. */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */
	index = 0;

	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->transfer) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}

static int
igb_parse_ethertype_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);
	if (ret)
		return ret;

	if (hw->mac.type == e1000_82576) {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
			memset(filter, 0, sizeof(
					struct rte_eth_ethertype_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not supported "
					"by ethertype filter");
			return -rte_errno;
		}
	} else {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
			memset(filter, 0, sizeof(
					struct rte_eth_ethertype_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not supported "
					"by ethertype filter");
			return -rte_errno;
		}
	}

	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
	    filter->ether_type == RTE_ETHER_TYPE_IPV6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
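
/**
 * Parse the rule to see if it is a TCP SYN rule, and fill the SYN
 * filter info on the way.
 * The pattern must be (optional, empty) ETH / (optional, empty) IPV4
 * or IPV6 / TCP / END, where the TCP item matches exactly the SYN flag
 * and nothing else. The only supported action is QUEUE; the attribute
 * must be ingress with priority 0 (low) or ~0U (high).
 */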
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = item->spec;
	tcp_mask = item->mask;
	if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;

	/* check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}

static int
igb_parse_syn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);
	if (ret)
		return ret;

	if (hw->mac.type == e1000_82576) {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
			memset(filter, 0, sizeof(struct rte_eth_syn_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
					"supported by syn filter");
			return -rte_errno;
		}
	} else {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
			memset(filter, 0, sizeof(struct rte_eth_syn_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
					"supported by syn filter");
			return -rte_errno;
		}
	}

	return 0;
}
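
/**
 * Parse the rule to see if it is a flex byte rule, and fill the flex
 * filter info on the way.
 * The pattern must be one or more RAW items followed by END; every
 * pattern mask byte must be 0xFF and the accumulated length may not
 * exceed IGB_FLEX_FILTER_MAXLEN. The only supported action is QUEUE;
 * the attribute must be ingress.
 */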
static int
cons_parse_flex_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct igb_flex_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_raw *raw_spec;
	const struct rte_flow_item_raw *raw_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index, i, offset, total_offset;
	uint32_t max_offset = 0;
	int32_t shift, j, raw_index = 0;
	int32_t relative[IGB_FLEX_RAW_NUM] = {0};
	int32_t raw_offset[IGB_FLEX_RAW_NUM] = {0};

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

item_loop:

	/* the first not void item should be RAW */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_RAW) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by flex filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	raw_spec = item->spec;
	raw_mask = item->mask;

	if (!raw_mask->length ||
	    !raw_mask->relative) {
		memset(filter, 0, sizeof(struct igb_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by flex filter");
		return -rte_errno;
	}

	if (raw_mask->offset)
		offset = raw_spec->offset;
	else
		offset = 0;

	for (j = 0; j < raw_spec->length; j++) {
		if (raw_mask->pattern[j] != 0xFF) {
			memset(filter, 0, sizeof(struct igb_flex_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by flex filter");
			return -rte_errno;
		}
	}

	total_offset = 0;

	if (raw_spec->relative) {
		for (j = raw_index; j > 0; j--) {
			total_offset += raw_offset[j - 1];
			if (!relative[j - 1])
				break;
		}
		if (total_offset + raw_spec->length + offset > max_offset)
			max_offset = total_offset + raw_spec->length + offset;
	} else {
		if (raw_spec->length + offset > max_offset)
			max_offset = raw_spec->length + offset;
	}

	if ((raw_spec->length + offset + total_offset) >
			IGB_FLEX_FILTER_MAXLEN) {
		memset(filter, 0, sizeof(struct igb_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by flex filter");
		return -rte_errno;
	}

	if (raw_spec->relative == 0) {
		for (j = 0; j < raw_spec->length; j++)
			filter->bytes[offset + j] =
				raw_spec->pattern[j];
		j = offset / CHAR_BIT;
		shift = offset % CHAR_BIT;
	} else {
		for (j = 0; j < raw_spec->length; j++)
			filter->bytes[total_offset + offset + j] =
				raw_spec->pattern[j];
		j = (total_offset + offset) / CHAR_BIT;
		shift = (total_offset + offset) % CHAR_BIT;
	}

	i = 0;

	for ( ; shift < CHAR_BIT; shift++) {
		filter->mask[j] |= (0x80 >> shift);
		i++;
		if (i == raw_spec->length)
			break;
		if (shift == (CHAR_BIT - 1)) {
			j++;
			shift = -1;
		}
	}

	relative[raw_index] = raw_spec->relative;
	raw_offset[raw_index] = offset + raw_spec->length;
	raw_index++;

	/* check if the next not void item is RAW or END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
	    item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by flex filter");
		return -rte_errno;
	}

	/* go back to parser */
	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
		/* if the item is RAW, its content should be parsed too */
		goto item_loop;
	}

	filter->len = RTE_ALIGN(max_offset, 8);

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct igb_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;

	/* check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct igb_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct igb_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct igb_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct igb_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct igb_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}

	filter->priority = (uint16_t)attr->priority;

	return 0;
}

static int
igb_parse_flex_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct igb_flex_filter *filter,
			struct rte_flow_error *error)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	ret = cons_parse_flex_filter(attr, pattern,
					actions, filter, error);
	if (ret)
		return ret;

	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct igb_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue number not supported by flex filter");
		return -rte_errno;
	}

	if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
	    filter->len % sizeof(uint64_t) != 0) {
		PMD_DRV_LOG(ERR, "filter's length is out of range");
		return -EINVAL;
	}

	if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
		PMD_DRV_LOG(ERR, "filter's priority is out of range");
		return -EINVAL;
	}

	return 0;
}
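
/**
 * Parse the actions and attributes of an RSS rule and fill the
 * internal RSS configuration. Only the default hash function, a
 * zero encapsulation level and a 40-byte key are accepted.
 */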
static int
igb_parse_rss_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_action actions[],
			struct igb_rte_flow_rss_conf *rss_conf,
			struct rte_flow_error *error)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	const struct rte_flow_action *act;
	const struct rte_flow_action_rss *rss;
	uint16_t n, index;

	/**
	 * rss only supports forwarding,
	 * check if the first not void action is RSS.
	 */
	index = 0;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rss = (const struct rte_flow_action_rss *)act->conf;

	if (!rss || !rss->queue_num) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act,
			"no valid queues");
		return -rte_errno;
	}

	for (n = 0; n < rss->queue_num; n++) {
		if (rss->queue[n] >= dev->data->nb_rx_queues) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act,
				"queue id > max number of queues");
			return -rte_errno;
		}
	}

	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "non-default RSS hash functions are not supported");
	if (rss->level)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "a nonzero RSS encapsulation level is not supported");
	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "RSS hash key must be exactly 40 bytes");
	if (((hw->mac.type == e1000_82576) &&
	     (rss->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) ||
	    ((hw->mac.type != e1000_82576) &&
	     (rss->queue_num > IGB_MAX_RX_QUEUE_NUM)))
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "too many queues for RSS context");
	if (igb_rss_conf_init(dev, rss_conf, rss))
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "RSS context initialization failure");

	/* check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}

	return 0;
}
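
/**
 * Create a flow rule.
 * Theoretically one rule can match more than one filter type;
 * we let it use the filter type it hits first, so the sequence
 * of the parsers below matters.
 */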
static struct rte_flow *
igb_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct igb_flex_filter flex_filter;
	struct igb_rte_flow_rss_conf rss_conf;
	struct rte_flow *flow = NULL;
	struct igb_ntuple_filter_ele *ntuple_filter_ptr;
	struct igb_ethertype_filter_ele *ethertype_filter_ptr;
	struct igb_eth_syn_filter_ele *syn_filter_ptr;
	struct igb_flex_filter_ele *flex_filter_ptr;
	struct igb_rss_conf_ele *rss_filter_ptr;
	struct igb_flow_mem *igb_flow_mem_ptr;

	flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return NULL;
	}
	igb_flow_mem_ptr = rte_zmalloc("igb_flow_mem",
			sizeof(struct igb_flow_mem), 0);
	if (!igb_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		rte_free(flow);
		return NULL;
	}
	igb_flow_mem_ptr->flow = flow;
	igb_flow_mem_ptr->dev = dev;
	TAILQ_INSERT_TAIL(&igb_flow_list,
			igb_flow_mem_ptr, entries);

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = igb_parse_ntuple_filter(dev, attr, pattern,
			actions, &ntuple_filter, error);
	if (!ret) {
		ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
				sizeof(struct igb_ntuple_filter_ele), 0);
			if (!ntuple_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}

			rte_memcpy(&ntuple_filter_ptr->filter_info,
				&ntuple_filter,
				sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
				ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
			return flow;
		}
		goto out;
	}

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = igb_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret) {
		ret = igb_add_del_ethertype_filter(dev,
				&ethertype_filter, TRUE);
		if (!ret) {
			ethertype_filter_ptr = rte_zmalloc(
				"igb_ethertype_filter",
				sizeof(struct igb_ethertype_filter_ele), 0);
			if (!ethertype_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}

			rte_memcpy(&ethertype_filter_ptr->filter_info,
				&ethertype_filter,
				sizeof(struct rte_eth_ethertype_filter));
			TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
				ethertype_filter_ptr, entries);
			flow->rule = ethertype_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
			return flow;
		}
		goto out;
	}

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = igb_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret) {
		ret = eth_igb_syn_filter_set(dev, &syn_filter, TRUE);
		if (!ret) {
			syn_filter_ptr = rte_zmalloc("igb_syn_filter",
				sizeof(struct igb_eth_syn_filter_ele), 0);
			if (!syn_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}

			rte_memcpy(&syn_filter_ptr->filter_info,
				&syn_filter,
				sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&igb_filter_syn_list,
				syn_filter_ptr,
				entries);
			flow->rule = syn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_SYN;
			return flow;
		}
		goto out;
	}

	memset(&flex_filter, 0, sizeof(struct igb_flex_filter));
	ret = igb_parse_flex_filter(dev, attr, pattern,
					actions, &flex_filter, error);
	if (!ret) {
		ret = eth_igb_add_del_flex_filter(dev, &flex_filter, TRUE);
		if (!ret) {
			flex_filter_ptr = rte_zmalloc("igb_flex_filter",
				sizeof(struct igb_flex_filter_ele), 0);
			if (!flex_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}

			rte_memcpy(&flex_filter_ptr->filter_info,
				&flex_filter,
				sizeof(struct igb_flex_filter));
			TAILQ_INSERT_TAIL(&igb_filter_flex_list,
				flex_filter_ptr, entries);
			flow->rule = flex_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_FLEXIBLE;
			return flow;
		}
	}

	memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
	ret = igb_parse_rss_filter(dev, attr,
					actions, &rss_conf, error);
	if (!ret) {
		ret = igb_config_rss_filter(dev, &rss_conf, TRUE);
		if (!ret) {
			rss_filter_ptr = rte_zmalloc("igb_rss_filter",
				sizeof(struct igb_rss_conf_ele), 0);
			if (!rss_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			igb_rss_conf_init(dev, &rss_filter_ptr->filter_info,
					  &rss_conf.conf);
			TAILQ_INSERT_TAIL(&igb_filter_rss_list,
				rss_filter_ptr, entries);
			flow->rule = rss_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_HASH;
			return flow;
		}
	}

out:
	TAILQ_REMOVE(&igb_flow_list,
		igb_flow_mem_ptr, entries);
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(igb_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}
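
/**
 * Check if the flow rule is supported by igb.
 * It only checks the format. It doesn't guarantee the rule can be
 * programmed into the HW, since there may not be enough room for it.
 */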
static int
igb_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct igb_flex_filter flex_filter;
	struct igb_rte_flow_rss_conf rss_conf;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = igb_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = igb_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = igb_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&flex_filter, 0, sizeof(struct igb_flex_filter));
	ret = igb_parse_flex_filter(dev, attr, pattern,
				actions, &flex_filter, error);
	if (!ret)
		return 0;

	memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
	ret = igb_parse_rss_filter(dev, attr,
					actions, &rss_conf, error);

	return ret;
}
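
/* Destroy a flow rule on igb. */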
static int
igb_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int ret;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct igb_ntuple_filter_ele *ntuple_filter_ptr;
	struct igb_ethertype_filter_ele *ethertype_filter_ptr;
	struct igb_eth_syn_filter_ele *syn_filter_ptr;
	struct igb_flex_filter_ele *flex_filter_ptr;
	struct igb_flow_mem *igb_flow_mem_ptr;
	struct igb_rss_conf_ele *rss_filter_ptr;

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct igb_ntuple_filter_ele *)
					pmd_flow->rule;
		ret = igb_add_del_ntuple_filter(dev,
				&ntuple_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&igb_filter_ntuple_list,
				ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ethertype_filter_ptr = (struct igb_ethertype_filter_ele *)
					pmd_flow->rule;
		ret = igb_add_del_ethertype_filter(dev,
				&ethertype_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&igb_filter_ethertype_list,
				ethertype_filter_ptr, entries);
			rte_free(ethertype_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_SYN:
		syn_filter_ptr = (struct igb_eth_syn_filter_ele *)
				pmd_flow->rule;
		ret = eth_igb_syn_filter_set(dev,
				&syn_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&igb_filter_syn_list,
				syn_filter_ptr, entries);
			rte_free(syn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_FLEXIBLE:
		flex_filter_ptr = (struct igb_flex_filter_ele *)
				pmd_flow->rule;
		ret = eth_igb_add_del_flex_filter(dev,
				&flex_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&igb_filter_flex_list,
				flex_filter_ptr, entries);
			rte_free(flex_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_HASH:
		rss_filter_ptr = (struct igb_rss_conf_ele *)
				pmd_flow->rule;
		ret = igb_config_rss_filter(dev,
					&rss_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&igb_filter_rss_list,
				rss_filter_ptr, entries);
			rte_free(rss_filter_ptr);
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to destroy flow");
		return ret;
	}

	TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
		if (igb_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&igb_flow_list,
				igb_flow_mem_ptr, entries);
			rte_free(igb_flow_mem_ptr);
		}
	}
	rte_free(flow);

	return ret;
}
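
/* remove all the n-tuple filters */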
static void
igb_clear_all_ntuple_filter(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_5tuple_filter *p_5tuple;
	struct e1000_2tuple_filter *p_2tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
		igb_delete_5tuple_filter_82576(dev, p_5tuple);

	while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list)))
		igb_delete_2tuple_filter(dev, p_2tuple);
}
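
/* remove all the ether type filters */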
static void
igb_clear_all_ethertype_filter(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	int i;

	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
		if (filter_info->ethertype_mask & (1 << i)) {
			(void)igb_ethertype_filter_remove(filter_info,
							    (uint8_t)i);
			E1000_WRITE_REG(hw, E1000_ETQF(i), 0);
			E1000_WRITE_FLUSH(hw);
		}
	}
}
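
/* remove the SYN filter */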
static void
igb_clear_syn_filter(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	if (filter_info->syn_info & E1000_SYN_FILTER_ENABLE) {
		filter_info->syn_info = 0;
		E1000_WRITE_REG(hw, E1000_SYNQF(0), 0);
		E1000_WRITE_FLUSH(hw);
	}
}
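
/* remove all the flex filters */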
static void
igb_clear_all_flex_filter(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_flex_filter *flex_filter;

	while ((flex_filter = TAILQ_FIRST(&filter_info->flex_list)))
		igb_remove_flex_filter(dev, flex_filter);
}
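
/* remove the rss filter */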
static void
igb_clear_rss_filter(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	if (filter->rss_info.conf.queue_num)
		igb_config_rss_filter(dev, &filter->rss_info, FALSE);
}

void
igb_filterlist_flush(struct rte_eth_dev *dev)
{
	struct igb_ntuple_filter_ele *ntuple_filter_ptr;
	struct igb_ethertype_filter_ele *ethertype_filter_ptr;
	struct igb_eth_syn_filter_ele *syn_filter_ptr;
	struct igb_flex_filter_ele *flex_filter_ptr;
	struct igb_rss_conf_ele *rss_filter_ptr;
	struct igb_flow_mem *igb_flow_mem_ptr;
	enum rte_filter_type filter_type;
	struct rte_flow *pmd_flow;

	TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
		if (igb_flow_mem_ptr->dev == dev) {
			pmd_flow = igb_flow_mem_ptr->flow;
			filter_type = pmd_flow->filter_type;

			switch (filter_type) {
			case RTE_ETH_FILTER_NTUPLE:
				ntuple_filter_ptr =
					(struct igb_ntuple_filter_ele *)
						pmd_flow->rule;
				TAILQ_REMOVE(&igb_filter_ntuple_list,
						ntuple_filter_ptr, entries);
				rte_free(ntuple_filter_ptr);
				break;
			case RTE_ETH_FILTER_ETHERTYPE:
				ethertype_filter_ptr =
					(struct igb_ethertype_filter_ele *)
						pmd_flow->rule;
				TAILQ_REMOVE(&igb_filter_ethertype_list,
						ethertype_filter_ptr, entries);
				rte_free(ethertype_filter_ptr);
				break;
			case RTE_ETH_FILTER_SYN:
				syn_filter_ptr =
					(struct igb_eth_syn_filter_ele *)
						pmd_flow->rule;
				TAILQ_REMOVE(&igb_filter_syn_list,
						syn_filter_ptr, entries);
				rte_free(syn_filter_ptr);
				break;
			case RTE_ETH_FILTER_FLEXIBLE:
				flex_filter_ptr =
					(struct igb_flex_filter_ele *)
						pmd_flow->rule;
				TAILQ_REMOVE(&igb_filter_flex_list,
						flex_filter_ptr, entries);
				rte_free(flex_filter_ptr);
				break;
			case RTE_ETH_FILTER_HASH:
				rss_filter_ptr =
					(struct igb_rss_conf_ele *)
						pmd_flow->rule;
				TAILQ_REMOVE(&igb_filter_rss_list,
						rss_filter_ptr, entries);
				rte_free(rss_filter_ptr);
				break;
			default:
				PMD_DRV_LOG(WARNING, "Filter type "
					"(%d) not supported", filter_type);
				break;
			}
			TAILQ_REMOVE(&igb_flow_list,
				igb_flow_mem_ptr,
				entries);
			rte_free(igb_flow_mem_ptr->flow);
			rte_free(igb_flow_mem_ptr);
		}
	}
}
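
/* Destroy all flow rules associated with a port on igb. */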
static int
igb_flow_flush(struct rte_eth_dev *dev,
		__rte_unused struct rte_flow_error *error)
{
	igb_clear_all_ntuple_filter(dev);
	igb_clear_all_ethertype_filter(dev);
	igb_clear_syn_filter(dev);
	igb_clear_all_flex_filter(dev);
	igb_clear_rss_filter(dev);
	igb_filterlist_flush(dev);

	return 0;
}

const struct rte_flow_ops igb_flow_ops = {
	.validate = igb_flow_validate,
	.create = igb_flow_create,
	.destroy = igb_flow_destroy,
	.flush = igb_flow_flush,
};
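
/*
 * Example of how an application reaches these ops through the generic
 * rte_flow API (a minimal sketch, not part of the driver; port_id, the
 * queue index and the protocol choice are illustrative assumptions).
 * It builds an n-tuple rule that cons_parse_ntuple_filter() accepts:
 * ETH (empty) / IPV4 with a protocol mask / TCP (empty) / END, with a
 * single QUEUE action on an ingress attribute:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr = { .next_proto_id = IPPROTO_TCP },
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr = { .next_proto_id = 0xFF },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */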