// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/udp_tunnel.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>

#include <linux/qed/qed_if.h>
#include "qede.h"

#define QEDE_FILTER_PRINT_MAX_LEN	(64)
struct qede_arfs_tuple {
	union {
		__be32 src_ipv4;
		struct in6_addr src_ipv6;
	};
	union {
		__be32 dst_ipv4;
		struct in6_addr dst_ipv6;
	};
	__be16	src_port;
	__be16	dst_port;
	__be16	eth_proto;
	u8	ip_proto;

	/* Describe filtering mode needed for this kind of filter */
	enum qed_filter_config_mode mode;

	/* Used to compare new/old filters. Return true if IPs match */
	bool (*ip_comp)(struct qede_arfs_tuple *a, struct qede_arfs_tuple *b);

	/* Given an address into ethhdr build a header from tuple info */
	void (*build_hdr)(struct qede_arfs_tuple *t, void *header);

	/* Stringify the tuple for a print into the provided buffer */
	void (*stringify)(struct qede_arfs_tuple *t, void *buffer);
};

struct qede_arfs_fltr_node {
#define QEDE_FLTR_VALID	 0
	unsigned long state;

	/* pointer to aRFS packet buffer */
	void *data;

	/* dma map address of aRFS packet buffer */
	dma_addr_t mapping;

	/* length of aRFS packet buffer */
	int buf_len;

	/* tuples to hold from aRFS packet buffer */
	struct qede_arfs_tuple tuple;

	u32 flow_id;
	u64 sw_id;
	u16 rxq_id;
	u16 next_rxq_id;
	u8 vfid;
	bool filter_op;
	bool used;
	u8 fw_rc;
	bool b_is_drop;
	struct hlist_node node;
};

struct qede_arfs {
#define QEDE_ARFS_BUCKET_HEAD(edev, idx) (&(edev)->arfs->arfs_hl_head[idx])
#define QEDE_ARFS_POLL_COUNT	100
#define QEDE_RFS_FLW_BITSHIFT	(4)
#define QEDE_RFS_FLW_MASK	((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
	struct hlist_head	arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];

	/* lock for filter list access */
	spinlock_t		arfs_list_lock;
	unsigned long		*arfs_fltr_bmap;
	int			filter_count;

	/* Currently configured filtering mode */
	enum qed_filter_config_mode mode;
};
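
/* Bucket selection: an aRFS filter lands in one of
 * 2^QEDE_RFS_FLW_BITSHIFT = 16 hash buckets, chosen by masking the raw
 * skb hash with QEDE_RFS_FLW_MASK, e.g. a hash of 0x5ca3 maps to bucket
 * 0x5ca3 & 0xf = 0x3. Ethtool/TC classifier rules always live in bucket
 * 0 (see the QEDE_ARFS_BUCKET_HEAD(edev, 0) users below).
 */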

static void qede_configure_arfs_fltr(struct qede_dev *edev,
				     struct qede_arfs_fltr_node *n,
				     u16 rxq_id, bool add_fltr)
{
	const struct qed_eth_ops *op = edev->ops;
	struct qed_ntuple_filter_params params;

	if (n->used)
		return;

	memset(&params, 0, sizeof(params));

	params.addr = n->mapping;
	params.length = n->buf_len;
	params.qid = rxq_id;
	params.b_is_add = add_fltr;
	params.b_is_drop = n->b_is_drop;

	if (n->vfid) {
		params.b_is_vf = true;
		params.vf_id = n->vfid - 1;
	}

	if (n->tuple.stringify) {
		char tuple_buffer[QEDE_FILTER_PRINT_MAX_LEN];

		n->tuple.stringify(&n->tuple, tuple_buffer);
		DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
			   "%s sw_id[0x%llx]: %s [vf %u queue %d]\n",
			   add_fltr ? "Adding" : "Deleting",
			   n->sw_id, tuple_buffer, n->vfid, rxq_id);
	}

	n->used = true;
	n->filter_op = add_fltr;
	op->ntuple_filter_config(edev->cdev, n, &params);
}

static void
qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
{
	kfree(fltr->data);

	if (fltr->sw_id < QEDE_RFS_MAX_FLTR)
		clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);

	kfree(fltr);
}

static int
qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
				      struct qede_arfs_fltr_node *fltr,
				      u16 bucket_idx)
{
	fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data,
				       fltr->buf_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) {
		DP_NOTICE(edev, "Failed to map DMA memory for rule\n");
		qede_free_arfs_filter(edev, fltr);
		return -ENOMEM;
	}

	INIT_HLIST_NODE(&fltr->node);
	hlist_add_head(&fltr->node,
		       QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx));

	/* Enable the searcher in the required mode when the first filter
	 * gets installed.
	 */
	edev->arfs->filter_count++;
	if (edev->arfs->filter_count == 1 &&
	    edev->arfs->mode == QED_FILTER_CONFIG_MODE_DISABLE) {
		edev->ops->configure_arfs_searcher(edev->cdev,
						   fltr->tuple.mode);
		edev->arfs->mode = fltr->tuple.mode;
	}

	return 0;
}

static void
qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
				      struct qede_arfs_fltr_node *fltr)
{
	hlist_del(&fltr->node);
	dma_unmap_single(&edev->pdev->dev, fltr->mapping,
			 fltr->buf_len, DMA_TO_DEVICE);

	qede_free_arfs_filter(edev, fltr);

	/* Disable the searcher once the last filter is gone */
	edev->arfs->filter_count--;
	if (!edev->arfs->filter_count &&
	    edev->arfs->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
		enum qed_filter_config_mode mode;

		mode = QED_FILTER_CONFIG_MODE_DISABLE;
		edev->ops->configure_arfs_searcher(edev->cdev, mode);
		edev->arfs->mode = QED_FILTER_CONFIG_MODE_DISABLE;
	}
}

void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
{
	struct qede_arfs_fltr_node *fltr = filter;
	struct qede_dev *edev = dev;

	fltr->fw_rc = fw_rc;

	if (fw_rc) {
		DP_NOTICE(edev,
			  "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=0x%llx, src_port=%d, dst_port=%d, rxq=%d\n",
			  fw_rc, fltr->flow_id, fltr->sw_id,
			  ntohs(fltr->tuple.src_port),
			  ntohs(fltr->tuple.dst_port), fltr->rxq_id);

		spin_lock_bh(&edev->arfs->arfs_list_lock);

		fltr->used = false;
		clear_bit(QEDE_FLTR_VALID, &fltr->state);

		spin_unlock_bh(&edev->arfs->arfs_list_lock);
		return;
	}

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	fltr->used = false;

	if (fltr->filter_op) {
		set_bit(QEDE_FLTR_VALID, &fltr->state);
		if (fltr->rxq_id != fltr->next_rxq_id)
			qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
						 false);
	} else {
		clear_bit(QEDE_FLTR_VALID, &fltr->state);
		if (fltr->rxq_id != fltr->next_rxq_id) {
			fltr->rxq_id = fltr->next_rxq_id;
			qede_configure_arfs_fltr(edev, fltr,
						 fltr->rxq_id, true);
		}
	}

	spin_unlock_bh(&edev->arfs->arfs_list_lock);
}
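
/* A note on the completion flow above: when an "add" completes but the
 * desired queue changed meanwhile (rxq_id != next_rxq_id), the stale
 * filter is deleted; when that "delete" completes, the filter is
 * re-added on the new queue. A queue migration therefore costs two
 * firmware round-trips, but two conflicting rules are never installed
 * at the same time.
 */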

/* Should be called while qede_lock is held */
void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
{
	int i;

	for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
		struct hlist_node *temp;
		struct hlist_head *head;
		struct qede_arfs_fltr_node *fltr;

		head = &edev->arfs->arfs_hl_head[i];

		hlist_for_each_entry_safe(fltr, temp, head, node) {
			bool del = false;

			if (edev->state != QEDE_STATE_OPEN)
				del = true;

			spin_lock_bh(&edev->arfs->arfs_list_lock);

			if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
			     !fltr->used) || free_fltr) {
				qede_dequeue_fltr_and_config_searcher(edev,
								      fltr);
			} else {
				bool flow_exp = false;
#ifdef CONFIG_RFS_ACCEL
				flow_exp = rps_may_expire_flow(edev->ndev,
							       fltr->rxq_id,
							       fltr->flow_id,
							       fltr->sw_id);
#endif
				if ((flow_exp || del) && !free_fltr)
					qede_configure_arfs_fltr(edev, fltr,
								 fltr->rxq_id,
								 false);
			}

			spin_unlock_bh(&edev->arfs->arfs_list_lock);
		}
	}

#ifdef CONFIG_RFS_ACCEL
	spin_lock_bh(&edev->arfs->arfs_list_lock);

	if (edev->arfs->filter_count) {
		set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
		schedule_delayed_work(&edev->sp_task,
				      QEDE_SP_TASK_POLL_DELAY);
	}

	spin_unlock_bh(&edev->arfs->arfs_list_lock);
#endif
}

/* This function waits for all aRFS filters to be deleted and freed.
 * On timeout it frees all filters forcefully.
 */
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
{
	int count = QEDE_ARFS_POLL_COUNT;

	while (count) {
		qede_process_arfs_filters(edev, false);

		if (!edev->arfs->filter_count)
			break;

		msleep(100);
		count--;
	}

	if (!count) {
		DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");

		/* Something is terribly wrong, free forcefully */
		qede_process_arfs_filters(edev, true);
	}
}
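
/* Worst-case wait above: QEDE_ARFS_POLL_COUNT iterations of msleep(100),
 * i.e. 100 * 100 ms = 10 seconds, before filters are freed forcefully.
 */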

int qede_alloc_arfs(struct qede_dev *edev)
{
	int i;

	if (!edev->dev_info.common.b_arfs_capable)
		return -EINVAL;

	edev->arfs = vzalloc(sizeof(*edev->arfs));
	if (!edev->arfs)
		return -ENOMEM;

	spin_lock_init(&edev->arfs->arfs_list_lock);

	for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
		INIT_HLIST_HEAD(QEDE_ARFS_BUCKET_HEAD(edev, i));

	edev->arfs->arfs_fltr_bmap =
		vzalloc(array_size(sizeof(long),
				   BITS_TO_LONGS(QEDE_RFS_MAX_FLTR)));
	if (!edev->arfs->arfs_fltr_bmap) {
		vfree(edev->arfs);
		edev->arfs = NULL;
		return -ENOMEM;
	}

#ifdef CONFIG_RFS_ACCEL
	edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
	if (!edev->ndev->rx_cpu_rmap) {
		vfree(edev->arfs->arfs_fltr_bmap);
		edev->arfs->arfs_fltr_bmap = NULL;
		vfree(edev->arfs);
		edev->arfs = NULL;
		return -ENOMEM;
	}
#endif
	return 0;
}

void qede_free_arfs(struct qede_dev *edev)
{
	if (!edev->arfs)
		return;

#ifdef CONFIG_RFS_ACCEL
	if (edev->ndev->rx_cpu_rmap)
		free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);

	edev->ndev->rx_cpu_rmap = NULL;
#endif
	vfree(edev->arfs->arfs_fltr_bmap);
	edev->arfs->arfs_fltr_bmap = NULL;
	vfree(edev->arfs);
	edev->arfs = NULL;
}

#ifdef CONFIG_RFS_ACCEL
static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
				 const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
		    tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
			return true;
		else
			return false;
	} else {
		struct in6_addr *src = &tpos->tuple.src_ipv6;
		u8 size = sizeof(struct in6_addr);

		if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
		    !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
			return true;
		else
			return false;
	}
}

static struct qede_arfs_fltr_node *
qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
			  __be16 src_port, __be16 dst_port, u8 ip_proto)
{
	struct qede_arfs_fltr_node *tpos;

	hlist_for_each_entry(tpos, h, node)
		if (tpos->tuple.ip_proto == ip_proto &&
		    tpos->tuple.eth_proto == skb->protocol &&
		    qede_compare_ip_addr(tpos, skb) &&
		    tpos->tuple.src_port == src_port &&
		    tpos->tuple.dst_port == dst_port)
			return tpos;

	return NULL;
}

static struct qede_arfs_fltr_node *
qede_alloc_filter(struct qede_dev *edev, int min_hlen)
{
	struct qede_arfs_fltr_node *n;
	int bit_id;

	bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
				     QEDE_RFS_MAX_FLTR);

	if (bit_id >= QEDE_RFS_MAX_FLTR)
		return NULL;

	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n)
		return NULL;

	n->data = kzalloc(min_hlen, GFP_ATOMIC);
	if (!n->data) {
		kfree(n);
		return NULL;
	}

	n->sw_id = (u16)bit_id;
	set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
	return n;
}

int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq_index, u32 flow_id)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_arfs_fltr_node *n;
	int min_hlen, rc, tp_offset;
	struct ethhdr *eth;
	__be16 *ports;
	u16 tbl_idx;
	u8 ip_proto;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (skb->protocol != htons(ETH_P_IP) &&
	    skb->protocol != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	if (skb->protocol == htons(ETH_P_IP)) {
		ip_proto = ip_hdr(skb)->protocol;
		tp_offset = sizeof(struct iphdr);
	} else {
		ip_proto = ipv6_hdr(skb)->nexthdr;
		tp_offset = sizeof(struct ipv6hdr);
	}

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
		return -EPROTONOSUPPORT;

	ports = (__be16 *)(skb->data + tp_offset);
	tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	n = qede_arfs_htbl_key_search(QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx),
				      skb, ports[0], ports[1], ip_proto);
	if (n) {
		/* Filter match */
		n->next_rxq_id = rxq_index;

		if (test_bit(QEDE_FLTR_VALID, &n->state)) {
			if (n->rxq_id != rxq_index)
				qede_configure_arfs_fltr(edev, n, n->rxq_id,
							 false);
		} else {
			if (!n->used) {
				n->rxq_id = rxq_index;
				qede_configure_arfs_fltr(edev, n, n->rxq_id,
							 true);
			}
		}

		rc = n->sw_id;
		goto ret_unlock;
	}

	min_hlen = ETH_HLEN + skb_headlen(skb);

	n = qede_alloc_filter(edev, min_hlen);
	if (!n) {
		rc = -ENOMEM;
		goto ret_unlock;
	}

	n->buf_len = min_hlen;
	n->rxq_id = rxq_index;
	n->next_rxq_id = rxq_index;
	n->tuple.src_port = ports[0];
	n->tuple.dst_port = ports[1];
	n->flow_id = flow_id;

	if (skb->protocol == htons(ETH_P_IP)) {
		n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
		n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
	} else {
		memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
		       sizeof(struct in6_addr));
		memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
		       sizeof(struct in6_addr));
	}

	eth = (struct ethhdr *)n->data;
	eth->h_proto = skb->protocol;
	n->tuple.eth_proto = skb->protocol;
	n->tuple.ip_proto = ip_proto;
	n->tuple.mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
	memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));

	rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx);
	if (rc)
		goto ret_unlock;

	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);

	spin_unlock_bh(&edev->arfs->arfs_list_lock);

	set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	return n->sw_id;

ret_unlock:
	spin_unlock_bh(&edev->arfs->arfs_list_lock);
	return rc;
}
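
/* Return-value convention: as a .ndo_rx_flow_steer implementation,
 * qede_rx_flow_steer() returns the filter identifier (sw_id) on success
 * or a negative errno; the RFS core later hands that identifier back via
 * rps_may_expire_flow() in qede_process_arfs_filters().
 */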
#endif

void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
{
	struct qede_dev *edev = dev;

	if (edev->vxlan_dst_port != vxlan_port)
		edev->vxlan_dst_port = 0;

	if (edev->geneve_dst_port != geneve_port)
		edev->geneve_dst_port = 0;
}

void qede_force_mac(void *dev, u8 *mac, bool forced)
{
	struct qede_dev *edev = dev;

	__qede_lock(edev);

	if (!is_valid_ether_addr(mac)) {
		__qede_unlock(edev);
		return;
	}

	ether_addr_copy(edev->ndev->dev_addr, mac);
	__qede_unlock(edev);
}

void qede_fill_rss_params(struct qede_dev *edev,
			  struct qed_update_vport_rss_params *rss, u8 *update)
{
	bool need_reset = false;
	int i;

	if (QEDE_RSS_COUNT(edev) <= 1) {
		memset(rss, 0, sizeof(*rss));
		*update = 0;
		return;
	}

	/* Need to validate current RSS config uses valid entries */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
			need_reset = true;
			break;
		}
	}

	if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
			u16 indir_val, val;

			val = QEDE_RSS_COUNT(edev);
			indir_val = ethtool_rxfh_indir_default(i, val);
			edev->rss_ind_table[i] = indir_val;
		}
		edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
	}

	/* Now that we have the queue-indirection, prepare the handles */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);

		rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
	}

	if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
		netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
		edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
	}
	memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));

	if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
		edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
				 QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
		edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
	}
	rss->rss_caps = edev->rss_caps;

	*update = 1;
}

static int qede_set_ucast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char mac[ETH_ALEN])
{
	struct qed_filter_params filter_cmd;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.mac_valid = 1;
	ether_addr_copy(filter_cmd.filter.ucast.mac, mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
				  enum qed_filter_xcast_params_type opcode,
				  u16 vid)
{
	struct qed_filter_params filter_cmd;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.vlan_valid = 1;
	filter_cmd.filter.ucast.vlan = vid;

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
	struct qed_update_vport_params *params;
	int rc;

	/* Proceed only if action actually needs to change */
	if (edev->accept_any_vlan == action)
		return 0;

	params = vzalloc(sizeof(*params));
	if (!params)
		return -ENOMEM;

	params->vport_id = 0;
	params->accept_any_vlan = action;
	params->update_accept_any_vlan_flg = 1;

	rc = edev->ops->vport_update(edev->cdev, params);
	if (rc) {
		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
		       action ? "enable" : "disable");
	} else {
		DP_INFO(edev, "%s accept-any-vlan\n",
			action ? "enabled" : "disabled");
		edev->accept_any_vlan = action;
	}

	vfree(params);
	return rc;
}

int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_vlan *vlan, *tmp;
	int rc = 0;

	DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan) {
		DP_INFO(edev, "Failed to allocate struct for vlan\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&vlan->list);
	vlan->vid = vid;
	vlan->configured = false;

	/* Verify vlan isn't already configured */
	list_for_each_entry(tmp, &edev->vlan_list, list) {
		if (tmp->vid == vlan->vid) {
			DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
				   "vlan already configured\n");
			kfree(vlan);
			return -EEXIST;
		}
	}

	/* If interface is down, cache this VLAN ID and return */
	__qede_lock(edev);
	if (edev->state != QEDE_STATE_OPEN) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Interface is down, VLAN %d will be configured when interface is up\n",
			   vid);
		if (vid != 0)
			edev->non_configured_vlans++;
		list_add(&vlan->list, &edev->vlan_list);
		goto out;
	}

	/* Check for the filter limit.
	 * Note - vlan0 has a reserved filter and can be added without
	 * worrying about quota
	 */
	if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
	    (vlan->vid == 0)) {
		rc = qede_set_ucast_rx_vlan(edev,
					    QED_FILTER_XCAST_TYPE_ADD,
					    vlan->vid);
		if (rc) {
			DP_ERR(edev, "Failed to configure VLAN %d\n",
			       vlan->vid);
			kfree(vlan);
			goto out;
		}
		vlan->configured = true;

		/* vlan0 filter isn't consuming out of our quota */
		if (vlan->vid != 0)
			edev->configured_vlans++;
	} else {
		/* Out of quota; Activate accept-any-VLAN mode */
		if (!edev->non_configured_vlans) {
			rc = qede_config_accept_any_vlan(edev, true);
			if (rc) {
				kfree(vlan);
				goto out;
			}
		}

		edev->non_configured_vlans++;
	}

	list_add(&vlan->list, &edev->vlan_list);

out:
	__qede_unlock(edev);
	return rc;
}

static void qede_del_vlan_from_list(struct qede_dev *edev,
				    struct qede_vlan *vlan)
{
	/* vlan0 filter isn't consuming out of our quota */
	if (vlan->vid != 0) {
		if (vlan->configured)
			edev->configured_vlans--;
		else
			edev->non_configured_vlans--;
	}

	list_del(&vlan->list);
	kfree(vlan);
}

int qede_configure_vlan_filters(struct qede_dev *edev)
{
	int rc = 0, real_rc = 0, accept_any_vlan = 0;
	struct qed_dev_eth_info *dev_info;
	struct qede_vlan *vlan = NULL;

	if (list_empty(&edev->vlan_list))
		return 0;

	dev_info = &edev->dev_info;

	/* Configure non-configured vlans */
	list_for_each_entry(vlan, &edev->vlan_list, list) {
		if (vlan->configured)
			continue;

		/* We have more VLANs to configure than RX filters */
		if ((vlan->vid != 0) &&
		    (edev->configured_vlans == dev_info->num_vlan_filters)) {
			accept_any_vlan = 1;
			continue;
		}

		DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);

		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
					    vlan->vid);
		if (rc) {
			DP_ERR(edev, "Failed to configure VLAN %u\n",
			       vlan->vid);
			real_rc = rc;
			continue;
		}

		vlan->configured = true;

		/* vlan0 filter isn't consuming out of our quota */
		if (vlan->vid != 0) {
			edev->non_configured_vlans--;
			edev->configured_vlans++;
		}
	}

	/* enable accept_any_vlan mode if we have more VLANs than credits,
	 * or remove accept_any_vlan mode if we've actually removed
	 * a non-configured vlan, and all remaining vlans are truly configured.
	 */
	if (accept_any_vlan)
		rc = qede_config_accept_any_vlan(edev, true);
	else if (!edev->non_configured_vlans)
		rc = qede_config_accept_any_vlan(edev, false);

	if (rc && !real_rc)
		real_rc = rc;

	return real_rc;
}

int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_vlan *vlan;
	int rc = 0;

	DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);

	/* Find whether entry exists */
	__qede_lock(edev);
	list_for_each_entry(vlan, &edev->vlan_list, list)
		if (vlan->vid == vid)
			break;

	if (list_entry_is_head(vlan, &edev->vlan_list, list)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Vlan isn't configured\n");
		goto out;
	}

	if (edev->state != QEDE_STATE_OPEN) {
		/* As interface is already down, we don't have a VLAN
		 * filter to remove from it.
		 */
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Interface is down, removing VLAN from list only\n");
		qede_del_vlan_from_list(edev, vlan);
		goto out;
	}

	/* Remove vlan */
	if (vlan->configured) {
		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
					    vid);
		if (rc) {
			DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
			goto out;
		}
	}

	qede_del_vlan_from_list(edev, vlan);

	/* We have removed a VLAN - try to see if we can
	 * configure non-configured VLAN from the list.
	 */
	rc = qede_configure_vlan_filters(edev);

out:
	__qede_unlock(edev);
	return rc;
}

void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
{
	struct qede_vlan *vlan = NULL;

	if (list_empty(&edev->vlan_list))
		return;

	list_for_each_entry(vlan, &edev->vlan_list, list) {
		if (!vlan->configured)
			continue;

		vlan->configured = false;

		/* vlan0 filter isn't consuming out of our quota */
		if (vlan->vid != 0) {
			edev->non_configured_vlans++;
			edev->configured_vlans--;
		}

		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "marked vlan %d as non-configured\n", vlan->vid);
	}

	edev->accept_any_vlan = false;
}

static void qede_set_features_reload(struct qede_dev *edev,
				     struct qede_reload_args *args)
{
	edev->ndev->features = args->u.features;
}

netdev_features_t qede_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
	    !(features & NETIF_F_GRO))
		features &= ~NETIF_F_GRO_HW;

	return features;
}

int qede_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qede_dev *edev = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool need_reload = false;

	if (changes & NETIF_F_GRO_HW)
		need_reload = true;

	if (need_reload) {
		struct qede_reload_args args;

		args.u.features = features;
		args.func = &qede_set_features_reload;

		/* Make sure that we definitely disable or enable features
		 * before attempting to load the program.
		 */
		__qede_lock(edev);
		if (edev->xdp_prog)
			args.func(edev, &args);
		else
			qede_reload(edev, &args, true);
		__qede_unlock(edev);

		return 1;
	}

	return 0;
}

static int qede_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_tunn_params tunn_params;
	struct udp_tunnel_info ti;
	u16 *save_port;
	int rc;

	memset(&tunn_params, 0, sizeof(tunn_params));

	udp_tunnel_nic_get_port(dev, table, 0, &ti);
	if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
		tunn_params.update_vxlan_port = 1;
		tunn_params.vxlan_port = ntohs(ti.port);
		save_port = &edev->vxlan_dst_port;
	} else {
		tunn_params.update_geneve_port = 1;
		tunn_params.geneve_port = ntohs(ti.port);
		save_port = &edev->geneve_dst_port;
	}

	__qede_lock(edev);
	rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
	__qede_unlock(edev);
	if (rc)
		return rc;

	*save_port = ntohs(ti.port);
	return 0;
}

static const struct udp_tunnel_nic_info qede_udp_tunnels_both = {
	.sync_table	= qede_udp_tunnel_sync,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
}, qede_udp_tunnels_vxlan = {
	.sync_table	= qede_udp_tunnel_sync,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
	},
}, qede_udp_tunnels_geneve = {
	.sync_table	= qede_udp_tunnel_sync,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
};

void qede_set_udp_tunnels(struct qede_dev *edev)
{
	if (edev->dev_info.common.vxlan_enable &&
	    edev->dev_info.common.geneve_enable)
		edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_both;
	else if (edev->dev_info.common.vxlan_enable)
		edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_vxlan;
	else if (edev->dev_info.common.geneve_enable)
		edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_geneve;
}

static void qede_xdp_reload_func(struct qede_dev *edev,
				 struct qede_reload_args *args)
{
	struct bpf_prog *old;

	old = xchg(&edev->xdp_prog, args->u.new_prog);
	if (old)
		bpf_prog_put(old);
}

static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
{
	struct qede_reload_args args;

	/* If we're called, there was already a bpf reference increment */
	args.func = &qede_xdp_reload_func;
	args.u.new_prog = prog;
	qede_reload(edev, &args, false);

	return 0;
}

int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return qede_xdp_set(edev, xdp->prog);
	default:
		return -EINVAL;
	}
}

static int qede_set_mcast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char *mac, int num_macs)
{
	struct qed_filter_params filter_cmd;
	int i;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_MCAST;
	filter_cmd.filter.mcast.type = opcode;
	filter_cmd.filter.mcast.num = num_macs;

	for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
		ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

int qede_set_mac_addr(struct net_device *ndev, void *p)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int rc = 0;

	/* Serialize against load/unload and the periodic task while the
	 * MAC is validated and applied.
	 */
	__qede_lock(edev);

	if (!is_valid_ether_addr(addr->sa_data)) {
		DP_NOTICE(edev, "The MAC address is not valid\n");
		rc = -EFAULT;
		goto out;
	}

	if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
		DP_NOTICE(edev, "qed prevents setting MAC %pM\n",
			  addr->sa_data);
		rc = -EINVAL;
		goto out;
	}

	if (edev->state == QEDE_STATE_OPEN) {
		/* Remove the previous primary mac */
		rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
					   ndev->dev_addr);
		if (rc)
			goto out;
	}

	ether_addr_copy(ndev->dev_addr, addr->sa_data);
	DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);

	if (edev->state != QEDE_STATE_OPEN) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "The device is currently down\n");
		/* Ask PF to explicitly update a copy in bulletin board */
		if (IS_VF(edev) && edev->ops->req_bulletin_update_mac)
			edev->ops->req_bulletin_update_mac(edev->cdev,
							   ndev->dev_addr);
		goto out;
	}

	edev->ops->common->update_mac(edev->cdev, ndev->dev_addr);

	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
				   ndev->dev_addr);
out:
	__qede_unlock(edev);
	return rc;
}

static int
qede_configure_mcast_filtering(struct net_device *ndev,
			       enum qed_filter_rx_mode_type *accept_flags)
{
	struct qede_dev *edev = netdev_priv(ndev);
	unsigned char *mc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc = 0, mc_count;
	size_t size;

	size = 64 * ETH_ALEN;

	mc_macs = kzalloc(size, GFP_KERNEL);
	if (!mc_macs) {
		DP_NOTICE(edev,
			  "Failed to allocate memory for multicast MACs\n");
		rc = -ENOMEM;
		goto exit;
	}

	temp = mc_macs;

	/* Remove all previously configured MAC filters */
	rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   mc_macs, 1);
	if (rc)
		goto exit;

	netif_addr_lock_bh(ndev);

	mc_count = netdev_mc_count(ndev);
	if (mc_count <= 64) {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(temp, ha->addr);
			temp += ETH_ALEN;
		}
	}

	netif_addr_unlock_bh(ndev);

	/* Over the multicast filter limit - fall back to mc-promisc */
	if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
		if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
			*accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
	} else {
		/* Add all multicast MAC filters */
		rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
					   mc_macs, mc_count);
	}

exit:
	kfree(mc_macs);
	return rc;
}

void qede_set_rx_mode(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
}

/* Must be called with qede_lock held */
void qede_config_rx_mode(struct net_device *ndev)
{
	enum qed_filter_rx_mode_type accept_flags;
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_filter_params rx_mode;
	unsigned char *uc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc, uc_count;
	size_t size;

	netif_addr_lock_bh(ndev);

	uc_count = netdev_uc_count(ndev);
	size = uc_count * ETH_ALEN;

	uc_macs = kzalloc(size, GFP_ATOMIC);
	if (!uc_macs) {
		DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
		netif_addr_unlock_bh(ndev);
		return;
	}

	temp = uc_macs;
	netdev_for_each_uc_addr(ha, ndev) {
		ether_addr_copy(temp, ha->addr);
		temp += ETH_ALEN;
	}

	netif_addr_unlock_bh(ndev);

	/* Configure the struct for the Rx mode */
	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
	rx_mode.type = QED_FILTER_TYPE_RX_MODE;

	/* Remove all previous unicast secondary macs and multicast macs
	 * (configure / leave the primary mac)
	 */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
				   edev->ndev->dev_addr);
	if (rc)
		goto out;

	/* Check for promiscuous */
	if (ndev->flags & IFF_PROMISC)
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	else
		accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;

	/* Configure all filters regardless, in case promisc is rejected */
	if (uc_count < edev->dev_info.num_mac_filters) {
		int i;

		temp = uc_macs;
		for (i = 0; i < uc_count; i++) {
			rc = qede_set_ucast_rx_mac(edev,
						   QED_FILTER_XCAST_TYPE_ADD,
						   temp);
			if (rc)
				goto out;

			temp += ETH_ALEN;
		}
	} else {
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	}

	rc = qede_configure_mcast_filtering(ndev, &accept_flags);
	if (rc)
		goto out;

	/* take care of VLAN mode */
	if (ndev->flags & IFF_PROMISC) {
		qede_config_accept_any_vlan(edev, true);
	} else if (!edev->non_configured_vlans) {
		/* It's possible that accept_any_vlan mode is set due to a
		 * previous setting of IFF_PROMISC. If vlan credits are
		 * sufficient, disable accept_any_vlan.
		 */
		qede_config_accept_any_vlan(edev, false);
	}

	rx_mode.filter.accept_flags = accept_flags;
	edev->ops->filter_config(edev->cdev, &rx_mode);
out:
	kfree(uc_macs);
}

static struct qede_arfs_fltr_node *
qede_get_arfs_fltr_by_loc(struct hlist_head *head, u64 location)
{
	struct qede_arfs_fltr_node *fltr;

	hlist_for_each_entry(fltr, head, node)
		if (location == fltr->sw_id)
			return fltr;

	return NULL;
}

int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
			  u32 *rule_locs)
{
	struct qede_arfs_fltr_node *fltr;
	struct hlist_head *head;
	int cnt = 0, rc = 0;

	info->data = QEDE_RFS_MAX_FLTR;

	__qede_lock(edev);

	if (!edev->arfs) {
		rc = -EPERM;
		goto unlock;
	}

	head = QEDE_ARFS_BUCKET_HEAD(edev, 0);

	hlist_for_each_entry(fltr, head, node) {
		if (cnt == info->rule_cnt) {
			rc = -EMSGSIZE;
			goto unlock;
		}

		rule_locs[cnt] = fltr->sw_id;
		cnt++;
	}

	info->rule_cnt = cnt;

unlock:
	__qede_unlock(edev);
	return rc;
}

int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
	struct qede_arfs_fltr_node *fltr = NULL;
	int rc = 0;

	cmd->data = QEDE_RFS_MAX_FLTR;

	__qede_lock(edev);

	if (!edev->arfs) {
		rc = -EPERM;
		goto unlock;
	}

	fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
					 fsp->location);
	if (!fltr) {
		DP_NOTICE(edev, "Rule not found - location=0x%x\n",
			  fsp->location);
		rc = -EINVAL;
		goto unlock;
	}

	if (fltr->tuple.eth_proto == htons(ETH_P_IP)) {
		if (fltr->tuple.ip_proto == IPPROTO_TCP)
			fsp->flow_type = TCP_V4_FLOW;
		else
			fsp->flow_type = UDP_V4_FLOW;

		fsp->h_u.tcp_ip4_spec.psrc = fltr->tuple.src_port;
		fsp->h_u.tcp_ip4_spec.pdst = fltr->tuple.dst_port;
		fsp->h_u.tcp_ip4_spec.ip4src = fltr->tuple.src_ipv4;
		fsp->h_u.tcp_ip4_spec.ip4dst = fltr->tuple.dst_ipv4;
	} else {
		if (fltr->tuple.ip_proto == IPPROTO_TCP)
			fsp->flow_type = TCP_V6_FLOW;
		else
			fsp->flow_type = UDP_V6_FLOW;
		fsp->h_u.tcp_ip6_spec.psrc = fltr->tuple.src_port;
		fsp->h_u.tcp_ip6_spec.pdst = fltr->tuple.dst_port;
		memcpy(&fsp->h_u.tcp_ip6_spec.ip6src,
		       &fltr->tuple.src_ipv6, sizeof(struct in6_addr));
		memcpy(&fsp->h_u.tcp_ip6_spec.ip6dst,
		       &fltr->tuple.dst_ipv6, sizeof(struct in6_addr));
	}

	fsp->ring_cookie = fltr->rxq_id;

	if (fltr->vfid) {
		fsp->ring_cookie |= ((u64)fltr->vfid) <<
					ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
	}

	if (fltr->b_is_drop)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
unlock:
	__qede_unlock(edev);
	return rc;
}

static int
qede_poll_arfs_filter_config(struct qede_dev *edev,
			     struct qede_arfs_fltr_node *fltr)
{
	int count = QEDE_ARFS_POLL_COUNT;

	while (fltr->used && count) {
		msleep(20);
		count--;
	}

	if (count == 0 || fltr->fw_rc) {
		DP_NOTICE(edev, "Timeout in polling filter config\n");
		qede_dequeue_fltr_and_config_searcher(edev, fltr);
		return -EIO;
	}

	return fltr->fw_rc;
}

static int qede_flow_get_min_header_size(struct qede_arfs_tuple *t)
{
	int size = ETH_HLEN;

	if (t->eth_proto == htons(ETH_P_IP))
		size += sizeof(struct iphdr);
	else
		size += sizeof(struct ipv6hdr);

	if (t->ip_proto == IPPROTO_TCP)
		size += sizeof(struct tcphdr);
	else
		size += sizeof(struct udphdr);

	return size;
}
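
/* Worked example: a TCPv4 tuple needs ETH_HLEN + sizeof(struct iphdr) +
 * sizeof(struct tcphdr) = 14 + 20 + 20 = 54 bytes; a UDPv6 tuple needs
 * 14 + 40 + 8 = 62 bytes. This is the size of the template packet that
 * build_hdr() fills in and that the searcher receives via DMA.
 */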

static bool qede_flow_spec_ipv4_cmp(struct qede_arfs_tuple *a,
				    struct qede_arfs_tuple *b)
{
	if (a->eth_proto != htons(ETH_P_IP) ||
	    b->eth_proto != htons(ETH_P_IP))
		return false;

	return (a->src_ipv4 == b->src_ipv4) &&
	       (a->dst_ipv4 == b->dst_ipv4);
}

static void qede_flow_build_ipv4_hdr(struct qede_arfs_tuple *t,
				     void *header)
{
	__be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct iphdr));
	struct iphdr *ip = (struct iphdr *)(header + ETH_HLEN);
	struct ethhdr *eth = (struct ethhdr *)header;

	eth->h_proto = t->eth_proto;
	ip->saddr = t->src_ipv4;
	ip->daddr = t->dst_ipv4;
	ip->version = 0x4;
	ip->ihl = 0x5;
	ip->protocol = t->ip_proto;
	ip->tot_len = cpu_to_be16(qede_flow_get_min_header_size(t) - ETH_HLEN);

	/* ports is weakly typed to suit both TCP and UDP ports */
	ports[0] = t->src_port;
	ports[1] = t->dst_port;
}

static void qede_flow_stringify_ipv4_hdr(struct qede_arfs_tuple *t,
					 void *buffer)
{
	const char *prefix = t->ip_proto == IPPROTO_TCP ? "TCP" : "UDP";

	snprintf(buffer, QEDE_FILTER_PRINT_MAX_LEN,
		 "%s %pI4 (%04x) -> %pI4 (%04x)",
		 prefix, &t->src_ipv4, t->src_port,
		 &t->dst_ipv4, t->dst_port);
}

static bool qede_flow_spec_ipv6_cmp(struct qede_arfs_tuple *a,
				    struct qede_arfs_tuple *b)
{
	if (a->eth_proto != htons(ETH_P_IPV6) ||
	    b->eth_proto != htons(ETH_P_IPV6))
		return false;

	if (memcmp(&a->src_ipv6, &b->src_ipv6, sizeof(struct in6_addr)))
		return false;

	if (memcmp(&a->dst_ipv6, &b->dst_ipv6, sizeof(struct in6_addr)))
		return false;

	return true;
}

static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t,
				     void *header)
{
	__be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct ipv6hdr));
	struct ipv6hdr *ip6 = (struct ipv6hdr *)(header + ETH_HLEN);
	struct ethhdr *eth = (struct ethhdr *)header;

	eth->h_proto = t->eth_proto;
	memcpy(&ip6->saddr, &t->src_ipv6, sizeof(struct in6_addr));
	memcpy(&ip6->daddr, &t->dst_ipv6, sizeof(struct in6_addr));
	ip6->version = 0x6;

	if (t->ip_proto == IPPROTO_TCP) {
		ip6->nexthdr = NEXTHDR_TCP;
		ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
	} else {
		ip6->nexthdr = NEXTHDR_UDP;
		ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
	}

	/* ports is weakly typed to suit both TCP and UDP ports */
	ports[0] = t->src_port;
	ports[1] = t->dst_port;
}

/* Validate fields which are set and not accepted by the driver */
static int qede_flow_spec_validate_unused(struct qede_dev *edev,
					  struct ethtool_rx_flow_spec *fs)
{
	if (fs->flow_type & FLOW_MAC_EXT) {
		DP_INFO(edev, "Don't support MAC extensions\n");
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.vlan_etype || fs->h_ext.vlan_tci)) {
		DP_INFO(edev, "Don't support vlan-based classification\n");
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.data[0] || fs->h_ext.data[1])) {
		DP_INFO(edev, "Don't support user defined data\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
					struct qede_arfs_tuple *t)
{
	/* We must have Only 4-tuples/l4 port/src ip/dst ip
	 * as an input.
	 */
	if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
	} else if (!t->src_port && t->dst_port &&
		   !t->src_ipv4 && !t->dst_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
	} else if (!t->src_port && !t->dst_port &&
		   !t->dst_ipv4 && t->src_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
	} else if (!t->src_port && !t->dst_port &&
		   t->dst_ipv4 && !t->src_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
	} else {
		DP_INFO(edev, "Invalid N-tuple\n");
		return -EOPNOTSUPP;
	}

	t->ip_comp = qede_flow_spec_ipv4_cmp;
	t->build_hdr = qede_flow_build_ipv4_hdr;
	t->stringify = qede_flow_stringify_ipv4_hdr;

	return 0;
}
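
/* The accepted field combinations, mapped to searcher modes:
 *
 *   src port | dst port | src ip | dst ip | mode
 *   ---------+----------+--------+--------+--------------------------
 *      set   |   set    |  set   |  set   | ..._MODE_5_TUPLE
 *      -     |   set    |  -     |  -     | ..._MODE_L4_PORT
 *      -     |   -      |  set   |  -     | ..._MODE_IP_SRC
 *      -     |   -      |  -     |  set   | ..._MODE_IP_DEST
 *
 * Anything else is rejected; the v6 variant below applies the same rule
 * using memcmp() against the all-zero address.
 */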

static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
					struct qede_arfs_tuple *t,
					struct in6_addr *zaddr)
{
	/* We must have Only 4-tuples/l4 port/src ip/dst ip
	 * as an input.
	 */
	if (t->src_port && t->dst_port &&
	    memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
	    memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
	} else if (!t->src_port && t->dst_port &&
		   !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
		   !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
	} else if (!t->src_port && !t->dst_port &&
		   !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
		   memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
	} else if (!t->src_port && !t->dst_port &&
		   memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
		   !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
	} else {
		DP_INFO(edev, "Invalid N-tuple\n");
		return -EOPNOTSUPP;
	}

	t->ip_comp = qede_flow_spec_ipv6_cmp;
	t->build_hdr = qede_flow_build_ipv6_hdr;

	return 0;
}

static struct qede_arfs_fltr_node *
qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t)
{
	struct qede_arfs_fltr_node *fltr;
	struct hlist_node *temp;
	struct hlist_head *head;

	head = QEDE_ARFS_BUCKET_HEAD(edev, 0);

	hlist_for_each_entry_safe(fltr, temp, head, node) {
		if (fltr->tuple.ip_proto == t->ip_proto &&
		    fltr->tuple.src_port == t->src_port &&
		    fltr->tuple.dst_port == t->dst_port &&
		    t->ip_comp(&fltr->tuple, t))
			return fltr;
	}

	return NULL;
}

static void qede_flow_set_destination(struct qede_dev *edev,
				      struct qede_arfs_fltr_node *n,
				      struct ethtool_rx_flow_spec *fs)
{
	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		n->b_is_drop = true;
		return;
	}

	n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie);
	n->next_rxq_id = n->rxq_id;

	if (n->vfid)
		DP_VERBOSE(edev, QED_MSG_SP,
			   "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1);
}

int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie)
{
	struct qede_arfs_fltr_node *fltr = NULL;
	int rc = -EPERM;

	__qede_lock(edev);
	if (!edev->arfs)
		goto unlock;

	fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
					 cookie);
	if (!fltr)
		goto unlock;

	qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false);

	rc = qede_poll_arfs_filter_config(edev, fltr);
	if (rc == 0)
		qede_dequeue_fltr_and_config_searcher(edev, fltr);

unlock:
	__qede_unlock(edev);
	return rc;
}

int qede_get_arfs_filter_count(struct qede_dev *edev)
{
	int count = 0;

	__qede_lock(edev);

	if (!edev->arfs)
		goto unlock;

	count = edev->arfs->filter_count;

unlock:
	__qede_unlock(edev);
	return count;
}

static int qede_parse_actions(struct qede_dev *edev,
			      struct flow_action *flow_action,
			      struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		DP_NOTICE(edev, "No actions received\n");
		return -EINVAL;
	}

	if (!flow_action_basic_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			break;
		case FLOW_ACTION_QUEUE:
			if (act->queue.vf)
				break;

			if (act->queue.index >= QEDE_RSS_COUNT(edev)) {
				DP_INFO(edev, "Queue out-of-bounds\n");
				return -EINVAL;
			}
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int
qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule,
		      struct qede_arfs_tuple *t)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if ((match.key->src && match.mask->src != htons(U16_MAX)) ||
		    (match.key->dst && match.mask->dst != htons(U16_MAX))) {
			DP_NOTICE(edev, "Do not support ports masks\n");
			return -EINVAL;
		}

		t->src_port = match.key->src;
		t->dst_port = match.key->dst;
	}

	return 0;
}

static int
qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule,
			  struct qede_arfs_tuple *t)
{
	struct in6_addr zero_addr, addr;

	memset(&zero_addr, 0, sizeof(addr));
	memset(&addr, 0xff, sizeof(addr));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		if ((memcmp(&match.key->src, &zero_addr, sizeof(addr)) &&
		     memcmp(&match.mask->src, &addr, sizeof(addr))) ||
		    (memcmp(&match.key->dst, &zero_addr, sizeof(addr)) &&
		     memcmp(&match.mask->dst, &addr, sizeof(addr)))) {
			DP_NOTICE(edev,
				  "Do not support IPv6 address prefix/mask\n");
			return -EINVAL;
		}

		memcpy(&t->src_ipv6, &match.key->src, sizeof(addr));
		memcpy(&t->dst_ipv6, &match.key->dst, sizeof(addr));
	}

	if (qede_flow_parse_ports(edev, rule, t))
		return -EINVAL;

	return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
}

static int
qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule,
			  struct qede_arfs_tuple *t)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if ((match.key->src && match.mask->src != htonl(U32_MAX)) ||
		    (match.key->dst && match.mask->dst != htonl(U32_MAX))) {
			DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
			return -EINVAL;
		}

		t->src_ipv4 = match.key->src;
		t->dst_ipv4 = match.key->dst;
	}

	if (qede_flow_parse_ports(edev, rule, t))
		return -EINVAL;

	return qede_set_v4_tuple_to_profile(edev, t);
}

static int
qede_flow_parse_tcp_v6(struct qede_dev *edev, struct flow_rule *rule,
		       struct qede_arfs_tuple *tuple)
{
	tuple->ip_proto = IPPROTO_TCP;
	tuple->eth_proto = htons(ETH_P_IPV6);

	return qede_flow_parse_v6_common(edev, rule, tuple);
}

static int
qede_flow_parse_tcp_v4(struct qede_dev *edev, struct flow_rule *rule,
		       struct qede_arfs_tuple *tuple)
{
	tuple->ip_proto = IPPROTO_TCP;
	tuple->eth_proto = htons(ETH_P_IP);

	return qede_flow_parse_v4_common(edev, rule, tuple);
}

static int
qede_flow_parse_udp_v6(struct qede_dev *edev, struct flow_rule *rule,
		       struct qede_arfs_tuple *tuple)
{
	tuple->ip_proto = IPPROTO_UDP;
	tuple->eth_proto = htons(ETH_P_IPV6);

	return qede_flow_parse_v6_common(edev, rule, tuple);
}

static int
qede_flow_parse_udp_v4(struct qede_dev *edev, struct flow_rule *rule,
		       struct qede_arfs_tuple *tuple)
{
	tuple->ip_proto = IPPROTO_UDP;
	tuple->eth_proto = htons(ETH_P_IP);

	return qede_flow_parse_v4_common(edev, rule, tuple);
}

static int
qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
		     struct flow_rule *rule, struct qede_arfs_tuple *tuple)
{
	struct flow_dissector *dissector = rule->match.dissector;
	int rc = -EINVAL;
	u8 ip_proto = 0;

	memset(tuple, 0, sizeof(*tuple));

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		DP_NOTICE(edev, "Unsupported key set:0x%x\n",
			  dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (proto != htons(ETH_P_IP) &&
	    proto != htons(ETH_P_IPV6)) {
		DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto);
		return -EPROTONOSUPPORT;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ip_proto = match.key->ip_proto;
	}

	if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
		rc = qede_flow_parse_tcp_v4(edev, rule, tuple);
	else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
		rc = qede_flow_parse_tcp_v6(edev, rule, tuple);
	else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
		rc = qede_flow_parse_udp_v4(edev, rule, tuple);
	else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
		rc = qede_flow_parse_udp_v6(edev, rule, tuple);
	else
		DP_NOTICE(edev, "Invalid protocol request\n");

	return rc;
}

int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
			    struct flow_cls_offload *f)
{
	struct qede_arfs_fltr_node *n;
	int min_hlen, rc = -EINVAL;
	struct qede_arfs_tuple t;

	__qede_lock(edev);

	if (!edev->arfs) {
		rc = -EPERM;
		goto unlock;
	}

	/* parse flower attribute and prepare filter */
	if (qede_parse_flow_attr(edev, proto, f->rule, &t))
		goto unlock;

	/* Validate profile mode and number of filters */
	if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) ||
	    edev->arfs->filter_count == QEDE_RFS_MAX_FLTR) {
		DP_NOTICE(edev,
			  "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
			  t.mode, edev->arfs->mode, edev->arfs->filter_count);
		goto unlock;
	}

	/* parse tc actions and get the vf_id */
	if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
		goto unlock;

	if (qede_flow_find_fltr(edev, &t)) {
		rc = -EEXIST;
		goto unlock;
	}

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		rc = -ENOMEM;
		goto unlock;
	}

	min_hlen = qede_flow_get_min_header_size(&t);

	n->data = kzalloc(min_hlen, GFP_KERNEL);
	if (!n->data) {
		kfree(n);
		rc = -ENOMEM;
		goto unlock;
	}

	memcpy(&n->tuple, &t, sizeof(n->tuple));

	n->buf_len = min_hlen;
	n->b_is_drop = true;
	n->sw_id = f->cookie;

	n->tuple.build_hdr(&n->tuple, n->data);

	rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
	if (rc)
		goto unlock;

	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
	rc = qede_poll_arfs_filter_config(edev, n);

unlock:
	__qede_unlock(edev);
	return rc;
}

static int qede_flow_spec_validate(struct qede_dev *edev,
				   struct flow_action *flow_action,
				   struct qede_arfs_tuple *t,
				   __u32 location)
{
	if (location >= QEDE_RFS_MAX_FLTR) {
		DP_INFO(edev, "Location out-of-bounds\n");
		return -EINVAL;
	}

	/* Check location isn't already in use */
	if (test_bit(location, edev->arfs->arfs_fltr_bmap)) {
		DP_INFO(edev, "Location already in use\n");
		return -EINVAL;
	}

	/* Check if the filtering-mode could support the filter */
	if (edev->arfs->filter_count &&
	    edev->arfs->mode != t->mode) {
		DP_INFO(edev,
			"flow_spec would require filtering mode %08x, but %08x is configured\n",
			t->mode, edev->arfs->mode);
		return -EINVAL;
	}

	if (qede_parse_actions(edev, flow_action, NULL))
		return -EINVAL;

	return 0;
}

static int qede_flow_spec_to_rule(struct qede_dev *edev,
				  struct qede_arfs_tuple *t,
				  struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_rx_flow_spec_input input = {};
	struct ethtool_rx_flow_rule *flow;
	__be16 proto;
	int err = 0;

	if (qede_flow_spec_validate_unused(edev, fs))
		return -EOPNOTSUPP;

	switch ((fs->flow_type & ~FLOW_EXT)) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		proto = htons(ETH_P_IP);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		proto = htons(ETH_P_IPV6);
		break;
	default:
		DP_VERBOSE(edev, NETIF_MSG_IFUP,
			   "Can't support flow of type %08x\n", fs->flow_type);
		return -EOPNOTSUPP;
	}

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
		err = -EINVAL;
		goto err_out;
	}

	/* Make sure location is valid and filter isn't already set */
	err = qede_flow_spec_validate(edev, &flow->rule->action, t,
				      fs->location);
err_out:
	ethtool_rx_flow_rule_destroy(flow);
	return err;
}

int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	struct ethtool_rx_flow_spec *fsp = &info->fs;
	struct qede_arfs_fltr_node *n;
	struct qede_arfs_tuple t;
	int min_hlen, rc;

	__qede_lock(edev);

	if (!edev->arfs) {
		rc = -EPERM;
		goto unlock;
	}

	/* Translate the flow specification into something fitting our DB */
	rc = qede_flow_spec_to_rule(edev, &t, fsp);
	if (rc)
		goto unlock;

	if (qede_flow_find_fltr(edev, &t)) {
		rc = -EINVAL;
		goto unlock;
	}

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		rc = -ENOMEM;
		goto unlock;
	}

	min_hlen = qede_flow_get_min_header_size(&t);
	n->data = kzalloc(min_hlen, GFP_KERNEL);
	if (!n->data) {
		kfree(n);
		rc = -ENOMEM;
		goto unlock;
	}

	n->sw_id = fsp->location;
	set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
	n->buf_len = min_hlen;

	memcpy(&n->tuple, &t, sizeof(n->tuple));

	qede_flow_set_destination(edev, n, fsp);

	/* Build a minimal header according to the flow */
	n->tuple.build_hdr(&n->tuple, n->data);

	rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
	if (rc)
		goto unlock;

	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
	rc = qede_poll_arfs_filter_config(edev, n);
unlock:
	__qede_unlock(edev);

	return rc;
}
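
/* Usage sketch (not part of the driver): the qede_add_cls_rule() /
 * qede_delete_flow_filter() paths above are reached through ethtool's
 * N-tuple interface. Assuming an interface named eth0, rules could look
 * like:
 *
 *   # steer TCPv4 10.0.0.1:5001 -> 10.0.0.2:5001 to RX queue 2, slot 0
 *   ethtool -N eth0 flow-type tcp4 src-ip 10.0.0.1 src-port 5001 \
 *           dst-ip 10.0.0.2 dst-port 5001 action 2 loc 0
 *
 *   # drop the matching UDPv4 5-tuple instead (action -1 == discard)
 *   ethtool -N eth0 flow-type udp4 src-ip 10.0.0.1 src-port 5001 \
 *           dst-ip 10.0.0.2 dst-port 5001 action -1 loc 1
 *
 *   # remove slot 1 again
 *   ethtool -N eth0 delete 1
 *
 * All active rules must resolve to the same searcher mode (see
 * qede_flow_spec_validate()); the interface name, addresses and slots
 * above are illustrative only.
 */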