#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/udp_tunnel.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>

#include <linux/qed/qed_if.h>
#include "qede.h"

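/* Key identifying an aRFS/ntuple flow: L3 addresses (the unions let IPv4
 * and IPv6 share storage), L4 ports, Ethernet protocol and IP protocol.
 */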
struct qede_arfs_tuple {
	union {
		__be32 src_ipv4;
		struct in6_addr src_ipv6;
	};
	union {
		__be32 dst_ipv4;
		struct in6_addr dst_ipv6;
	};
	__be16 src_port;
	__be16 dst_port;
	__be16 eth_proto;
	u8 ip_proto;
};

struct qede_arfs_fltr_node {
#define QEDE_FLTR_VALID	 0
	unsigned long state;

	/* pointer to aRFS packet buffer */
	void *data;

	/* dma map address of aRFS packet buffer */
	dma_addr_t mapping;

	/* length of aRFS packet buffer */
	int buf_len;

	/* tuples to hold from aRFS packet buffer */
	struct qede_arfs_tuple tuple;

	u32 flow_id;
	u16 sw_id;
	u16 rxq_id;
	u16 next_rxq_id;
	bool filter_op;
	bool used;
	u8 fw_rc;
	struct hlist_node node;
};

struct qede_arfs {
#define QEDE_ARFS_BUCKET_HEAD(edev, idx) (&(edev)->arfs->arfs_hl_head[idx])
#define QEDE_ARFS_POLL_COUNT 100
#define QEDE_RFS_FLW_BITSHIFT (4)
#define QEDE_RFS_FLW_MASK ((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
	struct hlist_head arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];

	/* lock for filter list access */
	spinlock_t arfs_list_lock;
	unsigned long *arfs_fltr_bmap;
	int filter_count;
	bool enable;
};

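/* Issue an add/delete ntuple filter request to the device for filter 'n'.
 * The node's 'used' flag is set until qede_arfs_filter_op() signals that
 * the request has completed.
 */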
static void qede_configure_arfs_fltr(struct qede_dev *edev,
				     struct qede_arfs_fltr_node *n,
				     u16 rxq_id, bool add_fltr)
{
	const struct qed_eth_ops *op = edev->ops;
	struct qed_ntuple_filter_params params;

	if (n->used)
		return;

	memset(&params, 0, sizeof(params));

	params.addr = n->mapping;
	params.length = n->buf_len;
	params.qid = rxq_id;
	params.b_is_add = add_fltr;

	DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
		   "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
		   add_fltr ? "Adding" : "Deleting",
		   n->flow_id, n->sw_id, ntohs(n->tuple.src_port),
		   ntohs(n->tuple.dst_port), rxq_id);

	n->used = true;
	n->filter_op = add_fltr;
	op->ntuple_filter_config(edev->cdev, n, &params);
}

static void
qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
{
	kfree(fltr->data);
	clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
	kfree(fltr);
}

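/* Map the filter's packet buffer for DMA, add the node to its hash bucket
 * and, when this is the first filter, enable the hardware searcher in
 * 5-tuple mode.
 */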
static int
qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
				      struct qede_arfs_fltr_node *fltr,
				      u16 bucket_idx)
{
	fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data,
				       fltr->buf_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) {
		DP_NOTICE(edev, "Failed to map DMA memory for rule\n");
		qede_free_arfs_filter(edev, fltr);
		return -ENOMEM;
	}

	INIT_HLIST_NODE(&fltr->node);
	hlist_add_head(&fltr->node,
		       QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx));
	edev->arfs->filter_count++;

	if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
		enum qed_filter_config_mode mode;

		mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
		edev->ops->configure_arfs_searcher(edev->cdev, mode);
		edev->arfs->enable = true;
	}

	return 0;
}

static void
qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
				      struct qede_arfs_fltr_node *fltr)
{
	hlist_del(&fltr->node);
	dma_unmap_single(&edev->pdev->dev, fltr->mapping,
			 fltr->buf_len, DMA_TO_DEVICE);

	qede_free_arfs_filter(edev, fltr);
	edev->arfs->filter_count--;

	if (!edev->arfs->filter_count && edev->arfs->enable) {
		enum qed_filter_config_mode mode;

		mode = QED_FILTER_CONFIG_MODE_DISABLE;
		edev->arfs->enable = false;
		edev->ops->configure_arfs_searcher(edev->cdev, mode);
	}
}

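/* Completion callback invoked by qed once a filter config request finishes.
 * On failure the filter is simply invalidated; on success, a pending rxq
 * migration (rxq_id != next_rxq_id) triggers the follow-up delete/add step.
 */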
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
{
	struct qede_arfs_fltr_node *fltr = filter;
	struct qede_dev *edev = dev;

	fltr->fw_rc = fw_rc;

	if (fw_rc) {
		DP_NOTICE(edev,
			  "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
			  fw_rc, fltr->flow_id, fltr->sw_id,
			  ntohs(fltr->tuple.src_port),
			  ntohs(fltr->tuple.dst_port), fltr->rxq_id);

		spin_lock_bh(&edev->arfs->arfs_list_lock);

		fltr->used = false;
		clear_bit(QEDE_FLTR_VALID, &fltr->state);

		spin_unlock_bh(&edev->arfs->arfs_list_lock);
		return;
	}

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	fltr->used = false;

	if (fltr->filter_op) {
		set_bit(QEDE_FLTR_VALID, &fltr->state);
		if (fltr->rxq_id != fltr->next_rxq_id)
			qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
						 false);
	} else {
		clear_bit(QEDE_FLTR_VALID, &fltr->state);
		if (fltr->rxq_id != fltr->next_rxq_id) {
			fltr->rxq_id = fltr->next_rxq_id;
			qede_configure_arfs_fltr(edev, fltr,
						 fltr->rxq_id, true);
		}
	}

	spin_unlock_bh(&edev->arfs->arfs_list_lock);
}

/* Should be called while qede_lock is held */
void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
{
	int i;

	for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
		struct hlist_node *temp;
		struct hlist_head *head;
		struct qede_arfs_fltr_node *fltr;

		head = &edev->arfs->arfs_hl_head[i];

		hlist_for_each_entry_safe(fltr, temp, head, node) {
			bool del = false;

			if (edev->state != QEDE_STATE_OPEN)
				del = true;

			spin_lock_bh(&edev->arfs->arfs_list_lock);

			if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
			     !fltr->used) || free_fltr) {
				qede_dequeue_fltr_and_config_searcher(edev,
								      fltr);
			} else {
				bool flow_exp = false;
#ifdef CONFIG_RFS_ACCEL
				flow_exp = rps_may_expire_flow(edev->ndev,
							       fltr->rxq_id,
							       fltr->flow_id,
							       fltr->sw_id);
#endif
				if ((flow_exp || del) && !free_fltr)
					qede_configure_arfs_fltr(edev, fltr,
								 fltr->rxq_id,
								 false);
			}

			spin_unlock_bh(&edev->arfs->arfs_list_lock);
		}
	}

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	if (!edev->arfs->filter_count) {
		if (edev->arfs->enable) {
			enum qed_filter_config_mode mode;

			mode = QED_FILTER_CONFIG_MODE_DISABLE;
			edev->arfs->enable = false;
			edev->ops->configure_arfs_searcher(edev->cdev, mode);
		}
#ifdef CONFIG_RFS_ACCEL
	} else {
		set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
		schedule_delayed_work(&edev->sp_task,
				      QEDE_SP_TASK_POLL_DELAY);
#endif
	}

	spin_unlock_bh(&edev->arfs->arfs_list_lock);
}

/* This function waits until all aRFS filters get deleted and freed.
 * On timeout it frees all filters forcefully.
 */
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
{
	int count = QEDE_ARFS_POLL_COUNT;

	while (count) {
		qede_process_arfs_filters(edev, false);

		if (!edev->arfs->filter_count)
			break;

		msleep(100);
		count--;
	}

	if (!count) {
		DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");

		/* Something is terribly wrong, free forcefully */
		qede_process_arfs_filters(edev, true);
	}
}

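/* Allocate the aRFS context: the hash buckets, the sw_id bitmap and, with
 * CONFIG_RFS_ACCEL, the IRQ CPU rmap used by the stack for flow steering.
 */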
int qede_alloc_arfs(struct qede_dev *edev)
{
	int i;

	edev->arfs = vzalloc(sizeof(*edev->arfs));
	if (!edev->arfs)
		return -ENOMEM;

	spin_lock_init(&edev->arfs->arfs_list_lock);

	for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
		INIT_HLIST_HEAD(QEDE_ARFS_BUCKET_HEAD(edev, i));

	edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) *
					     sizeof(long));
	if (!edev->arfs->arfs_fltr_bmap) {
		vfree(edev->arfs);
		edev->arfs = NULL;
		return -ENOMEM;
	}

#ifdef CONFIG_RFS_ACCEL
	edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
	if (!edev->ndev->rx_cpu_rmap) {
		vfree(edev->arfs->arfs_fltr_bmap);
		edev->arfs->arfs_fltr_bmap = NULL;
		vfree(edev->arfs);
		edev->arfs = NULL;
		return -ENOMEM;
	}
#endif
	return 0;
}

void qede_free_arfs(struct qede_dev *edev)
{
	if (!edev->arfs)
		return;

#ifdef CONFIG_RFS_ACCEL
	if (edev->ndev->rx_cpu_rmap)
		free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);

	edev->ndev->rx_cpu_rmap = NULL;
#endif
	vfree(edev->arfs->arfs_fltr_bmap);
	edev->arfs->arfs_fltr_bmap = NULL;
	vfree(edev->arfs);
	edev->arfs = NULL;
}

#ifdef CONFIG_RFS_ACCEL
static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
				 const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
		    tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
			return true;
		else
			return false;
	} else {
		struct in6_addr *src = &tpos->tuple.src_ipv6;
		u8 size = sizeof(struct in6_addr);

		if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
		    !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
			return true;
		else
			return false;
	}
}

static struct qede_arfs_fltr_node *
qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
			  __be16 src_port, __be16 dst_port, u8 ip_proto)
{
	struct qede_arfs_fltr_node *tpos;

	hlist_for_each_entry(tpos, h, node)
		if (tpos->tuple.ip_proto == ip_proto &&
		    tpos->tuple.eth_proto == skb->protocol &&
		    qede_compare_ip_addr(tpos, skb) &&
		    tpos->tuple.src_port == src_port &&
		    tpos->tuple.dst_port == dst_port)
			return tpos;

	return NULL;
}

static struct qede_arfs_fltr_node *
qede_alloc_filter(struct qede_dev *edev, int min_hlen)
{
	struct qede_arfs_fltr_node *n;
	int bit_id;

	bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
				     QEDE_RFS_MAX_FLTR);

	if (bit_id >= QEDE_RFS_MAX_FLTR)
		return NULL;

	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n)
		return NULL;

	n->data = kzalloc(min_hlen, GFP_ATOMIC);
	if (!n->data) {
		kfree(n);
		return NULL;
	}

	n->sw_id = (u16)bit_id;
	set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
	return n;
}

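/* ndo_rx_flow_steer() callback: steer a flow to the RX queue serving
 * rxq_index. Reuses a matching filter node if one exists (possibly
 * migrating it to the new queue); otherwise allocates a node, snapshots
 * the packet headers and programs the hardware filter.
 */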
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq_index, u32 flow_id)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_arfs_fltr_node *n;
	int min_hlen, rc, tp_offset;
	struct ethhdr *eth;
	__be16 *ports;
	u16 tbl_idx;
	u8 ip_proto;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (skb->protocol != htons(ETH_P_IP) &&
	    skb->protocol != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	if (skb->protocol == htons(ETH_P_IP)) {
		ip_proto = ip_hdr(skb)->protocol;
		tp_offset = sizeof(struct iphdr);
	} else {
		ip_proto = ipv6_hdr(skb)->nexthdr;
		tp_offset = sizeof(struct ipv6hdr);
	}

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
		return -EPROTONOSUPPORT;

	ports = (__be16 *)(skb->data + tp_offset);
	tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	n = qede_arfs_htbl_key_search(QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx),
				      skb, ports[0], ports[1], ip_proto);
	if (n) {
		/* Filter match */
		n->next_rxq_id = rxq_index;

		if (test_bit(QEDE_FLTR_VALID, &n->state)) {
			if (n->rxq_id != rxq_index)
				qede_configure_arfs_fltr(edev, n, n->rxq_id,
							 false);
		} else {
			if (!n->used) {
				n->rxq_id = rxq_index;
				qede_configure_arfs_fltr(edev, n, n->rxq_id,
							 true);
			}
		}

		rc = n->sw_id;
		goto ret_unlock;
	}

	min_hlen = ETH_HLEN + skb_headlen(skb);

	n = qede_alloc_filter(edev, min_hlen);
	if (!n) {
		rc = -ENOMEM;
		goto ret_unlock;
	}

	n->buf_len = min_hlen;
	n->rxq_id = rxq_index;
	n->next_rxq_id = rxq_index;
	n->tuple.src_port = ports[0];
	n->tuple.dst_port = ports[1];
	n->flow_id = flow_id;

	if (skb->protocol == htons(ETH_P_IP)) {
		n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
		n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
	} else {
		memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
		       sizeof(struct in6_addr));
		memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
		       sizeof(struct in6_addr));
	}

	eth = (struct ethhdr *)n->data;
	eth->h_proto = skb->protocol;
	n->tuple.eth_proto = skb->protocol;
	n->tuple.ip_proto = ip_proto;
	memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));

	rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx);
	if (rc)
		goto ret_unlock;

	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);

	spin_unlock_bh(&edev->arfs->arfs_list_lock);

	set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	return n->sw_id;

ret_unlock:
	spin_unlock_bh(&edev->arfs->arfs_list_lock);
	return rc;
}
#endif

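/* Called by qed when the device's notion of the UDP tunnel ports changes;
 * drop our cached port if it no longer matches what the device reports.
 */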
void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
{
	struct qede_dev *edev = dev;

	if (edev->vxlan_dst_port != vxlan_port)
		edev->vxlan_dst_port = 0;

	if (edev->geneve_dst_port != geneve_port)
		edev->geneve_dst_port = 0;
}

void qede_force_mac(void *dev, u8 *mac, bool forced)
{
	struct qede_dev *edev = dev;

	__qede_lock(edev);

	/* MAC hints take effect only if we haven't set one already */
	if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) {
		__qede_unlock(edev);
		return;
	}

	ether_addr_copy(edev->ndev->dev_addr, mac);
	__qede_unlock(edev);
}

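/* Fill the qed RSS configuration from the cached ethtool settings,
 * (re)initializing the indirection table, hash key and capability flags
 * on first use or when the current table references invalid queues.
 */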
void qede_fill_rss_params(struct qede_dev *edev,
			  struct qed_update_vport_rss_params *rss, u8 *update)
{
	bool need_reset = false;
	int i;

	if (QEDE_RSS_COUNT(edev) <= 1) {
		memset(rss, 0, sizeof(*rss));
		*update = 0;
		return;
	}

	/* Need to validate current RSS config uses valid entries */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
			need_reset = true;
			break;
		}
	}

	if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
			u16 indir_val, val;

			val = QEDE_RSS_COUNT(edev);
			indir_val = ethtool_rxfh_indir_default(i, val);
			edev->rss_ind_table[i] = indir_val;
		}
		edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
	}

	/* Now that we have the queue-indirection, prepare the handles */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);

		rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
	}

	if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
		netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
		edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
	}
	memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));

	if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
		edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
				 QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
		edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
	}
	rss->rss_caps = edev->rss_caps;

	*update = 1;
}

616
617static int qede_set_ucast_rx_mac(struct qede_dev *edev,
618 enum qed_filter_xcast_params_type opcode,
619 unsigned char mac[ETH_ALEN])
620{
621 struct qed_filter_params filter_cmd;
622
623 memset(&filter_cmd, 0, sizeof(filter_cmd));
624 filter_cmd.type = QED_FILTER_TYPE_UCAST;
625 filter_cmd.filter.ucast.type = opcode;
626 filter_cmd.filter.ucast.mac_valid = 1;
627 ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
628
629 return edev->ops->filter_config(edev->cdev, &filter_cmd);
630}
631
632static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
633 enum qed_filter_xcast_params_type opcode,
634 u16 vid)
635{
636 struct qed_filter_params filter_cmd;
637
638 memset(&filter_cmd, 0, sizeof(filter_cmd));
639 filter_cmd.type = QED_FILTER_TYPE_UCAST;
640 filter_cmd.filter.ucast.type = opcode;
641 filter_cmd.filter.ucast.vlan_valid = 1;
642 filter_cmd.filter.ucast.vlan = vid;
643
644 return edev->ops->filter_config(edev->cdev, &filter_cmd);
645}
646
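/* Enable or disable accept-any-VLAN on the vport; used when VLAN filter
 * credits run out or promiscuous mode is toggled.
 */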
static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
	struct qed_update_vport_params *params;
	int rc;

	/* Proceed only if action actually needs to change */
	if (edev->accept_any_vlan == action)
		return 0;

	params = vzalloc(sizeof(*params));
	if (!params)
		return -ENOMEM;

	params->vport_id = 0;
	params->accept_any_vlan = action;
	params->update_accept_any_vlan_flg = 1;

	rc = edev->ops->vport_update(edev->cdev, params);
	if (rc) {
		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
		       action ? "enable" : "disable");
	} else {
		DP_INFO(edev, "%s accept-any-vlan\n",
			action ? "enabled" : "disabled");
		edev->accept_any_vlan = action;
	}

	vfree(params);
	return 0;
}

int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_vlan *vlan, *tmp;
	int rc = 0;

	DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan) {
		DP_INFO(edev, "Failed to allocate struct for vlan\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&vlan->list);
	vlan->vid = vid;
	vlan->configured = false;

	/* Verify vlan isn't already configured */
	list_for_each_entry(tmp, &edev->vlan_list, list) {
		if (tmp->vid == vlan->vid) {
			DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
				   "vlan already configured\n");
			kfree(vlan);
			return -EEXIST;
		}
	}

	/* If interface is down, cache this VLAN ID and return */
	__qede_lock(edev);
	if (edev->state != QEDE_STATE_OPEN) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Interface is down, VLAN %d will be configured when interface is up\n",
			   vid);
		if (vid != 0)
			edev->non_configured_vlans++;
		list_add(&vlan->list, &edev->vlan_list);
		goto out;
	}

	/* Check for the filter limit.
	 * Note - vlan0 has a reserved filter and can be added without
	 * worrying about quota
	 */
	if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
	    (vlan->vid == 0)) {
		rc = qede_set_ucast_rx_vlan(edev,
					    QED_FILTER_XCAST_TYPE_ADD,
					    vlan->vid);
		if (rc) {
			DP_ERR(edev, "Failed to configure VLAN %d\n",
			       vlan->vid);
			kfree(vlan);
			goto out;
		}
		vlan->configured = true;

		/* vlan0 filter isn't consuming out of our quota */
		if (vlan->vid != 0)
			edev->configured_vlans++;
	} else {
		/* Out of quota; Activate accept-any-VLAN mode */
		if (!edev->non_configured_vlans) {
			rc = qede_config_accept_any_vlan(edev, true);
			if (rc) {
				kfree(vlan);
				goto out;
			}
		}

		edev->non_configured_vlans++;
	}

	list_add(&vlan->list, &edev->vlan_list);

out:
	__qede_unlock(edev);
	return rc;
}

static void qede_del_vlan_from_list(struct qede_dev *edev,
				    struct qede_vlan *vlan)
{
	/* vlan0 filter isn't consuming out of our quota */
	if (vlan->vid != 0) {
		if (vlan->configured)
			edev->configured_vlans--;
		else
			edev->non_configured_vlans--;
	}

	list_del(&vlan->list);
	kfree(vlan);
}

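/* Walk the VLAN list and program any entries that aren't yet configured
 * in hardware, falling back to accept-any-VLAN once credits are exhausted.
 */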
int qede_configure_vlan_filters(struct qede_dev *edev)
{
	int rc = 0, real_rc = 0, accept_any_vlan = 0;
	struct qed_dev_eth_info *dev_info;
	struct qede_vlan *vlan = NULL;

	if (list_empty(&edev->vlan_list))
		return 0;

	dev_info = &edev->dev_info;

	/* Configure non-configured vlans */
	list_for_each_entry(vlan, &edev->vlan_list, list) {
		if (vlan->configured)
			continue;

		/* We have used all our credits, now enable accept_any_vlan */
		if ((vlan->vid != 0) &&
		    (edev->configured_vlans == dev_info->num_vlan_filters)) {
			accept_any_vlan = 1;
			continue;
		}

		DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);

		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
					    vlan->vid);
		if (rc) {
			DP_ERR(edev, "Failed to configure VLAN %u\n",
			       vlan->vid);
			real_rc = rc;
			continue;
		}

		vlan->configured = true;

		if (vlan->vid != 0) {
			edev->non_configured_vlans--;
			edev->configured_vlans++;
		}
	}

	/* enable accept_any_vlan mode if we have more VLANs than credits,
	 * or remove accept_any_vlan mode if we've actually removed
	 * a non-configured vlan, and all remaining vlans are truly configured.
	 */
	if (accept_any_vlan)
		rc = qede_config_accept_any_vlan(edev, true);
	else if (!edev->non_configured_vlans)
		rc = qede_config_accept_any_vlan(edev, false);

	if (rc && !real_rc)
		real_rc = rc;

	return real_rc;
}

int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_vlan *vlan = NULL;
	int rc = 0;

	DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);

	/* Find whether entry exists */
	__qede_lock(edev);
	list_for_each_entry(vlan, &edev->vlan_list, list)
		if (vlan->vid == vid)
			break;

	if (!vlan || (vlan->vid != vid)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Vlan isn't configured\n");
		goto out;
	}

	if (edev->state != QEDE_STATE_OPEN) {
		/* As interface is already down, we don't have a VPORT
		 * instance to remove vlan filter. So just update vlan list
		 */
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Interface is down, removing VLAN from list only\n");
		qede_del_vlan_from_list(edev, vlan);
		goto out;
	}

	/* Remove vlan */
	if (vlan->configured) {
		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
					    vid);
		if (rc) {
			DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
			goto out;
		}
	}

	qede_del_vlan_from_list(edev, vlan);

	/* We have removed a VLAN - try to see if we can
	 * configure non-configured VLAN from the list.
	 */
	rc = qede_configure_vlan_filters(edev);

out:
	__qede_unlock(edev);
	return rc;
}

void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
{
	struct qede_vlan *vlan = NULL;

	if (list_empty(&edev->vlan_list))
		return;

	list_for_each_entry(vlan, &edev->vlan_list, list) {
		if (!vlan->configured)
			continue;

		vlan->configured = false;

		/* vlan0 filter isn't consuming out of our quota */
		if (vlan->vid != 0) {
			edev->non_configured_vlans++;
			edev->configured_vlans--;
		}

		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "marked vlan %d as non-configured\n", vlan->vid);
	}

	edev->accept_any_vlan = false;
}

static void qede_set_features_reload(struct qede_dev *edev,
				     struct qede_reload_args *args)
{
	edev->ndev->features = args->u.features;
}

netdev_features_t qede_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
	    !(features & NETIF_F_GRO))
		features &= ~NETIF_F_GRO_HW;

	return features;
}

int qede_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qede_dev *edev = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool need_reload = false;

	if (changes & NETIF_F_GRO_HW)
		need_reload = true;

	if (need_reload) {
		struct qede_reload_args args;

		args.u.features = features;
		args.func = &qede_set_features_reload;

		/* Make sure that we definitely need to reload.
		 * In case of an eBPF attached program, there will be no FW
		 * aggregations, so no need to actually reload.
		 */
		__qede_lock(edev);
		if (edev->xdp_prog)
			args.func(edev, &args);
		else
			qede_reload(edev, &args, true);
		__qede_unlock(edev);

		return 1;
	}

	return 0;
}

void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_tunn_params tunn_params;
	u16 t_port = ntohs(ti->port);
	int rc;

	memset(&tunn_params, 0, sizeof(tunn_params));

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!edev->dev_info.common.vxlan_enable)
			return;

		if (edev->vxlan_dst_port)
			return;

		tunn_params.update_vxlan_port = 1;
		tunn_params.vxlan_port = t_port;

		__qede_lock(edev);
		rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
		__qede_unlock(edev);

		if (!rc) {
			edev->vxlan_dst_port = t_port;
			DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
				   t_port);
		} else {
			DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n",
				  t_port);
		}

		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!edev->dev_info.common.geneve_enable)
			return;

		if (edev->geneve_dst_port)
			return;

		tunn_params.update_geneve_port = 1;
		tunn_params.geneve_port = t_port;

		__qede_lock(edev);
		rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
		__qede_unlock(edev);

		if (!rc) {
			edev->geneve_dst_port = t_port;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "Added geneve port=%d\n", t_port);
		} else {
			DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n",
				  t_port);
		}

		break;
	default:
		return;
	}
}

void qede_udp_tunnel_del(struct net_device *dev,
			 struct udp_tunnel_info *ti)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_tunn_params tunn_params;
	u16 t_port = ntohs(ti->port);

	memset(&tunn_params, 0, sizeof(tunn_params));

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (t_port != edev->vxlan_dst_port)
			return;

		tunn_params.update_vxlan_port = 1;
		tunn_params.vxlan_port = 0;

		__qede_lock(edev);
		edev->ops->tunn_config(edev->cdev, &tunn_params);
		__qede_unlock(edev);

		edev->vxlan_dst_port = 0;

		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
			   t_port);

		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (t_port != edev->geneve_dst_port)
			return;

		tunn_params.update_geneve_port = 1;
		tunn_params.geneve_port = 0;

		__qede_lock(edev);
		edev->ops->tunn_config(edev->cdev, &tunn_params);
		__qede_unlock(edev);

		edev->geneve_dst_port = 0;

		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
			   t_port);
		break;
	default:
		return;
	}
}

static void qede_xdp_reload_func(struct qede_dev *edev,
				 struct qede_reload_args *args)
{
	struct bpf_prog *old;

	old = xchg(&edev->xdp_prog, args->u.new_prog);
	if (old)
		bpf_prog_put(old);
}

static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
{
	struct qede_reload_args args;

	/* If we're called, there was already a bpf reference increment */
	args.func = &qede_xdp_reload_func;
	args.u.new_prog = prog;
	qede_reload(edev, &args, false);

	return 0;
}

int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return qede_xdp_set(edev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_attached = !!edev->xdp_prog;
		xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0;
		return 0;
	default:
		return -EINVAL;
	}
}

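/* Build and submit a multicast add/delete command covering num_macs
 * addresses packed back to back in 'mac'.
 */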
static int qede_set_mcast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char *mac, int num_macs)
{
	struct qed_filter_params filter_cmd;
	int i;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_MCAST;
	filter_cmd.filter.mcast.type = opcode;
	filter_cmd.filter.mcast.num = num_macs;

	for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
		ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

int qede_set_mac_addr(struct net_device *ndev, void *p)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int rc = 0;

	/* Make sure the state doesn't transition while changing the MAC.
	 * Also, all flows accessing the dev_addr field are doing that under
	 * this lock.
	 */
	__qede_lock(edev);

	if (!is_valid_ether_addr(addr->sa_data)) {
		DP_NOTICE(edev, "The MAC address is not valid\n");
		rc = -EFAULT;
		goto out;
	}

	if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
		DP_NOTICE(edev, "qed prevents setting MAC %pM\n",
			  addr->sa_data);
		rc = -EINVAL;
		goto out;
	}

	if (edev->state == QEDE_STATE_OPEN) {
		/* Remove MAC filter from current MAC before changing it */
		rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
					   ndev->dev_addr);
		if (rc)
			goto out;
	}

	ether_addr_copy(ndev->dev_addr, addr->sa_data);
	DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);

	if (edev->state != QEDE_STATE_OPEN) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "The device is currently down\n");
		goto out;
	}

	edev->ops->common->update_mac(edev->cdev, ndev->dev_addr);

	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
				   ndev->dev_addr);
out:
	__qede_unlock(edev);
	return rc;
}

static int
qede_configure_mcast_filtering(struct net_device *ndev,
			       enum qed_filter_rx_mode_type *accept_flags)
{
	struct qede_dev *edev = netdev_priv(ndev);
	unsigned char *mc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc = 0, mc_count;
	size_t size;

	size = 64 * ETH_ALEN;

	mc_macs = kzalloc(size, GFP_KERNEL);
	if (!mc_macs) {
		DP_NOTICE(edev,
			  "Failed to allocate memory for multicast MACs\n");
		rc = -ENOMEM;
		goto exit;
	}

	temp = mc_macs;

	/* Remove all previously configured MAC filters */
	rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   mc_macs, 1);
	if (rc)
		goto exit;

	netif_addr_lock_bh(ndev);

	mc_count = netdev_mc_count(ndev);
	if (mc_count < 64) {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(temp, ha->addr);
			temp += ETH_ALEN;
		}
	}

	netif_addr_unlock_bh(ndev);

	/* If all-multicast is requested or there are too many MACs for the
	 * available filters, fall back to multicast-promiscuous mode.
	 */
	if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
		if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
			*accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
	} else {
		/* Add all multicast MAC filters */
		rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
					   mc_macs, mc_count);
	}

exit:
	kfree(mc_macs);
	return rc;
}

void qede_set_rx_mode(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
}

/* Must be called with qede_lock held */
void qede_config_rx_mode(struct net_device *ndev)
{
	enum qed_filter_rx_mode_type accept_flags;
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_filter_params rx_mode;
	unsigned char *uc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc, uc_count;
	size_t size;

	netif_addr_lock_bh(ndev);

	uc_count = netdev_uc_count(ndev);
	size = uc_count * ETH_ALEN;

	uc_macs = kzalloc(size, GFP_ATOMIC);
	if (!uc_macs) {
		DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
		netif_addr_unlock_bh(ndev);
		return;
	}

	temp = uc_macs;
	netdev_for_each_uc_addr(ha, ndev) {
		ether_addr_copy(temp, ha->addr);
		temp += ETH_ALEN;
	}

	netif_addr_unlock_bh(ndev);

	/* Configure the struct for the Rx mode */
	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
	rx_mode.type = QED_FILTER_TYPE_RX_MODE;

	/* Remove all previous unicast secondary macs and multicast macs
	 * (configure / leave the primary mac)
	 */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
				   edev->ndev->dev_addr);
	if (rc)
		goto out;

	/* Check for promiscuous */
	if (ndev->flags & IFF_PROMISC)
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	else
		accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;

	/* Configure all filters regardless, in case promisc is rejected */
	if (uc_count < edev->dev_info.num_mac_filters) {
		int i;

		temp = uc_macs;
		for (i = 0; i < uc_count; i++) {
			rc = qede_set_ucast_rx_mac(edev,
						   QED_FILTER_XCAST_TYPE_ADD,
						   temp);
			if (rc)
				goto out;

			temp += ETH_ALEN;
		}
	} else {
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	}

	rc = qede_configure_mcast_filtering(ndev, &accept_flags);
	if (rc)
		goto out;

	/* take care of VLAN mode */
	if (ndev->flags & IFF_PROMISC) {
		qede_config_accept_any_vlan(edev, true);
	} else if (!edev->non_configured_vlans) {
		/* It's possible that accept_any_vlan mode is set due to a
		 * previous setting of IFF_PROMISC. If vlan credits are
		 * sufficient, disable accept_any_vlan.
		 */
		qede_config_accept_any_vlan(edev, false);
	}

	rx_mode.filter.accept_flags = accept_flags;
	edev->ops->filter_config(edev->cdev, &rx_mode);
out:
	kfree(uc_macs);
}

static struct qede_arfs_fltr_node *
qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location)
{
	struct qede_arfs_fltr_node *fltr;

	hlist_for_each_entry(fltr, head, node)
		if (location == fltr->sw_id)
			return fltr;

	return NULL;
}

static bool
qede_compare_user_flow_ips(struct qede_arfs_fltr_node *tpos,
			   struct ethtool_rx_flow_spec *fsp,
			   __be16 proto)
{
	if (proto == htons(ETH_P_IP)) {
		struct ethtool_tcpip4_spec *ip;

		ip = &fsp->h_u.tcp_ip4_spec;

		if (tpos->tuple.src_ipv4 == ip->ip4src &&
		    tpos->tuple.dst_ipv4 == ip->ip4dst)
			return true;
		else
			return false;
	} else {
		struct ethtool_tcpip6_spec *ip6;
		struct in6_addr *src;

		ip6 = &fsp->h_u.tcp_ip6_spec;
		src = &tpos->tuple.src_ipv6;

		if (!memcmp(src, &ip6->ip6src, sizeof(struct in6_addr)) &&
		    !memcmp(&tpos->tuple.dst_ipv6, &ip6->ip6dst,
			    sizeof(struct in6_addr)))
			return true;
		else
			return false;
	}

	return false;
}

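/* ethtool -n: report the sw_ids of all user-added classification rules.
 * User rules are always enqueued to bucket 0.
 */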
int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
			  u32 *rule_locs)
{
	struct qede_arfs_fltr_node *fltr;
	struct hlist_head *head;
	int cnt = 0, rc = 0;

	info->data = QEDE_RFS_MAX_FLTR;

	__qede_lock(edev);

	if (!edev->arfs) {
		rc = -EPERM;
		goto unlock;
	}

	head = QEDE_ARFS_BUCKET_HEAD(edev, 0);

	hlist_for_each_entry(fltr, head, node) {
		if (cnt == info->rule_cnt) {
			rc = -EMSGSIZE;
			goto unlock;
		}

		rule_locs[cnt] = fltr->sw_id;
		cnt++;
	}

	info->rule_cnt = cnt;

unlock:
	__qede_unlock(edev);
	return rc;
}

int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
	struct qede_arfs_fltr_node *fltr = NULL;
	int rc = 0;

	cmd->data = QEDE_RFS_MAX_FLTR;

	__qede_lock(edev);

	if (!edev->arfs) {
		rc = -EPERM;
		goto unlock;
	}

	fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
					 fsp->location);
	if (!fltr) {
		DP_NOTICE(edev, "Rule not found - location=0x%x\n",
			  fsp->location);
		rc = -EINVAL;
		goto unlock;
	}

	if (fltr->tuple.eth_proto == htons(ETH_P_IP)) {
		if (fltr->tuple.ip_proto == IPPROTO_TCP)
			fsp->flow_type = TCP_V4_FLOW;
		else
			fsp->flow_type = UDP_V4_FLOW;

		fsp->h_u.tcp_ip4_spec.psrc = fltr->tuple.src_port;
		fsp->h_u.tcp_ip4_spec.pdst = fltr->tuple.dst_port;
		fsp->h_u.tcp_ip4_spec.ip4src = fltr->tuple.src_ipv4;
		fsp->h_u.tcp_ip4_spec.ip4dst = fltr->tuple.dst_ipv4;
	} else {
		if (fltr->tuple.ip_proto == IPPROTO_TCP)
			fsp->flow_type = TCP_V6_FLOW;
		else
			fsp->flow_type = UDP_V6_FLOW;
		fsp->h_u.tcp_ip6_spec.psrc = fltr->tuple.src_port;
		fsp->h_u.tcp_ip6_spec.pdst = fltr->tuple.dst_port;
		memcpy(&fsp->h_u.tcp_ip6_spec.ip6src,
		       &fltr->tuple.src_ipv6, sizeof(struct in6_addr));
		memcpy(&fsp->h_u.tcp_ip6_spec.ip6dst,
		       &fltr->tuple.dst_ipv6, sizeof(struct in6_addr));
	}

	fsp->ring_cookie = fltr->rxq_id;

unlock:
	__qede_unlock(edev);
	return rc;
}

static int
qede_validate_and_check_flow_exist(struct qede_dev *edev,
				   struct ethtool_rx_flow_spec *fsp,
				   int *min_hlen)
{
	__be16 src_port = 0x0, dst_port = 0x0;
	struct qede_arfs_fltr_node *fltr;
	struct hlist_node *temp;
	struct hlist_head *head;
	__be16 eth_proto;
	u8 ip_proto;

	if (fsp->location >= QEDE_RFS_MAX_FLTR ||
	    fsp->ring_cookie >= QEDE_RSS_COUNT(edev))
		return -EINVAL;

	if (fsp->flow_type == TCP_V4_FLOW) {
		*min_hlen += sizeof(struct iphdr) +
				sizeof(struct tcphdr);
		eth_proto = htons(ETH_P_IP);
		ip_proto = IPPROTO_TCP;
	} else if (fsp->flow_type == UDP_V4_FLOW) {
		*min_hlen += sizeof(struct iphdr) +
				sizeof(struct udphdr);
		eth_proto = htons(ETH_P_IP);
		ip_proto = IPPROTO_UDP;
	} else if (fsp->flow_type == TCP_V6_FLOW) {
		*min_hlen += sizeof(struct ipv6hdr) +
				sizeof(struct tcphdr);
		eth_proto = htons(ETH_P_IPV6);
		ip_proto = IPPROTO_TCP;
	} else if (fsp->flow_type == UDP_V6_FLOW) {
		*min_hlen += sizeof(struct ipv6hdr) +
				sizeof(struct udphdr);
		eth_proto = htons(ETH_P_IPV6);
		ip_proto = IPPROTO_UDP;
	} else {
		DP_NOTICE(edev, "Unsupported flow type = 0x%x\n",
			  fsp->flow_type);
		return -EPROTONOSUPPORT;
	}

	if (eth_proto == htons(ETH_P_IP)) {
		src_port = fsp->h_u.tcp_ip4_spec.psrc;
		dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	} else {
		src_port = fsp->h_u.tcp_ip6_spec.psrc;
		dst_port = fsp->h_u.tcp_ip6_spec.pdst;
	}

	head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
	hlist_for_each_entry_safe(fltr, temp, head, node) {
		if ((fltr->tuple.ip_proto == ip_proto &&
		     fltr->tuple.eth_proto == eth_proto &&
		     qede_compare_user_flow_ips(fltr, fsp, eth_proto) &&
		     fltr->tuple.src_port == src_port &&
		     fltr->tuple.dst_port == dst_port) ||
		    fltr->sw_id == fsp->location)
			return -EEXIST;
	}

	return 0;
}

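/* Wait (up to QEDE_ARFS_POLL_COUNT * 20ms) for a filter config request to
 * complete; on timeout or firmware error the filter is torn down.
 */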
static int
qede_poll_arfs_filter_config(struct qede_dev *edev,
			     struct qede_arfs_fltr_node *fltr)
{
	int count = QEDE_ARFS_POLL_COUNT;

	while (fltr->used && count) {
		msleep(20);
		count--;
	}

	if (count == 0 || fltr->fw_rc) {
		qede_dequeue_fltr_and_config_searcher(edev, fltr);
		return -EIO;
	}

	return fltr->fw_rc;
}

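/* ethtool -N: add a user classification rule. A minimal template packet
 * (Ethernet + IP header + L4 ports) is built in n->data for the hardware
 * searcher to match against.
 */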
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	struct ethtool_rx_flow_spec *fsp = &info->fs;
	struct qede_arfs_fltr_node *n;
	int min_hlen = ETH_HLEN, rc;
	struct ethhdr *eth;
	struct iphdr *ip;
	__be16 *ports;

	__qede_lock(edev);

	if (!edev->arfs) {
		rc = -EPERM;
		goto unlock;
	}

	rc = qede_validate_and_check_flow_exist(edev, fsp, &min_hlen);
	if (rc)
		goto unlock;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		rc = -ENOMEM;
		goto unlock;
	}

	n->data = kzalloc(min_hlen, GFP_KERNEL);
	if (!n->data) {
		kfree(n);
		rc = -ENOMEM;
		goto unlock;
	}

	n->sw_id = fsp->location;
	set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
	n->buf_len = min_hlen;
	n->rxq_id = fsp->ring_cookie;
	n->next_rxq_id = n->rxq_id;
	eth = (struct ethhdr *)n->data;

	if (info->fs.flow_type == TCP_V4_FLOW ||
	    info->fs.flow_type == UDP_V4_FLOW) {
		ports = (__be16 *)(n->data + ETH_HLEN +
				   sizeof(struct iphdr));
		eth->h_proto = htons(ETH_P_IP);
		n->tuple.eth_proto = htons(ETH_P_IP);
		n->tuple.src_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4src;
		n->tuple.dst_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4dst;
		n->tuple.src_port = info->fs.h_u.tcp_ip4_spec.psrc;
		n->tuple.dst_port = info->fs.h_u.tcp_ip4_spec.pdst;
		ports[0] = n->tuple.src_port;
		ports[1] = n->tuple.dst_port;
		ip = (struct iphdr *)(n->data + ETH_HLEN);
		ip->saddr = info->fs.h_u.tcp_ip4_spec.ip4src;
		ip->daddr = info->fs.h_u.tcp_ip4_spec.ip4dst;
		ip->version = 0x4;
		ip->ihl = 0x5;

		if (info->fs.flow_type == TCP_V4_FLOW) {
			n->tuple.ip_proto = IPPROTO_TCP;
			ip->protocol = IPPROTO_TCP;
		} else {
			n->tuple.ip_proto = IPPROTO_UDP;
			ip->protocol = IPPROTO_UDP;
		}
		ip->tot_len = cpu_to_be16(min_hlen - ETH_HLEN);
	} else {
		struct ipv6hdr *ip6;

		ip6 = (struct ipv6hdr *)(n->data + ETH_HLEN);
		ports = (__be16 *)(n->data + ETH_HLEN +
				   sizeof(struct ipv6hdr));
		eth->h_proto = htons(ETH_P_IPV6);
		n->tuple.eth_proto = htons(ETH_P_IPV6);
		memcpy(&n->tuple.src_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		memcpy(&n->tuple.dst_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		n->tuple.src_port = info->fs.h_u.tcp_ip6_spec.psrc;
		n->tuple.dst_port = info->fs.h_u.tcp_ip6_spec.pdst;
		ports[0] = n->tuple.src_port;
		ports[1] = n->tuple.dst_port;
		memcpy(&ip6->saddr, &n->tuple.src_ipv6,
		       sizeof(struct in6_addr));
		memcpy(&ip6->daddr, &n->tuple.dst_ipv6,
		       sizeof(struct in6_addr));
		ip6->version = 0x6;

		if (info->fs.flow_type == TCP_V6_FLOW) {
			n->tuple.ip_proto = IPPROTO_TCP;
			ip6->nexthdr = NEXTHDR_TCP;
			ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
		} else {
			n->tuple.ip_proto = IPPROTO_UDP;
			ip6->nexthdr = NEXTHDR_UDP;
			ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
		}
	}

	rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
	if (rc)
		goto unlock;

	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
	rc = qede_poll_arfs_filter_config(edev, n);
unlock:
	__qede_unlock(edev);
	return rc;
}

int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	struct ethtool_rx_flow_spec *fsp = &info->fs;
	struct qede_arfs_fltr_node *fltr = NULL;
	int rc = -EPERM;

	__qede_lock(edev);
	if (!edev->arfs)
		goto unlock;

	fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
					 fsp->location);
	if (!fltr)
		goto unlock;

	qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false);

	rc = qede_poll_arfs_filter_config(edev, fltr);
	if (rc == 0)
		qede_dequeue_fltr_and_config_searcher(edev, fltr);

unlock:
	__qede_unlock(edev);
	return rc;
}

int qede_get_arfs_filter_count(struct qede_dev *edev)
{
	int count = 0;

	__qede_lock(edev);

	if (!edev->arfs)
		goto unlock;

	count = edev->arfs->filter_count;

unlock:
	__qede_unlock(edev);
	return count;
}