/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/bpf.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/vxlan.h>
#include <net/devlink.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

#define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \
				   XDP_PACKET_HEADROOM -		    \
				   SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))

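/* Configure @up traffic classes on the netdev: map each TC to its range
 * of TX queues and update the DCB flags. @up must be either 0 (disable
 * multi-TC) or MLX4_EN_NUM_UP_HIGH.
 */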
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP_HIGH)
		return -EINVAL;

	netdev_set_num_tc(dev, up);
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);

	/* Partition TX queues evenly amongst UPs */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		if (up) {
			if (priv->dcbx_cap)
				priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
		} else {
			priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
			priv->cee_config.pfc_state = false;
		}
	}
#endif /* CONFIG_MLX4_EN_DCB */

	return 0;
}

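/* Resize the TX ring set for @tc traffic classes (0 or
 * MLX4_EN_NUM_UP_HIGH), restarting the port if it was up, then apply
 * the new TC mapping. Allocation happens on a shadow priv first, so a
 * failure leaves the current configuration intact.
 */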
int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile new_prof;
	struct mlx4_en_priv *tmp;
	int total_count;
	int port_up = 0;
	int err = 0;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&mdev->state_lock);
	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
	new_prof.num_up = (tc == 0) ? MLX4_EN_NUM_UP_LOW :
				      MLX4_EN_NUM_UP_HIGH;
	new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up *
				   new_prof.num_up;
	total_count = new_prof.tx_ring_num[TX] + new_prof.tx_ring_num[TX_XDP];
	if (total_count > MAX_TX_RINGS) {
		err = -EINVAL;
		en_err(priv,
		       "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
		       total_count, MAX_TX_RINGS);
		goto out;
	}
	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
	if (err)
		goto out;

	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_safe_replace_resources(priv, tmp);
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err) {
			en_err(priv, "Failed starting port for setup TC\n");
			goto out;
		}
	}

	err = mlx4_en_setup_tc(dev, tc);
out:
	mutex_unlock(&mdev->state_lock);
	kfree(tmp);
	return err;
}

static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct tc_mqprio_qopt *mqprio = type_data;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	if (mqprio->num_tc && mqprio->num_tc != MLX4_EN_NUM_UP_HIGH)
		return -EINVAL;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return mlx4_en_alloc_tx_queue_per_tc(dev, mqprio->num_tc);
}

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
}

/* Must not acquire state_lock, as its corresponding work_sync
 * is done under it.
 */
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

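/* ndo_rx_flow_steer() callback: called from the RX path to steer an
 * IPv4 TCP/UDP flow to the requesting CPU's RX queue. Allocates (or
 * re-targets) a filter and defers programming the hardware rule to a
 * workqueue, since this runs in atomic context. Returns the filter id
 * on success.
 */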
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

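/* Expire up to MLX4_EN_FILTER_EXPIRY_QUOTA RFS filters that the RFS
 * core no longer needs, and rotate the filter list so the next
 * invocation resumes scanning after the last entry kept alive.
 */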
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif /* CONFIG_RFS_ACCEL */

static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err) {
			en_err(priv, "Failed configuring VLAN filter\n");
			goto out;
		}
	}
	err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
	if (err)
		en_dbg(HW, priv, "Failed adding vlan %d\n", vid);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return err;
}

static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;

	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}


static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
	return 0;
}

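/* Attach a unicast steering rule for @mac to QP @qpn, using whichever
 * steering mode the device is in (B0 unicast attach or device-managed
 * flow steering). For device-managed rules, *reg_id holds the rule id
 * needed to detach later.
 */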
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

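/* Register the port's MAC address with the device and reserve the base
 * QP that the RX rings hang off. In A0 steering mode the QP number is
 * derived directly from the MAC table index instead of being reserved.
 */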
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int index = 0;
	int err = 0;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	en_info(priv, "Steering Mode %d\n", dev->caps.steering_mode);

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP,
				    MLX4_RES_USAGE_DRIVER);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		mlx4_unregister_mac(dev, priv->port, mac);
		return err;
	}

	return 0;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);

		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}

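/* Swap the port MAC from @prev_mac to @new_mac on QP @qpn: in non-A0
 * modes this tears down the old steering rule and MAC registration,
 * rehashes the mac_hash entry, and installs rules for the new address;
 * in A0 mode the MAC table entry is replaced in place.
 */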
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				synchronize_rcu();
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				if (err)
					return err;
				if (priv->tunnel_reg_id) {
					mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
					priv->tunnel_reg_id = 0;
				}
				err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
							       &priv->tunnel_reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}

static void mlx4_en_update_user_mac(struct mlx4_en_priv *priv,
				    unsigned char new_mac[ETH_ALEN + 2])
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_USER_MAC_EN))
		return;

	err = mlx4_SET_PORT_user_mac(mdev->dev, priv->port, new_mac);
	if (err)
		en_err(priv, "Failed to pass user MAC(%pM) to Firmware for port %d, with error %d\n",
		       new_mac, priv->port, err);
}

static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
			      unsigned char new_mac[ETH_ALEN + 2])
{
	int err = 0;

	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  new_mac, priv->current_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	if (!err)
		memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));

	return err;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	unsigned char new_mac[ETH_ALEN + 2];
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	mutex_lock(&mdev->state_lock);
	memcpy(new_mac, saddr->sa_data, ETH_ALEN);
	err = mlx4_en_do_set_mac(priv, new_mac);
	if (err)
		goto out;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	mlx4_en_update_user_mac(priv, new_mac);
out:
	mutex_unlock(&mdev->state_lock);

	return err;
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}

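/* Diff the cached multicast list @src against the currently programmed
 * list @dst: entries missing from @src are marked MCLIST_REM, entries
 * new in @src are copied into @dst and marked MCLIST_ADD, and unchanged
 * entries are marked MCLIST_NONE.
 */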
static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * These are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * mark them as need to add
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore
		 */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Fail to detach multicast address\n");

				if (mclist->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Fail to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &mclist->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}

static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Walk the MAC hash table and remove any entry that is no longer
	 * present in the netdev unicast address list (keeping the port's
	 * own MAC). Each removed entry also releases its steering rule
	 * and unregisters the MAC from the device.
	 */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac,
						    priv->current_mac))
				found = true;

			if (!found) {
				mac = mlx4_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* If promiscuous mode was forced and nothing was removed in this
	 * pass, the MAC table is still full and retrying the additions
	 * below would fail again, so keep the forced state.
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;

				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}

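/* rx_mode_task worker: runs under mdev->state_lock and applies the
 * netdev RX mode to the hardware: unicast filtering, promiscuous
 * state, and the multicast list.
 */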
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

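/* Install the unicast and VXLAN steering rules for the port's own MAC
 * on the RSS base QP, and add the address to the MAC hash table.
 */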
static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 reg_id;
	int err = 0;
	int *qpn = &priv->base_qpn;
	struct mlx4_mac_entry *entry;

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		return err;

	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
				       &priv->tunnel_reg_id);
	if (err)
		goto tunnel_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}

	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
	entry->reg_id = reg_id;
	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	if (priv->tunnel_reg_id)
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);

tunnel_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
	return err;
}

static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 mac;
	unsigned int i;
	int qpn = priv->base_qpn;
	struct hlist_head *bucket;
	struct hlist_node *tmp;
	struct mlx4_mac_entry *entry;

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			mac = mlx4_mac_to_u64(entry->mac);
			en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
			       entry->mac);
			mlx4_en_uc_steer_release(priv, entry->mac,
						 qpn, entry->reg_id);

			mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
			hlist_del_rcu(&entry->hlist);
			kfree_rcu(entry, rcu);
		}
	}

	if (priv->tunnel_reg_id) {
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
		priv->tunnel_reg_id = 0;
	}
}

static void mlx4_en_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][txqueue];

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
		txqueue, tx_ring->qpn, tx_ring->sp_cqn,
		tx_ring->cons, tx_ring->prod);

	priv->port_stats.tx_timeout++;
	if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) {
		en_dbg(DRV, priv, "Scheduling port restart\n");
		queue_work(mdev->workqueue, &priv->restart_task);
	}
}

static void
mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	mlx4_en_fold_software_stats(dev);
	netdev_stats_to_stats64(stats, &dev->stats);
	spin_unlock_bh(&priv->stats_lock);
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i, t;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			cq = priv->tx_cq[t][i];
			cq->moder_cnt = priv->tx_frames;
			cq->moder_time = priv->tx_usecs;
		}
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

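/* Adaptive RX coalescing: once per sample interval, scale each RX
 * ring's moderation time between rx_usecs_low and rx_usecs_high
 * linearly with the observed packet rate, and reprogram the CQ when
 * the value changes.
 */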
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	u32 pkt_rate_high, pkt_rate_low;
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	pkt_rate_low = READ_ONCE(priv->pkt_rate_low);
	pkt_rate_high = READ_ONCE(priv->pkt_rate_high);

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		rx_packets = READ_ONCE(priv->rx_ring[ring]->packets);
		rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes);

		rx_pkt_diff = rx_packets - priv->last_moder_packets[ring];
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? (rx_bytes -
				priv->last_moder_bytes[ring]) / packets : 0;

		/* Apply auto-moderation only when packet rate
		 * exceeds a rate that it matters
		 */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate <= pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate >= pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(pkt_rate_high - pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		cq = priv->rx_cq[ring];
		if (moder_time != priv->last_moder_time[ring] ||
		    cq->moder_cnt != priv->rx_frames) {
			priv->last_moder_time[ring] = moder_time;
			cq->moder_time = moder_time;
			cq->moder_cnt = priv->rx_frames;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up) {
			err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
			if (err)
				en_dbg(HW, priv, "Could not update stats\n");

			mlx4_en_auto_moderation(priv);
		}

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		mlx4_en_do_set_mac(priv, priv->current_mac);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

/* mlx4_en_service_task - Run service task for tasks that needed to be done
 * periodically
 */
static void mlx4_en_service_task(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 service_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
			mlx4_en_ptp_overflow_check(mdev);

		mlx4_en_recover_from_oom(priv);
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_state *port_state = &priv->port_state;
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	bool up;

	if (mlx4_en_QUERY_PORT(mdev, priv->port))
		port_state->link_state = MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN;

	up = port_state->link_state == MLX4_PORT_STATE_DEV_EVENT_PORT_UP;
	if (up == netif_carrier_ok(dev))
		netif_carrier_event(dev);
	if (!up) {
		en_info(priv, "Link Down\n");
		netif_carrier_off(dev);
	} else {
		en_info(priv, "Link Up\n");
		netif_carrier_on(dev);
	}
}

static void mlx4_en_linkstate_work(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	mlx4_en_linkstate(priv);
	mutex_unlock(&mdev->state_lock);
}

static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
	int numa_node = priv->mdev->dev->numa_node;

	if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
			ring->affinity_mask);
	return 0;
}

static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
}

static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
				      int tx_ring_idx)
{
	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx];
	int rr_index = tx_ring_idx;

	tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
	tx_ring->recycle_ring = priv->rx_ring[rr_index];
	en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n",
	       TX_XDP, tx_ring_idx, rr_index);
}

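/* Bring the port up: activate RX/TX CQs and rings, reserve the base QP,
 * configure RSS steering and port parameters, attach the broadcast
 * address, and kick the RX NAPI contexts. On failure, everything
 * activated so far is unwound in reverse order.
 */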
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int err = 0;
	int i, t;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);
	INIT_LIST_HEAD(&priv->ethtool_list);
	memset(&priv->ethtool_rules[0], 0,
	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];

		err = mlx4_en_init_affinity_hint(priv, i);
		if (err) {
			en_err(priv, "Failed preparing IRQ affinity hint\n");
			goto cq_err;
		}

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			mlx4_en_free_affinity_hint(priv, i);
			goto cq_err;
		}

		for (j = 0; j < cq->size; j++) {
			struct mlx4_cqe *cqe = NULL;

			cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
			      priv->cqe_factor;
			cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		}

		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters\n");
			mlx4_en_deactivate_cq(priv, cq);
			mlx4_en_free_affinity_hint(priv, i);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i]->cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	priv->counter_index =
			mlx4_get_default_counter_index(mdev->dev, priv->port);

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
		u8 num_tx_rings_p_up = t == TX ?
			priv->num_tx_rings_p_up : priv->tx_ring_num[t];

		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			/* Configure cq */
			cq = priv->tx_cq[t][i];
			err = mlx4_en_activate_cq(priv, cq, i);
			if (err) {
				en_err(priv, "Failed allocating Tx CQ\n");
				goto tx_err;
			}
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err) {
				en_err(priv, "Failed setting cq moderation parameters\n");
				mlx4_en_deactivate_cq(priv, cq);
				goto tx_err;
			}
			en_dbg(DRV, priv,
			       "Resetting index of collapsed CQ:%d to -1\n", i);
			cq->buf->wqe_index = cpu_to_be16(0xffff);

			/* Configure ring */
			tx_ring = priv->tx_ring[t][i];
			err = mlx4_en_activate_tx_ring(priv, tx_ring,
						       cq->mcq.cqn,
						       i / num_tx_rings_p_up);
			if (err) {
				en_err(priv, "Failed allocating Tx ring\n");
				mlx4_en_deactivate_cq(priv, cq);
				goto tx_err;
			}
			clear_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &tx_ring->state);
			if (t != TX_XDP) {
				tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
				tx_ring->recycle_ring = NULL;

				/* Arm CQ for TX completions */
				mlx4_en_arm_cq(priv, cq);

			} else {
				mlx4_en_init_tx_xdp_ring_descs(priv, tx_ring);
				mlx4_en_init_recycle_ring(priv, i);

				/* XDP TX CQ should never be armed */
			}

			/* Set initial ownership of all Tx TXBBs to SW */
			for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
				*((u32 *)(tx_ring->buf + j)) = 0xffffffff;
		}
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}

	err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu);
	if (err) {
		en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n",
		       dev->mtu, priv->port, err);
		goto tx_err;
	}

	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto tx_err;
		}
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Set Unicast and VXLAN steering rules */
	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
	    mlx4_en_set_rss_steer_rules(priv))
		mlx4_warn(mdev, "Failed setting steering rules\n");

	/* Attach rx QP to broadcast address */
	eth_broadcast_addr(&mc_list[10]);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

	if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		udp_tunnel_nic_reset_ntf(dev);

	priv->port_up = true;

	/* Process all completions if exist to prevent
	 * the queues freezing if they are full
	 */
	for (i = 0; i < priv->rx_ring_num; i++) {
		local_bh_disable();
		napi_schedule(&priv->rx_cq[i]->napi);
		local_bh_enable();
	}

	clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state);
	netif_tx_start_all_queues(dev);
	netif_device_attach(dev);

	return 0;

tx_err:
	if (t == MLX4_EN_NUM_TX_TYPES) {
		t--;
		i = priv->tx_ring_num[t];
	}
	while (t >= 0) {
		while (i--) {
			mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
			mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
		}
		if (!t--)
			break;
		i = priv->tx_ring_num[t];
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--) {
		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
		mlx4_en_free_affinity_hint(priv, rx_index);
	}
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);

	return err;
}

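/* Bring the port down, undoing mlx4_en_start_port(): quiesce TX,
 * remove promiscuous/multicast/steering state, free TX buffers, and
 * deactivate all rings and CQs. If @detach is set, the netdev is also
 * detached from the stack.
 */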
void mlx4_en_stop_port(struct net_device *dev, int detach)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_mc_list *mclist, *tmp;
	struct ethtool_flow_id *flow, *tmp_flow;
	int i, t;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	if (detach)
		netif_device_detach(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	netif_tx_disable(dev);

	spin_lock_bh(&priv->stats_lock);
	mlx4_en_fold_software_stats(dev);
	/* Set port as not active */
	priv->port_up = false;
	spin_unlock_bh(&priv->stats_lock);

	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_ALL_DEFAULT);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_MC_DEFAULT);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach All multicasts */
	eth_broadcast_addr(&mc_list[10]);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
		if (mclist->tunnel_reg_id)
			mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	/* Remove flow steering rules for the port */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ASSERT_RTNL();
		list_for_each_entry_safe(flow, tmp_flow,
					 &priv->ethtool_list, list) {
			mlx4_flow_detach(mdev->dev, flow->id);
			list_del(&flow->list);
		}
	}

	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
			mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
		}
	}
	msleep(10);

	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
		for (i = 0; i < priv->tx_ring_num[t]; i++)
			mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]);

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		mlx4_en_delete_rss_steer_rules(priv);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister Mac address for the port */
	mlx4_en_put_qp(priv);
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
		mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		struct mlx4_en_cq *cq = priv->rx_cq[i];

		napi_synchronize(&cq->napi);
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
		mlx4_en_deactivate_cq(priv, cq);

		mlx4_en_free_affinity_hint(priv, i);
	}
}

static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 restart_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	rtnl_lock();
	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev, 1);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
	rtnl_unlock();
}

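/* Zero all software counters and, when not running as a slave, ask
 * firmware to reset the hardware port statistics. Called when the
 * interface is opened.
 */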
static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring **tx_ring;
	int i;

	if (!mlx4_is_slave(mdev->dev))
		if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
			en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
	memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
	memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
	memset(&priv->rx_priority_flowstats, 0,
	       sizeof(priv->rx_priority_flowstats));
	memset(&priv->tx_priority_flowstats, 0,
	       sizeof(priv->tx_priority_flowstats));
	memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));

	tx_ring = priv->tx_ring[TX];
	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
		tx_ring[i]->bytes = 0;
		tx_ring[i]->packets = 0;
		tx_ring[i]->tx_csum = 0;
		tx_ring[i]->tx_dropped = 0;
		tx_ring[i]->queue_stopped = 0;
		tx_ring[i]->wake_queue = 0;
		tx_ring[i]->tso_packets = 0;
		tx_ring[i]->xmit_more = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->bytes = 0;
		priv->rx_ring[i]->packets = 0;
		priv->rx_ring[i]->csum_ok = 0;
		priv->rx_ring[i]->csum_none = 0;
		priv->rx_ring[i]->csum_complete = 0;
	}
}

2076static int mlx4_en_open(struct net_device *dev)
2077{
2078 struct mlx4_en_priv *priv = netdev_priv(dev);
2079 struct mlx4_en_dev *mdev = priv->mdev;
2080 int err = 0;
2081
2082 mutex_lock(&mdev->state_lock);
2083
2084 if (!mdev->device_up) {
2085 en_err(priv, "Cannot open - device down/disabled\n");
2086 err = -EBUSY;
2087 goto out;
2088 }
2089
2090
2091 mlx4_en_clear_stats(dev);
2092
2093 err = mlx4_en_start_port(dev);
2094 if (err) {
2095 en_err(priv, "Failed starting port:%d\n", priv->port);
2096 goto out;
2097 }
2098 mlx4_en_linkstate(priv);
2099out:
2100 mutex_unlock(&mdev->state_lock);
2101 return err;
2102}

static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev, 0);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}

static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i, t;

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = NULL;
#endif

	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			if (priv->tx_ring[t] && priv->tx_ring[t][i])
				mlx4_en_destroy_tx_ring(priv,
							&priv->tx_ring[t][i]);
			if (priv->tx_cq[t] && priv->tx_cq[t][i])
				mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
		}
		kfree(priv->tx_ring[t]);
		kfree(priv->tx_cq[t]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
				priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
}

static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i, t;
	int node;

	/* Create tx Rings */
	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			node = cpu_to_node(i % num_online_cpus());
			if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i],
					      prof->tx_ring_size, i, t, node))
				goto err;

			if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i],
						   prof->tx_ring_size,
						   TXBB_SIZE, node, i))
				goto err;
		}
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		node = cpu_to_node(i % num_online_cpus());
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX, node))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride,
					   node, i))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
#endif

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						prof->rx_ring_size,
						priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			if (priv->tx_ring[t][i])
				mlx4_en_destroy_tx_ring(priv,
							&priv->tx_ring[t][i]);
			if (priv->tx_cq[t][i])
				mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
		}
	}
	return -ENOMEM;
}
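
/* Port reconfiguration (ring counts, TC count, XDP, timestamping) is done
 * in two phases: mlx4_en_copy_priv() seeds a scratch priv ("tmp") with the
 * new profile, resources are allocated into it, and only then does
 * mlx4_en_safe_replace_resources() swap them into the live priv. A failed
 * allocation therefore leaves the current rings untouched.
 */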
static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
			     struct mlx4_en_priv *src,
			     struct mlx4_en_port_profile *prof)
{
	int t;

	memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
	       sizeof(dst->hwtstamp_config));
	dst->num_tx_rings_p_up = prof->num_tx_rings_p_up;
	dst->rx_ring_num = prof->rx_ring_num;
	dst->flags = prof->flags;
	dst->mdev = src->mdev;
	dst->port = src->port;
	dst->dev = src->dev;
	dst->prof = prof;
	dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);

	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		dst->tx_ring_num[t] = prof->tx_ring_num[t];
		if (!dst->tx_ring_num[t])
			continue;

		dst->tx_ring[t] = kcalloc(MAX_TX_RINGS,
					  sizeof(struct mlx4_en_tx_ring *),
					  GFP_KERNEL);
		if (!dst->tx_ring[t])
			goto err_free_tx;

		dst->tx_cq[t] = kcalloc(MAX_TX_RINGS,
					sizeof(struct mlx4_en_cq *),
					GFP_KERNEL);
		if (!dst->tx_cq[t]) {
			kfree(dst->tx_ring[t]);
			goto err_free_tx;
		}
	}

	return 0;

err_free_tx:
	while (t--) {
		kfree(dst->tx_ring[t]);
		kfree(dst->tx_cq[t]);
	}
	return -ENOMEM;
}

static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
				struct mlx4_en_priv *src)
{
	int t;

	memcpy(dst->rx_ring, src->rx_ring,
	       sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
	memcpy(dst->rx_cq, src->rx_cq,
	       sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
	memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
	       sizeof(dst->hwtstamp_config));
	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		dst->tx_ring_num[t] = src->tx_ring_num[t];
		dst->tx_ring[t] = src->tx_ring[t];
		dst->tx_cq[t] = src->tx_cq[t];
	}
	dst->num_tx_rings_p_up = src->num_tx_rings_p_up;
	dst->rx_ring_num = src->rx_ring_num;
	memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
}

int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
				struct mlx4_en_priv *tmp,
				struct mlx4_en_port_profile *prof,
				bool carry_xdp_prog)
{
	struct bpf_prog *xdp_prog;
	int i, t, err;

	err = mlx4_en_copy_priv(tmp, priv, prof);
	if (err) {
		en_warn(priv, "%s: mlx4_en_copy_priv() failed, using previous configuration\n",
			__func__);
		return err;
	}

	if (mlx4_en_alloc_resources(tmp)) {
		en_warn(priv,
			"%s: Resource allocation failed, using previous configuration\n",
			__func__);
		for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
			kfree(tmp->tx_ring[t]);
			kfree(tmp->tx_cq[t]);
		}
		return -ENOMEM;
	}

	/* All rx_rings has the same xdp_prog.  Pick the first one. */
	xdp_prog = rcu_dereference_protected(
		priv->rx_ring[0]->xdp_prog,
		lockdep_is_held(&priv->mdev->state_lock));

	if (xdp_prog && carry_xdp_prog) {
		bpf_prog_add(xdp_prog, tmp->rx_ring_num);
		for (i = 0; i < tmp->rx_ring_num; i++)
			rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
					   xdp_prog);
	}

	return 0;
}

void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
				    struct mlx4_en_priv *tmp)
{
	mlx4_en_free_resources(priv);
	mlx4_en_update_priv(priv, tmp);
}

void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered) {
		devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
							      priv->port));
		unregister_netdev(dev);
	}

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	cancel_delayed_work(&priv->service_task);

	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		mlx4_en_remove_timestamp(mdev);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mdev->upper[priv->port] = NULL;

#ifdef CONFIG_RFS_ACCEL
	mlx4_en_cleanup_filters(priv);
#endif

	mlx4_en_free_resources(priv);
	mutex_unlock(&mdev->state_lock);

	free_netdev(dev);
}
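
/* XDP operates on single-page frames, so the MTU is bounded by
 * MLX4_EN_MAX_XDP_MTU while an XDP program is attached.
 */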
static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (mtu > MLX4_EN_MAX_XDP_MTU) {
		en_err(priv, "mtu:%d > max:%d when XDP prog is attached\n",
		       mtu, MLX4_EN_MAX_XDP_MTU);
		return false;
	}

	return true;
}

static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if (priv->tx_ring_num[TX_XDP] &&
	    !mlx4_en_check_xdp_mtu(dev, new_mtu))
		return -EOPNOTSUPP;

	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let restart task
			 * reset the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev, 1);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING,
						      &priv->state))
					queue_work(mdev->workqueue, &priv->restart_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}

static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	/* device doesn't support time stamping */
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
		return -EINVAL;

	/* TX HW timestamp */
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	/* RX HW timestamp */
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}
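
	/* The device timestamps either every received packet or none, so
	 * all the PTP filters above were widened to HWTSTAMP_FILTER_ALL;
	 * the widened value is reported back to user space below.
	 */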
	if (mlx4_en_reset_config(dev, config, dev->features)) {
		config.tx_type = HWTSTAMP_TX_OFF;
		config.rx_filter = HWTSTAMP_FILTER_NONE;
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
			    sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
}

static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx4_en_hwtstamp_set(dev, ifr);
	case SIOCGHWTSTAMP:
		return mlx4_en_hwtstamp_get(dev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
					      netdev_features_t features)
{
	struct mlx4_en_priv *en_priv = netdev_priv(netdev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	/* Since there is no support for separate RX C-TAG/S-TAG vlan accel
	 * enable/disable make sure S-TAG flag is always in same state as
	 * C-TAG.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX &&
	    !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
		features |= NETIF_F_HW_VLAN_STAG_RX;
	else
		features &= ~NETIF_F_HW_VLAN_STAG_RX;

	return features;
}

static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	bool reset = false;
	int ret = 0;

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
		en_info(priv, "Turn %s RX-FCS\n",
			(features & NETIF_F_RXFCS) ? "ON" : "OFF");
		reset = true;
	}

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
		u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;

		en_info(priv, "Turn %s RX-ALL\n",
			ignore_fcs_value ? "ON" : "OFF");
		ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
					      priv->port, ignore_fcs_value);
		if (ret)
			return ret;
	}

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
		en_info(priv, "Turn %s RX vlan strip offload\n",
			(features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
		reset = true;
	}

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
		en_info(priv, "Turn %s TX vlan strip offload\n",
			(features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
		en_info(priv, "Turn %s TX S-VLAN strip offload\n",
			(features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
		en_info(priv, "Turn %s loopback\n",
			(features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
		mlx4_en_update_loopback_state(netdev, features);
	}

	if (reset) {
		ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
					   features);
		if (ret)
			return ret;
	}

	return 0;
}

static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac);
}

static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
			       __be16 vlan_proto)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos,
				vlan_proto);
}

static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
			       int max_tx_rate)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
				max_tx_rate);
}

static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
}

static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
}

static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}

static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
				struct ifla_vf_stats *vf_stats)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
}

#define PORT_ID_BYTE_LEN 8
static int mlx4_en_get_phys_port_id(struct net_device *dev,
				    struct netdev_phys_item_id *ppid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_dev *mdev = priv->mdev->dev;
	int i;
	u64 phys_port_id = mdev->caps.phys_port_id[priv->port];

	if (!phys_port_id)
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(phys_port_id);
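	/* Serialize the 64-bit id into ppid->id most-significant byte first */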
	for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
		ppid->id[i] = phys_port_id & 0xff;
		phys_port_id >>= 8;
	}
	return 0;
}
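
/* udp_tunnel_nic sync callback. The single-entry table in mlx4_udp_tunnels
 * below means the core tracks at most one offloaded VXLAN port for us; when
 * the entry is empty, udp_tunnel_nic_get_port() reports port 0, which
 * disables VXLAN steering through the !!priv->vxlan_port argument.
 */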
static int mlx4_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct udp_tunnel_info ti;
	int ret;

	udp_tunnel_nic_get_port(dev, table, 0, &ti);
	priv->vxlan_port = ti.port;

	ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
	if (ret)
		return ret;

	return mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
				   VXLAN_STEER_BY_OUTER_MAC,
				   !!priv->vxlan_port);
}

static const struct udp_tunnel_nic_info mlx4_udp_tunnels = {
	.sync_table	= mlx4_udp_tunnel_sync,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
			  UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
};

static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	/* The ConnectX-3 doesn't support outer IPv6 checksums but it does
	 * support inner IPv6 checksums and segmentation so we need to
	 * strip that feature if this is an IPv6 encapsulated frame.
	 */
	if (skb->encapsulation &&
	    (skb->ip_summed == CHECKSUM_PARTIAL)) {
		struct mlx4_en_priv *priv = netdev_priv(dev);

		if (!priv->vxlan_port ||
		    (ip_hdr(skb)->version != 4) ||
		    (udp_hdr(skb)->dest != priv->vxlan_port))
			features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}

	return features;
}

static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index];
	struct mlx4_update_qp_params params;
	int err;

	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
		return -EOPNOTSUPP;

	/* rate provided to us in Mbs, check if it fits into 12 bits, if not use Gbs */
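	/* Example: maxrate = 5000 Mb/s does not fit in 12 bits (max 4095),
	 * so it is programmed as 5000 / 1000 = 5 in Gb/s units, while
	 * maxrate = 900 is programmed as 900 in Mb/s units and maxrate = 0
	 * removes the rate limit.
	 */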
	if (maxrate >> 12) {
		params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
		params.rate_val = maxrate / 1000;
	} else if (maxrate) {
		params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
		params.rate_val = maxrate;
	} else { /* zero serves to revoke the QP rate-limitation */
		params.rate_unit = 0;
		params.rate_val = 0;
	}

	err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
			     &params);
	return err;
}

static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile new_prof;
	struct bpf_prog *old_prog;
	struct mlx4_en_priv *tmp;
	int tx_changed = 0;
	int xdp_ring_num;
	int port_up = 0;
	int err;
	int i;

	xdp_ring_num = prog ? priv->rx_ring_num : 0;

	/* No need to reconfigure buffers when simply swapping the
	 * program for a new one.
	 */
	if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
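		/* Each RX ring holds its own reference on the program; the
		 * caller's reference covers one ring, so take rx_ring_num - 1
		 * more before publishing it to the rings.
		 */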
		if (prog)
			bpf_prog_add(prog, priv->rx_ring_num - 1);

		mutex_lock(&mdev->state_lock);
		for (i = 0; i < priv->rx_ring_num; i++) {
			old_prog = rcu_dereference_protected(
					priv->rx_ring[i]->xdp_prog,
					lockdep_is_held(&mdev->state_lock));
			rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
			if (old_prog)
				bpf_prog_put(old_prog);
		}
		mutex_unlock(&mdev->state_lock);
		return 0;
	}

	if (!mlx4_en_check_xdp_mtu(dev, dev->mtu))
		return -EOPNOTSUPP;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	if (prog)
		bpf_prog_add(prog, priv->rx_ring_num - 1);

	mutex_lock(&mdev->state_lock);
	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
	new_prof.tx_ring_num[TX_XDP] = xdp_ring_num;

	if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) {
		tx_changed = 1;
		new_prof.tx_ring_num[TX] =
			MAX_TX_RINGS - ALIGN(xdp_ring_num, priv->prof->num_up);
		en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
	}

	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
	if (err) {
		if (prog)
			bpf_prog_sub(prog, priv->rx_ring_num - 1);
		goto unlock_out;
	}

	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_safe_replace_resources(priv, tmp);
	if (tx_changed)
		netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);

	for (i = 0; i < priv->rx_ring_num; i++) {
		old_prog = rcu_dereference_protected(
				priv->rx_ring[i]->xdp_prog,
				lockdep_is_held(&mdev->state_lock));
		rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
		if (old_prog)
			bpf_prog_put(old_prog);
	}

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err) {
			en_err(priv, "Failed starting port %d for XDP change\n",
			       priv->port);
			if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
				queue_work(mdev->workqueue, &priv->restart_task);
		}
	}

unlock_out:
	mutex_unlock(&mdev->state_lock);
	kfree(tmp);
	return err;
}

static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mlx4_xdp_set(dev, xdp->prog);
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats64	= mlx4_en_get_stats64,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_eth_ioctl		= mlx4_en_ioctl,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_fix_features	= mlx4_en_fix_features,
	.ndo_setup_tc		= __mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
	.ndo_features_check	= mlx4_en_features_check,
	.ndo_set_tx_maxrate	= mlx4_en_set_tx_maxrate,
	.ndo_bpf		= mlx4_xdp,
};
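
/* The master (PF) variant below additionally wires up the ndo_set_vf_* /
 * ndo_get_vf_* callbacks used to manage SR-IOV virtual functions.
 */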
static const struct net_device_ops mlx4_netdev_ops_master = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats64	= mlx4_en_get_stats64,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= mlx4_en_set_vf_mac,
	.ndo_set_vf_vlan	= mlx4_en_set_vf_vlan,
	.ndo_set_vf_rate	= mlx4_en_set_vf_rate,
	.ndo_set_vf_spoofchk	= mlx4_en_set_vf_spoofchk,
	.ndo_set_vf_link_state	= mlx4_en_set_vf_link_state,
	.ndo_get_vf_stats	= mlx4_en_get_vf_stats,
	.ndo_get_vf_config	= mlx4_en_get_vf_config,
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_fix_features	= mlx4_en_fix_features,
	.ndo_setup_tc		= __mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
	.ndo_features_check	= mlx4_en_features_check,
	.ndo_set_tx_maxrate	= mlx4_en_set_tx_maxrate,
	.ndo_bpf		= mlx4_xdp,
};

struct mlx4_en_bond {
	struct work_struct work;
	struct mlx4_en_priv *priv;
	int is_bonded;
	struct mlx4_port_map port_map;
};

static void mlx4_en_bond_work(struct work_struct *work)
{
	struct mlx4_en_bond *bond = container_of(work,
						 struct mlx4_en_bond,
						 work);
	int err = 0;
	struct mlx4_dev *dev = bond->priv->mdev->dev;

	if (bond->is_bonded) {
		if (!mlx4_is_bonded(dev)) {
			err = mlx4_bond(dev);
			if (err)
				en_err(bond->priv, "Fail to bond device\n");
		}
		if (!err) {
			err = mlx4_port_map_set(dev, &bond->port_map);
			if (err)
				en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n",
				       bond->port_map.port1,
				       bond->port_map.port2,
				       err);
		}
	} else if (mlx4_is_bonded(dev)) {
		err = mlx4_unbond(dev);
		if (err)
			en_err(bond->priv, "Fail to unbond device\n");
	}
	dev_put(bond->priv->dev);
	kfree(bond);
}
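
/* The work item is allocated with GFP_ATOMIC so no blocking allocation
 * happens on the notifier path. The dev_hold() here keeps the netdev alive
 * until the queued work releases it with dev_put() in mlx4_en_bond_work().
 */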
static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
				   u8 v2p_p1, u8 v2p_p2)
{
	struct mlx4_en_bond *bond = NULL;

	bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
	if (!bond)
		return -ENOMEM;

	INIT_WORK(&bond->work, mlx4_en_bond_work);
	bond->priv = priv;
	bond->is_bonded = is_bonded;
	bond->port_map.port1 = v2p_p1;
	bond->port_map.port2 = v2p_p2;
	dev_hold(priv->dev);
	queue_work(priv->mdev->workqueue, &bond->work);
	return 0;
}

int mlx4_en_netdev_event(struct notifier_block *this,
			 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	u8 port = 0;
	struct mlx4_en_dev *mdev;
	struct mlx4_dev *dev;
	int i, num_eth_ports = 0;
	bool do_bond = true;
	struct mlx4_en_priv *priv;
	u8 v2p_port1 = 0;
	u8 v2p_port2 = 0;

	if (!net_eq(dev_net(ndev), &init_net))
		return NOTIFY_DONE;

	mdev = container_of(this, struct mlx4_en_dev, nb);
	dev = mdev->dev;

	/* Go into this mode only when two network devices set on two ports
	 * of the same mlx4 device are slaves of the same bonding master
	 */
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		++num_eth_ports;
		if (!port && (mdev->pndev[i] == ndev))
			port = i;
		mdev->upper[i] = mdev->pndev[i] ?
			netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
		/* condition not met: network device is a slave */
		if (!mdev->upper[i])
			do_bond = false;
		if (num_eth_ports < 2)
			continue;
		/* condition not met: same master */
		if (mdev->upper[i] != mdev->upper[i-1])
			do_bond = false;
	}

	do_bond = (num_eth_ports == 2) ? do_bond : false;

	/* handle the event only when it carries bonding info for one of
	 * our ports */
	if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
		return NOTIFY_DONE;

	priv = netdev_priv(ndev);
	if (do_bond) {
		struct netdev_notifier_bonding_info *notifier_info = ptr;
		struct netdev_bonding_info *bonding_info =
			&notifier_info->bonding_info;

		/* required mode 1, 2 or 4 */
		if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
		    (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
		    (bonding_info->master.bond_mode != BOND_MODE_8023AD))
			do_bond = false;

		/* require exactly 2 slaves */
		if (bonding_info->master.num_slaves != 2)
			do_bond = false;

		/* calc v2p */
		if (do_bond) {
			if (bonding_info->master.bond_mode ==
			    BOND_MODE_ACTIVEBACKUP) {
				/* in active-backup mode virtual ports are
				 * mapped to the physical port of the active
				 * slave */
				if (bonding_info->slave.state ==
				    BOND_STATE_BACKUP) {
					if (port == 1) {
						v2p_port1 = 2;
						v2p_port2 = 2;
					} else {
						v2p_port1 = 1;
						v2p_port2 = 1;
					}
				} else {
					if (port == 1) {
						v2p_port1 = 1;
						v2p_port2 = 1;
					} else {
						v2p_port1 = 2;
						v2p_port2 = 2;
					}
				}
			} else { /* Active-Active */
				/* in active-active mode a virtual port is
				 * mapped to the native physical port if and only
				 * if the physical port is up */
				__s8 link = bonding_info->slave.link;

				if (port == 1)
					v2p_port2 = 2;
				else
					v2p_port1 = 1;
				if ((link == BOND_LINK_UP) ||
				    (link == BOND_LINK_FAIL)) {
					if (port == 1)
						v2p_port1 = 1;
					else
						v2p_port2 = 2;
				} else {
					if (port == 1)
						v2p_port1 = 2;
					else
						v2p_port2 = 1;
				}
			}
		}
	}
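
	/* Example (active-backup): if this event is for port 1 and its slave
	 * just became BOND_STATE_BACKUP, port 2 must hold the active slave,
	 * so both virtual ports were mapped to physical port 2 above.
	 */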
	mlx4_en_queue_bond_work(priv, do_bond,
				v2p_port1, v2p_port2);

	return NOTIFY_DONE;
}

void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
				     struct mlx4_en_stats_bitmap *stats_bitmap,
				     u8 rx_ppp, u8 rx_pause,
				     u8 tx_ppp, u8 tx_pause)
{
	int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;

	if (!mlx4_is_slave(dev) &&
	    (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
		mutex_lock(&stats_bitmap->mutex);
		bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS);

		if (rx_ppp)
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_PRIORITY_STATS_RX);
		last_i += NUM_FLOW_PRIORITY_STATS_RX;

		if (rx_pause && !(rx_ppp))
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_STATS_RX);
		last_i += NUM_FLOW_STATS_RX;

		if (tx_ppp)
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_PRIORITY_STATS_TX);
		last_i += NUM_FLOW_PRIORITY_STATS_TX;

		if (tx_pause && !(tx_ppp))
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_STATS_TX);
		last_i += NUM_FLOW_STATS_TX;

		mutex_unlock(&stats_bitmap->mutex);
	}
}

void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
			      struct mlx4_en_stats_bitmap *stats_bitmap,
			      u8 rx_ppp, u8 rx_pause,
			      u8 tx_ppp, u8 tx_pause)
{
	int last_i = 0;

	mutex_init(&stats_bitmap->mutex);
	bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS);
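
	/* The bitmap mirrors the fixed report order of the stats: main,
	 * port, PF, flow (PFC/pause), packet, XDP and PHY counters; last_i
	 * tracks the running offset while each group is enabled or skipped.
	 */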
	if (mlx4_is_slave(dev)) {
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(rx_packets), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(tx_packets), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(rx_bytes), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(tx_bytes), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(rx_dropped), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(tx_dropped), 1);
	} else {
		bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS);
	}
	last_i += NUM_MAIN_STATS;

	bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
	last_i += NUM_PORT_STATS;

	if (mlx4_is_master(dev))
		bitmap_set(stats_bitmap->bitmap, last_i,
			   NUM_PF_STATS);
	last_i += NUM_PF_STATS;

	mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
					rx_ppp, rx_pause,
					tx_ppp, tx_pause);
	last_i += NUM_FLOW_STATS;

	if (!mlx4_is_slave(dev))
		bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
	last_i += NUM_PKT_STATS;

	bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS);
	last_i += NUM_XDP_STATS;

	if (!mlx4_is_slave(dev))
		bitmap_set(stats_bitmap->bitmap, last_i, NUM_PHY_STATS);
	last_i += NUM_PHY_STATS;
}

int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i, t;
	int err;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 MAX_TX_RINGS, MAX_RX_RINGS);
	if (dev == NULL)
		return -ENOMEM;

	netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]);
	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);

	SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
	dev->dev_port = port - 1;

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
	INIT_WORK(&priv->restart_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate_work);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
#ifdef CONFIG_RFS_ACCEL
	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
				       MLX4_WQE_CTRL_SOLICITED);
	priv->num_tx_rings_p_up = mdev->profile.max_num_tx_rings_p_up;
	priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
	netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));

	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		priv->tx_ring_num[t] = prof->tx_ring_num[t];
		if (!priv->tx_ring_num[t])
			continue;

		priv->tx_ring[t] = kcalloc(MAX_TX_RINGS,
					   sizeof(struct mlx4_en_tx_ring *),
					   GFP_KERNEL);
		if (!priv->tx_ring[t]) {
			err = -ENOMEM;
			goto out;
		}
		priv->tx_cq[t] = kcalloc(MAX_TX_RINGS,
					 sizeof(struct mlx4_en_cq *),
					 GFP_KERNEL);
		if (!priv->tx_cq[t]) {
			err = -ENOMEM;
			goto out;
		}
	}
	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->cqe_size = mdev->dev->caps.cqe_size;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		u8 prio;

		for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
			priv->ets.prio_tc[prio] = prio;
			priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR;
		}

		priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
			DCB_CAP_DCBX_VER_IEEE;
		priv->flags |= MLX4_EN_DCB_ENABLED;
		priv->cee_config.pfc_state = false;

		for (i = 0; i < MLX4_EN_NUM_UP_HIGH; i++)
			priv->cee_config.dcb_pfc[i] = pfc_disabled;

		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
		} else {
			en_info(priv, "enabling only PFC DCB ops\n");
			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
		}
	}
#endif

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
		INIT_HLIST_HEAD(&priv->mac_hash[i]);

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];

	if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
	    MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
		priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
	if (!is_valid_ether_addr(dev->dev_addr)) {
		en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
		       priv->port, dev->dev_addr);
		err = -EINVAL;
		goto out;
	} else if (mlx4_is_slave(priv->mdev->dev) &&
		   (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
		/* Random MAC was assigned in mlx4_slave_cap
		 * in mlx4_core module
		 */
		dev->addr_assign_type |= NET_ADDR_RANDOM;
		en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
	}

	memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Initialize time stamping config */
	priv->hwtstamp_config.flags = 0;
	priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
	priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	if (mlx4_is_master(priv->mdev->dev))
		dev->netdev_ops = &mlx4_netdev_ops_master;
	else
		dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	dev->ethtool_ops = &mlx4_en_ethtool_ops;

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	if (mdev->dev->caps.tunnel_offload_mode ==
	    MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
				    NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_PARTIAL;
		dev->features |= NETIF_F_GSO_UDP_TUNNEL |
				 NETIF_F_GSO_UDP_TUNNEL_CSUM |
				 NETIF_F_GSO_PARTIAL;
		dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
		dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				       NETIF_F_RXCSUM |
				       NETIF_F_TSO | NETIF_F_TSO6 |
				       NETIF_F_GSO_UDP_TUNNEL |
				       NETIF_F_GSO_UDP_TUNNEL_CSUM |
				       NETIF_F_GSO_PARTIAL;

		dev->udp_tunnel_nic_info = &mlx4_udp_tunnels;
	}

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_VLAN_CTAG_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
		dev->features |= NETIF_F_HW_VLAN_STAG_RX |
			NETIF_F_HW_VLAN_STAG_FILTER;
		dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
	}

	if (mlx4_is_slave(mdev->dev)) {
		bool vlan_offload_disabled;
		int phv;

		err = get_phv_bit(mdev->dev, port, &phv);
		if (!err && phv) {
			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
			priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
		}
		err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port,
							&vlan_offload_disabled);
		if (!err && vlan_offload_disabled) {
			dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
					      NETIF_F_HW_VLAN_CTAG_RX |
					      NETIF_F_HW_VLAN_STAG_TX |
					      NETIF_F_HW_VLAN_STAG_RX);
			dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
					   NETIF_F_HW_VLAN_CTAG_RX |
					   NETIF_F_HW_VLAN_STAG_TX |
					   NETIF_F_HW_VLAN_STAG_RX);
		}
	} else {
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
		    !(mdev->dev->caps.flags2 &
		      MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
	}

	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
		dev->hw_features |= NETIF_F_RXFCS;

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
		dev->hw_features |= NETIF_F_RXALL;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
		dev->hw_features |= NETIF_F_NTUPLE;

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		dev->priv_flags |= IFF_UNICAST_FLT;

	/* Setting a default hash function value */
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
	} else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
		priv->rss_hash_fn = ETH_RSS_HASH_XOR;
	} else {
		en_warn(priv,
			"No RSS hash capabilities exposed, using Toeplitz\n");
		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
	}

	/* MTU range: 68 - hw-specific max */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = priv->max_mtu;

	mdev->pndev[port] = dev;
	mdev->upper[port] = NULL;

	netif_carrier_off(dev);
	mlx4_en_set_default_moderation(priv);

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);

	/* Configure port */
	mlx4_en_calc_rx_buf(dev);
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto out;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto out;
		}
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

	/* Initialize time stamp mechanism */
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		mlx4_en_init_timestamp(mdev);

	/* Launch service task before doing statistics */
	queue_delayed_work(mdev->workqueue, &priv->service_task,
			   SERVICE_TASK_DELAY);

	mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
				 mdev->profile.prof[priv->port].rx_ppp,
				 mdev->profile.prof[priv->port].rx_pause,
				 mdev->profile.prof[priv->port].tx_ppp,
				 mdev->profile.prof[priv->port].tx_pause);

	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}

	priv->registered = 1;
	devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
				  dev);

	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}

int mlx4_en_reset_config(struct net_device *dev,
			 struct hwtstamp_config ts_config,
			 netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile new_prof;
	struct mlx4_en_priv *tmp;
	int port_up = 0;
	int err = 0;

	if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
	    priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
	    !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
	    !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
		return 0; /* Nothing to change */

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
	    (features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
		en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
		return -EINVAL;
	}

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&mdev->state_lock);

	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
	memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));

	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
	if (err)
		goto out;

	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_safe_replace_resources(priv, tmp);

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
		else
			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	} else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
		/* RX time-stamping is OFF, update the RX vlan offload
		 * to the latest wanted state
		 */
		if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
		else
			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	}

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
		if (features & NETIF_F_RXFCS)
			dev->features |= NETIF_F_RXFCS;
		else
			dev->features &= ~NETIF_F_RXFCS;
	}

	/* RX vlan offload and RX time-stamping can't co-exist !
	 * Regardless of the caller's choice,
	 * Turn Off RX vlan offload in case of time-stamping is ON
	 */
	if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
			en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	}

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	if (!err)
		err = mlx4_en_moderation_update(priv);
out:
	mutex_unlock(&mdev->state_lock);
	kfree(tmp);
	if (!err)
		netdev_features_change(dev);
	return err;
}