#include <linux/bpf.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/vxlan.h>
#include <net/devlink.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"
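
/* Largest MTU an XDP program can handle: one page per frame, less the
 * Ethernet header, room for two stacked VLAN tags, and the headroom
 * XDP programs may prepend.
 */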
#define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \
				   XDP_PACKET_HEADROOM))

int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP_HIGH)
		return -EINVAL;

	netdev_set_num_tc(dev, up);
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);

	/* Partition Tx queues evenly amongst UP's */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		if (up) {
			if (priv->dcbx_cap)
				priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
		} else {
			priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
			priv->cee_config.pfc_state = false;
		}
	}
#endif /* CONFIG_MLX4_EN_DCB */

	return 0;
}
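
/* Re-partition the TX queue layout when the number of traffic classes
 * changes: rebuild the port resources with the new ring count, restart
 * the port if it was up, and then program the TC-to-queue mapping.
 */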
int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile new_prof;
	struct mlx4_en_priv *tmp;
	int port_up = 0;
	int err = 0;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&mdev->state_lock);
	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
	new_prof.num_up = (tc == 0) ? MLX4_EN_NUM_UP_LOW :
				      MLX4_EN_NUM_UP_HIGH;
	new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up *
				   new_prof.num_up;
	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
	if (err)
		goto out;

	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_safe_replace_resources(priv, tmp);
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err) {
			en_err(priv, "Failed starting port for setup TC\n");
			goto out;
		}
	}

	err = mlx4_en_setup_tc(dev, tc);
out:
	mutex_unlock(&mdev->state_lock);
	kfree(tmp);
	return err;
}

static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct tc_mqprio_qopt *mqprio = type_data;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	if (mqprio->num_tc && mqprio->num_tc != MLX4_EN_NUM_UP_HIGH)
		return -EINVAL;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return mlx4_en_alloc_tx_queue_per_tc(dev, mqprio->num_tc);
}

#ifdef CONFIG_RFS_ACCEL
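
/* Accelerated RFS (aRFS) support: each steered flow is tracked by a
 * mlx4_en_filter entry, hashed by its 4-tuple and attached to the HW
 * as a flow steering rule from workqueue context.
 */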
struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before
					 * the filter is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
}

static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}
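
/* Hash the flow 4-tuple into one of the per-port filter buckets. */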
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}
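
/* .ndo_rx_flow_steer callback: called by the RFS core when a flow
 * should be steered to a different RX queue. Only non-fragmented
 * TCP/UDP over IPv4 is supported.
 */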
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err) {
			en_err(priv, "Failed configuring VLAN filter\n");
			goto out;
		}
	}
	err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
	if (err)
		en_dbg(HW, priv, "Failed adding vlan %d\n", vid);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return err;
}
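
/* Expand a MAC address packed into the low 48 bits of a u64 into a
 * byte array (the two trailing pad bytes are zeroed).
 */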
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;

	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}

static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
	return 0;
}
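
/* Attach a unicast steering rule for @mac to the given QP, using
 * whichever steering mode the device is running in (B0 or
 * device-managed flow steering).
 */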
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int index = 0;
	int err = 0;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	en_info(priv, "Steering Mode %d\n", dev->caps.steering_mode);

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);

		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP,
				    MLX4_RES_USAGE_DRIVER);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		mlx4_unregister_mac(dev, priv->port, mac);
		return err;
	}

	return 0;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);

		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}
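
/* Swap the port's unicast MAC. In B0/DMFS modes the old steering rule
 * and MAC registration are torn down and re-created for the new
 * address (including any VXLAN tunnel rule); in A0 mode the MAC table
 * entry is replaced in place.
 */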
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				synchronize_rcu();
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				if (err)
					return err;
				if (priv->tunnel_reg_id) {
					mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
					priv->tunnel_reg_id = 0;
				}
				err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
							       &priv->tunnel_reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}

static void mlx4_en_update_user_mac(struct mlx4_en_priv *priv,
				    unsigned char new_mac[ETH_ALEN + 2])
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_USER_MAC_EN))
		return;

	err = mlx4_SET_PORT_user_mac(mdev->dev, priv->port, new_mac);
	if (err)
		en_err(priv, "Failed to pass user MAC(%pM) to Firmware for port %d, with error %d\n",
		       new_mac, priv->port, err);
}

static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
			      unsigned char new_mac[ETH_ALEN + 2])
{
	int err = 0;

	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  new_mac, priv->current_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	if (!err)
		memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));

	return err;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	unsigned char new_mac[ETH_ALEN + 2];
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	mutex_lock(&mdev->state_lock);
	memcpy(new_mac, saddr->sa_data, ETH_ALEN);
	err = mlx4_en_do_set_mac(priv, new_mac);
	if (err)
		goto out;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	mlx4_en_update_user_mac(priv, new_mac);
out:
	mutex_unlock(&mdev->state_lock);

	return err;
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}

static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * These are the entries that are present in dst but do not
	 * exist in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * mark them as need to add
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}
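
/* Enter promiscuous mode: install the catch-all steering appropriate
 * for the active steering mode and disable the port multicast filter.
 */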
static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore
		 */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Fail to detach multicast address\n");

				if (mclist->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Fail to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &mclist->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}

static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note: priv->mc_list is already set up by mlx4_en_set_multicast()
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac,
						    priv->current_mac))
				found = true;

			if (!found) {
				mac = mlx4_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;

				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}

static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 reg_id;
	int err = 0;
	int *qpn = &priv->base_qpn;
	struct mlx4_mac_entry *entry;

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		return err;

	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
				       &priv->tunnel_reg_id);
	if (err)
		goto tunnel_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}

	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
	entry->reg_id = reg_id;
	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	if (priv->tunnel_reg_id)
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);

tunnel_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
	return err;
}

static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 mac;
	unsigned int i;
	int qpn = priv->base_qpn;
	struct hlist_head *bucket;
	struct hlist_node *tmp;
	struct mlx4_mac_entry *entry;

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			mac = mlx4_mac_to_u64(entry->mac);
			en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
			       entry->mac);
			mlx4_en_uc_steer_release(priv, entry->mac,
						 qpn, entry->reg_id);

			mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
			hlist_del_rcu(&entry->hlist);
			kfree_rcu(entry, rcu);
		}
	}

	if (priv->tunnel_reg_id) {
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
		priv->tunnel_reg_id = 0;
	}
}
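
/* .ndo_tx_timeout callback: log the state of any stopped TX queues and
 * kick the watchdog task to restart the port.
 */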
static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
		struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i];

		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
			i, tx_ring->qpn, tx_ring->sp_cqn,
			tx_ring->cons, tx_ring->prod);
	}

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}

static void
mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	mlx4_en_fold_software_stats(dev);
	netdev_stats_to_stats64(stats, &dev->stats);
	spin_unlock_bh(&priv->stats_lock);
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i, t;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			cq = priv->tx_cq[t][i];
			cq->moder_cnt = priv->tx_frames;
			cq->moder_time = priv->tx_usecs;
		}
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}
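
/* Adaptive RX coalescing: once per sample interval, scale each RX
 * ring's moderation time linearly between rx_usecs_low and
 * rx_usecs_high according to the measured packet rate.
 */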
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	u32 pkt_rate_high, pkt_rate_low;
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	pkt_rate_low = READ_ONCE(priv->pkt_rate_low);
	pkt_rate_high = READ_ONCE(priv->pkt_rate_high);

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		rx_packets = READ_ONCE(priv->rx_ring[ring]->packets);
		rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes);

		rx_pkt_diff = rx_packets - priv->last_moder_packets[ring];
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? (rx_bytes -
				priv->last_moder_bytes[ring]) / packets : 0;

		/* Apply auto-moderation only when packet rate
		 * exceeds a rate that it matters
		 */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate <= pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate >= pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(pkt_rate_high - pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		cq = priv->rx_cq[ring];
		if (moder_time != priv->last_moder_time[ring] ||
		    cq->moder_cnt != priv->rx_frames) {
			priv->last_moder_time[ring] = moder_time;
			cq->moder_time = moder_time;
			cq->moder_cnt = priv->rx_frames;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up) {
			err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
			if (err)
				en_dbg(HW, priv, "Could not update stats\n");

			mlx4_en_auto_moderation(priv);
		}

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		mlx4_en_do_set_mac(priv, priv->current_mac);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

/* mlx4_en_service_task - Run service task for tasks that needed to be done
 * periodically
 */
static void mlx4_en_service_task(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 service_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
			mlx4_en_ptp_overflow_check(mdev);

		mlx4_en_recover_from_oom(priv);
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report at the log
	 */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}

static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
	int numa_node = priv->mdev->dev->numa_node;

	if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
			ring->affinity_mask);
	return 0;
}

static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
}

static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
				      int tx_ring_idx)
{
	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx];
	int rr_index = tx_ring_idx;

	tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
	tx_ring->recycle_ring = priv->rx_ring[rr_index];
	en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n",
	       TX_XDP, tx_ring_idx, rr_index);
}
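
/* Bring the port fully up: activate RX rings and CQs, reserve the base
 * QP, configure RSS and the drop QP, activate TX rings, program the
 * port in FW, install steering rules, and finally wake the queues.
 * Called with mdev->state_lock held.
 */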
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int err = 0;
	int i, t;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);
	INIT_LIST_HEAD(&priv->ethtool_list);
	memset(&priv->ethtool_rules[0], 0,
	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];

		err = mlx4_en_init_affinity_hint(priv, i);
		if (err) {
			en_err(priv, "Failed preparing IRQ affinity hint\n");
			goto cq_err;
		}

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			mlx4_en_free_affinity_hint(priv, i);
			goto cq_err;
		}

		for (j = 0; j < cq->size; j++) {
			struct mlx4_cqe *cqe = NULL;

			cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
			      priv->cqe_factor;
			cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		}

		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters\n");
			mlx4_en_deactivate_cq(priv, cq);
			mlx4_en_free_affinity_hint(priv, i);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i]->cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	priv->counter_index =
			mlx4_get_default_counter_index(mdev->dev, priv->port);

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
		u8 num_tx_rings_p_up = t == TX ?
			priv->num_tx_rings_p_up : priv->tx_ring_num[t];

		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			/* Configure cq */
			cq = priv->tx_cq[t][i];
			err = mlx4_en_activate_cq(priv, cq, i);
			if (err) {
				en_err(priv, "Failed allocating Tx CQ\n");
				goto tx_err;
			}
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err) {
				en_err(priv, "Failed setting cq moderation parameters\n");
				mlx4_en_deactivate_cq(priv, cq);
				goto tx_err;
			}
			en_dbg(DRV, priv,
			       "Resetting index of collapsed CQ:%d to -1\n", i);
			cq->buf->wqe_index = cpu_to_be16(0xffff);

			/* Configure ring */
			tx_ring = priv->tx_ring[t][i];
			err = mlx4_en_activate_tx_ring(priv, tx_ring,
						       cq->mcq.cqn,
						       i / num_tx_rings_p_up);
			if (err) {
				en_err(priv, "Failed allocating Tx ring\n");
				mlx4_en_deactivate_cq(priv, cq);
				goto tx_err;
			}
			if (t != TX_XDP) {
				tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
				tx_ring->recycle_ring = NULL;

				/* Arm CQ for TX completions */
				mlx4_en_arm_cq(priv, cq);
			} else {
				mlx4_en_init_tx_xdp_ring_descs(priv, tx_ring);
				mlx4_en_init_recycle_ring(priv, i);
				/* XDP TX CQ should never be armed */
			}

			/* Set initial ownership of all Tx TXBBs to SW (1) */
			for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
				*((u32 *)(tx_ring->buf + j)) = 0xffffffff;
		}
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}

	err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu);
	if (err) {
		en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n",
		       dev->mtu, priv->port, err);
		goto tx_err;
	}

	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto tx_err;
		}
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Set Unicast and VXLAN steering rules */
	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
	    mlx4_en_set_rss_steer_rules(priv))
		mlx4_warn(mdev, "Failed setting steering rules\n");

	/* Attach rx QP to broadcast address */
	eth_broadcast_addr(&mc_list[10]);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

	if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		udp_tunnel_get_rx_info(dev);

	priv->port_up = true;

	/* Process all completions if exist to prevent
	 * the queues freezing if they are full
	 */
	for (i = 0; i < priv->rx_ring_num; i++) {
		local_bh_disable();
		napi_schedule(&priv->rx_cq[i]->napi);
		local_bh_enable();
	}

	netif_tx_start_all_queues(dev);
	netif_device_attach(dev);

	return 0;

tx_err:
	if (t == MLX4_EN_NUM_TX_TYPES) {
		t--;
		i = priv->tx_ring_num[t];
	}
	while (t >= 0) {
		while (i--) {
			mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
			mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
		}
		if (!t--)
			break;
		i = priv->tx_ring_num[t];
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--) {
		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
		mlx4_en_free_affinity_hint(priv, rx_index);
	}
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);

	return err;
}
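
/* Tear down everything mlx4_en_start_port() set up. @detach also
 * detaches the net_device so the stack stops using it. Called with
 * mdev->state_lock held.
 */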
void mlx4_en_stop_port(struct net_device *dev, int detach)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_mc_list *mclist, *tmp;
	struct ethtool_flow_id *flow, *tmp_flow;
	int i, t;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* Close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	if (detach)
		netif_device_detach(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	netif_tx_disable(dev);

	spin_lock_bh(&priv->stats_lock);
	mlx4_en_fold_software_stats(dev);
	/* Set port as not active */
	priv->port_up = false;
	spin_unlock_bh(&priv->stats_lock);

	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_ALL_DEFAULT);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_MC_DEFAULT);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach All multicasts */
	eth_broadcast_addr(&mc_list[10]);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
		if (mclist->tunnel_reg_id)
			mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	/* Remove flow steering rules for the port */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ASSERT_RTNL();
		list_for_each_entry_safe(flow, tmp_flow,
					 &priv->ethtool_list, list) {
			mlx4_flow_detach(mdev->dev, flow->id);
			list_del(&flow->list);
		}
	}

	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
			mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
		}
	}
	msleep(10);

	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
		for (i = 0; i < priv->tx_ring_num[t]; i++)
			mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]);

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		mlx4_en_delete_rss_steer_rules(priv);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister Mac address for the port */
	mlx4_en_put_qp(priv);
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
		mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		struct mlx4_en_cq *cq = priv->rx_cq[i];

		napi_synchronize(&cq->napi);
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
		mlx4_en_deactivate_cq(priv, cq);

		mlx4_en_free_affinity_hint(priv, i);
	}
}

static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	rtnl_lock();
	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev, 1);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
	rtnl_unlock();
}

static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring **tx_ring;
	int i;

	if (!mlx4_is_slave(mdev->dev))
		if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
			en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
	memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
	memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
	memset(&priv->rx_priority_flowstats, 0,
	       sizeof(priv->rx_priority_flowstats));
	memset(&priv->tx_priority_flowstats, 0,
	       sizeof(priv->tx_priority_flowstats));
	memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));

	tx_ring = priv->tx_ring[TX];
	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
		tx_ring[i]->bytes = 0;
		tx_ring[i]->packets = 0;
		tx_ring[i]->tx_csum = 0;
		tx_ring[i]->tx_dropped = 0;
		tx_ring[i]->queue_stopped = 0;
		tx_ring[i]->wake_queue = 0;
		tx_ring[i]->tso_packets = 0;
		tx_ring[i]->xmit_more = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->bytes = 0;
		priv->rx_ring[i]->packets = 0;
		priv->rx_ring[i]->csum_ok = 0;
		priv->rx_ring[i]->csum_none = 0;
		priv->rx_ring[i]->csum_complete = 0;
	}
}

static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev, 0);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}

static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i, t;

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = NULL;
#endif

	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			if (priv->tx_ring[t] && priv->tx_ring[t][i])
				mlx4_en_destroy_tx_ring(priv,
							&priv->tx_ring[t][i]);
			if (priv->tx_cq[t] && priv->tx_cq[t][i])
				mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
		}
		kfree(priv->tx_ring[t]);
		kfree(priv->tx_cq[t]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
}
2129
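/* Allocate all TX/RX rings and their completion queues for the current
 * profile, spreading them across NUMA nodes round-robin over online CPUs.
 * On failure everything already allocated is torn down.
 */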
static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i, t;
	int node;

	/* Create tx Rings */
	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			node = cpu_to_node(i % num_online_cpus());
			if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i],
					      prof->tx_ring_size, i, t, node))
				goto err;

			if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i],
						   prof->tx_ring_size,
						   TXBB_SIZE, node, i))
				goto err;
		}
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		node = cpu_to_node(i % num_online_cpus());
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX, node))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride,
					   node, i))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
#endif

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						prof->rx_ring_size,
						priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			if (priv->tx_ring[t][i])
				mlx4_en_destroy_tx_ring(priv,
							&priv->tx_ring[t][i]);
			if (priv->tx_cq[t][i])
				mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
		}
	}
	return -ENOMEM;
}

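/* Seed a temporary priv with the new profile so a candidate set of rings
 * can be allocated without touching the live configuration. Only the TX
 * ring/CQ pointer arrays are allocated here; the rings themselves are
 * created later by mlx4_en_alloc_resources().
 */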
static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
			     struct mlx4_en_priv *src,
			     struct mlx4_en_port_profile *prof)
{
	int t;

	memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
	       sizeof(dst->hwtstamp_config));
	dst->num_tx_rings_p_up = prof->num_tx_rings_p_up;
	dst->rx_ring_num = prof->rx_ring_num;
	dst->flags = prof->flags;
	dst->mdev = src->mdev;
	dst->port = src->port;
	dst->dev = src->dev;
	dst->prof = prof;
	dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);

	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		dst->tx_ring_num[t] = prof->tx_ring_num[t];
		if (!dst->tx_ring_num[t])
			continue;

		dst->tx_ring[t] = kcalloc(MAX_TX_RINGS,
					  sizeof(struct mlx4_en_tx_ring *),
					  GFP_KERNEL);
		if (!dst->tx_ring[t])
			goto err_free_tx;

		dst->tx_cq[t] = kcalloc(MAX_TX_RINGS,
					sizeof(struct mlx4_en_cq *),
					GFP_KERNEL);
		if (!dst->tx_cq[t]) {
			kfree(dst->tx_ring[t]);
			goto err_free_tx;
		}
	}

	return 0;

err_free_tx:
	while (t--) {
		kfree(dst->tx_ring[t]);
		kfree(dst->tx_cq[t]);
	}
	return -ENOMEM;
}

static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
				struct mlx4_en_priv *src)
{
	int t;

	memcpy(dst->rx_ring, src->rx_ring,
	       sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
	memcpy(dst->rx_cq, src->rx_cq,
	       sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
	memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
	       sizeof(dst->hwtstamp_config));
	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		dst->tx_ring_num[t] = src->tx_ring_num[t];
		dst->tx_ring[t] = src->tx_ring[t];
		dst->tx_cq[t] = src->tx_cq[t];
	}
	dst->num_tx_rings_p_up = src->num_tx_rings_p_up;
	dst->rx_ring_num = src->rx_ring_num;
	memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
}

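/* Allocate a complete candidate configuration into @tmp. The caller swaps
 * it into the live priv with mlx4_en_safe_replace_resources() only if this
 * succeeds, so a failed reconfiguration leaves the port untouched.
 */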
int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
				struct mlx4_en_priv *tmp,
				struct mlx4_en_port_profile *prof,
				bool carry_xdp_prog)
{
	struct bpf_prog *xdp_prog;
	int i, t, err;

	err = mlx4_en_copy_priv(tmp, priv, prof);
	if (err) {
		en_warn(priv, "%s: mlx4_en_copy_priv() failed, return\n",
			__func__);
		return err;
	}

	if (mlx4_en_alloc_resources(tmp)) {
		en_warn(priv,
			"%s: Resource allocation failed, using previous configuration\n",
			__func__);
		for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
			kfree(tmp->tx_ring[t]);
			kfree(tmp->tx_cq[t]);
		}
		return -ENOMEM;
	}

	/* All rx_rings has the same xdp_prog.  Pick the first one. */
	xdp_prog = rcu_dereference_protected(
		priv->rx_ring[0]->xdp_prog,
		lockdep_is_held(&priv->mdev->state_lock));

	if (xdp_prog && carry_xdp_prog) {
		xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
		if (IS_ERR(xdp_prog)) {
			mlx4_en_free_resources(tmp);
			return PTR_ERR(xdp_prog);
		}
		for (i = 0; i < tmp->rx_ring_num; i++)
			rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
					   xdp_prog);
	}

	return 0;
}

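/* Commit a successfully allocated candidate configuration: free the old
 * rings and move the new ones into the live priv. The caller holds the
 * device state lock and has stopped the port.
 */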
void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
				    struct mlx4_en_priv *tmp)
{
	mlx4_en_free_resources(priv);
	mlx4_en_update_priv(priv, tmp);
}

void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered) {
		devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
							      priv->port));
		unregister_netdev(dev);
	}

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	cancel_delayed_work(&priv->service_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		mlx4_en_remove_timestamp(mdev);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mdev->upper[priv->port] = NULL;

#ifdef CONFIG_RFS_ACCEL
	mlx4_en_cleanup_filters(priv);
#endif

	mlx4_en_free_resources(priv);
	mutex_unlock(&mdev->state_lock);

	free_netdev(dev);
}

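/* With an XDP program attached, each RX buffer must hold a whole packet in
 * a single page, so the MTU is capped at MLX4_EN_MAX_XDP_MTU.
 */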
static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (mtu > MLX4_EN_MAX_XDP_MTU) {
		en_err(priv, "mtu:%d > max:%d when XDP prog is attached\n",
		       mtu, MLX4_EN_MAX_XDP_MTU);
		return false;
	}

	return true;
}

static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if (priv->tx_ring_num[TX_XDP] &&
	    !mlx4_en_check_xdp_mtu(dev, new_mtu))
		return -EOPNOTSUPP;

	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev, 1);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}

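/* SIOCSHWTSTAMP: validate the requested hardware timestamping config and
 * apply it via mlx4_en_reset_config(). Any PTP filter is widened to
 * HWTSTAMP_FILTER_ALL, which is what the hardware supports.
 */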
static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	/* device doesn't support time stamping */
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
		return -EINVAL;

	/* TX HW timestamp */
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	/* RX HW timestamp */
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (mlx4_en_reset_config(dev, config, dev->features)) {
		config.tx_type = HWTSTAMP_TX_OFF;
		config.rx_filter = HWTSTAMP_FILTER_NONE;
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
			    sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
}

static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx4_en_hwtstamp_set(dev, ifr);
	case SIOCGHWTSTAMP:
		return mlx4_en_hwtstamp_get(dev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
					      netdev_features_t features)
{
	struct mlx4_en_priv *en_priv = netdev_priv(netdev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	/* Since there is no support for separate RX C-TAG/S-TAG vlan accel
	 * enable/disable make sure S-TAG flag is always in same state as
	 * C-TAG.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX &&
	    !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
		features |= NETIF_F_HW_VLAN_STAG_RX;
	else
		features &= ~NETIF_F_HW_VLAN_STAG_RX;

	return features;
}

static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	bool reset = false;
	int ret = 0;

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
		en_info(priv, "Turn %s RX-FCS\n",
			(features & NETIF_F_RXFCS) ? "ON" : "OFF");
		reset = true;
	}

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
		u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;

		en_info(priv, "Turn %s RX-ALL\n",
			ignore_fcs_value ? "ON" : "OFF");
		ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
					      priv->port, ignore_fcs_value);
		if (ret)
			return ret;
	}

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
		en_info(priv, "Turn %s RX vlan strip offload\n",
			(features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
		reset = true;
	}

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
		en_info(priv, "Turn %s TX vlan strip offload\n",
			(features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
		en_info(priv, "Turn %s TX S-VLAN strip offload\n",
			(features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
		en_info(priv, "Turn %s loopback\n",
			(features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
		mlx4_en_update_loopback_state(netdev, features);
	}

	if (reset) {
		ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
					   features);
		if (ret)
			return ret;
	}

	return 0;
}

static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac);
}

static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
			       __be16 vlan_proto)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos,
				vlan_proto);
}

static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
			       int max_tx_rate)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
				max_tx_rate);
}

static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
}

static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
}

static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}

static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
				struct ifla_vf_stats *vf_stats)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
}

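/* Export the 64-bit physical port ID (big-endian byte order) so bonding
 * and user space can tell which netdevs share a physical port.
 */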
#define PORT_ID_BYTE_LEN 8
static int mlx4_en_get_phys_port_id(struct net_device *dev,
				    struct netdev_phys_item_id *ppid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_dev *mdev = priv->mdev->dev;
	int i;
	u64 phys_port_id = mdev->caps.phys_port_id[priv->port];

	if (!phys_port_id)
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(phys_port_id);
	for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
		ppid->id[i] = phys_port_id & 0xff;
		phys_port_id >>= 8;
	}
	return 0;
}

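/* VXLAN offload configuration runs from workqueue context because the
 * firmware commands it issues can sleep.
 */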
static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
{
	int ret;
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 vxlan_add_task);

	ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
	if (ret)
		goto out;

	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
				  VXLAN_STEER_BY_OUTER_MAC, 1);
out:
	if (ret) {
		en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
		return;
	}

	/* set offloads */
	priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				      NETIF_F_RXCSUM |
				      NETIF_F_TSO | NETIF_F_TSO6 |
				      NETIF_F_GSO_UDP_TUNNEL |
				      NETIF_F_GSO_UDP_TUNNEL_CSUM |
				      NETIF_F_GSO_PARTIAL;
}

static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
{
	int ret;
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 vxlan_del_task);

	/* unset offloads */
	priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					NETIF_F_RXCSUM |
					NETIF_F_TSO | NETIF_F_TSO6 |
					NETIF_F_GSO_UDP_TUNNEL |
					NETIF_F_GSO_UDP_TUNNEL_CSUM |
					NETIF_F_GSO_PARTIAL);

	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
				  VXLAN_STEER_BY_OUTER_MAC, 0);
	if (ret)
		en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);

	priv->vxlan_port = 0;
}

static void mlx4_en_add_vxlan_port(struct net_device *dev,
				   struct udp_tunnel_info *ti)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	__be16 port = ti->port;
	__be16 current_port;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (ti->sa_family != AF_INET)
		return;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		return;

	current_port = priv->vxlan_port;
	if (current_port && current_port != port) {
		en_warn(priv, "vxlan port %d configured, can't add port %d\n",
			ntohs(current_port), ntohs(port));
		return;
	}

	priv->vxlan_port = port;
	queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
}

static void mlx4_en_del_vxlan_port(struct net_device *dev,
				   struct udp_tunnel_info *ti)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	__be16 port = ti->port;
	__be16 current_port;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (ti->sa_family != AF_INET)
		return;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		return;

	current_port = priv->vxlan_port;
	if (current_port != port) {
		en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
		return;
	}

	queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
}

static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	/* The ConnectX-3 doesn't support outer IPv6 checksums but it does
	 * support inner IPv6 checksums and segmentation, so we need to
	 * strip that feature if this is an IPv6 encapsulated frame.
	 */
	if (skb->encapsulation &&
	    (skb->ip_summed == CHECKSUM_PARTIAL)) {
		struct mlx4_en_priv *priv = netdev_priv(dev);

		if (!priv->vxlan_port ||
		    (ip_hdr(skb)->version != 4) ||
		    (udp_hdr(skb)->dest != priv->vxlan_port))
			features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}

	return features;
}

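/* ndo_set_tx_maxrate: program a per-queue rate limit on the TX ring's QP,
 * choosing Mbs or Gbs units so the value fits the 12-bit rate field.
 */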
static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index];
	struct mlx4_update_qp_params params;
	int err;

	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
		return -EOPNOTSUPP;

	/* rate provided to us in Mbs, check if it fits into 12 bits, if not use Gbs */
	if (maxrate >> 12) {
		params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
		params.rate_val = maxrate / 1000;
	} else if (maxrate) {
		params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
		params.rate_val = maxrate;
	} else { /* zero serves to revoke the QP rate-limitation */
		params.rate_unit = 0;
		params.rate_val = 0;
	}

	err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
			     &params);
	return err;
}

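/* Attach or detach an XDP program. A plain program swap is done in place;
 * enabling or disabling XDP requires reallocating the rings since each RX
 * ring gets a dedicated XDP TX ring.
 */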
static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile new_prof;
	struct bpf_prog *old_prog;
	struct mlx4_en_priv *tmp;
	int tx_changed = 0;
	int xdp_ring_num;
	int port_up = 0;
	int err;
	int i;

	xdp_ring_num = prog ? priv->rx_ring_num : 0;

	/* No need to reconfigure buffers when simply swapping the
	 * program for a new one.
	 */
	if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
		if (prog) {
			prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
			if (IS_ERR(prog))
				return PTR_ERR(prog);
		}
		mutex_lock(&mdev->state_lock);
		for (i = 0; i < priv->rx_ring_num; i++) {
			old_prog = rcu_dereference_protected(
				priv->rx_ring[i]->xdp_prog,
				lockdep_is_held(&mdev->state_lock));
			rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
			if (old_prog)
				bpf_prog_put(old_prog);
		}
		mutex_unlock(&mdev->state_lock);
		return 0;
	}

	if (!mlx4_en_check_xdp_mtu(dev, dev->mtu))
		return -EOPNOTSUPP;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	if (prog) {
		prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto out;
		}
	}

	mutex_lock(&mdev->state_lock);
	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
	new_prof.tx_ring_num[TX_XDP] = xdp_ring_num;

	if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) {
		tx_changed = 1;
		new_prof.tx_ring_num[TX] =
			MAX_TX_RINGS - ALIGN(xdp_ring_num, priv->prof->num_up);
		en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
	}

	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
	if (err) {
		if (prog)
			bpf_prog_sub(prog, priv->rx_ring_num - 1);
		goto unlock_out;
	}

	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_safe_replace_resources(priv, tmp);
	if (tx_changed)
		netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);

	for (i = 0; i < priv->rx_ring_num; i++) {
		old_prog = rcu_dereference_protected(
			priv->rx_ring[i]->xdp_prog,
			lockdep_is_held(&mdev->state_lock));
		rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
		if (old_prog)
			bpf_prog_put(old_prog);
	}

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err) {
			en_err(priv, "Failed starting port %d for XDP change\n",
			       priv->port);
			queue_work(mdev->workqueue, &priv->watchdog_task);
		}
	}

unlock_out:
	mutex_unlock(&mdev->state_lock);
out:
	kfree(tmp);
	return err;
}

static u32 mlx4_xdp_query(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	const struct bpf_prog *xdp_prog;
	u32 prog_id = 0;

	if (!priv->tx_ring_num[TX_XDP])
		return prog_id;

	mutex_lock(&mdev->state_lock);
	xdp_prog = rcu_dereference_protected(
		priv->rx_ring[0]->xdp_prog,
		lockdep_is_held(&mdev->state_lock));
	if (xdp_prog)
		prog_id = xdp_prog->aux->id;
	mutex_unlock(&mdev->state_lock);

	return prog_id;
}

static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mlx4_xdp_set(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_id = mlx4_xdp_query(dev);
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats64	= mlx4_en_get_stats64,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_do_ioctl		= mlx4_en_ioctl,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_fix_features	= mlx4_en_fix_features,
	.ndo_setup_tc		= __mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
	.ndo_udp_tunnel_add	= mlx4_en_add_vxlan_port,
	.ndo_udp_tunnel_del	= mlx4_en_del_vxlan_port,
	.ndo_features_check	= mlx4_en_features_check,
	.ndo_set_tx_maxrate	= mlx4_en_set_tx_maxrate,
	.ndo_bpf		= mlx4_xdp,
};

static const struct net_device_ops mlx4_netdev_ops_master = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats64	= mlx4_en_get_stats64,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= mlx4_en_set_vf_mac,
	.ndo_set_vf_vlan	= mlx4_en_set_vf_vlan,
	.ndo_set_vf_rate	= mlx4_en_set_vf_rate,
	.ndo_set_vf_spoofchk	= mlx4_en_set_vf_spoofchk,
	.ndo_set_vf_link_state	= mlx4_en_set_vf_link_state,
	.ndo_get_vf_stats	= mlx4_en_get_vf_stats,
	.ndo_get_vf_config	= mlx4_en_get_vf_config,
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_fix_features	= mlx4_en_fix_features,
	.ndo_setup_tc		= __mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
	.ndo_udp_tunnel_add	= mlx4_en_add_vxlan_port,
	.ndo_udp_tunnel_del	= mlx4_en_del_vxlan_port,
	.ndo_features_check	= mlx4_en_features_check,
	.ndo_set_tx_maxrate	= mlx4_en_set_tx_maxrate,
	.ndo_bpf		= mlx4_xdp,
};

struct mlx4_en_bond {
	struct work_struct work;
	struct mlx4_en_priv *priv;
	int is_bonded;
	struct mlx4_port_map port_map;
};

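/* Bonding of the two ports into one device is driven from a workqueue:
 * mlx4_bond()/mlx4_unbond() and the port map update issue firmware
 * commands that can sleep, which is not allowed in notifier context.
 */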
static void mlx4_en_bond_work(struct work_struct *work)
{
	struct mlx4_en_bond *bond = container_of(work,
						 struct mlx4_en_bond,
						 work);
	int err = 0;
	struct mlx4_dev *dev = bond->priv->mdev->dev;

	if (bond->is_bonded) {
		if (!mlx4_is_bonded(dev)) {
			err = mlx4_bond(dev);
			if (err)
				en_err(bond->priv, "Fail to bond device\n");
		}
		if (!err) {
			err = mlx4_port_map_set(dev, &bond->port_map);
			if (err)
				en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n",
				       bond->port_map.port1,
				       bond->port_map.port2,
				       err);
		}
	} else if (mlx4_is_bonded(dev)) {
		err = mlx4_unbond(dev);
		if (err)
			en_err(bond->priv, "Fail to unbond device\n");
	}
	dev_put(bond->priv->dev);
	kfree(bond);
}

static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
				   u8 v2p_p1, u8 v2p_p2)
{
	struct mlx4_en_bond *bond = NULL;

	bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
	if (!bond)
		return -ENOMEM;

	INIT_WORK(&bond->work, mlx4_en_bond_work);
	bond->priv = priv;
	bond->is_bonded = is_bonded;
	bond->port_map.port1 = v2p_p1;
	bond->port_map.port2 = v2p_p2;
	dev_hold(priv->dev);
	queue_work(priv->mdev->workqueue, &bond->work);
	return 0;
}

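/* Netdev notifier: watch bonding events on our two Ethernet ports and,
 * when both become slaves of the same master in a supported mode, compute
 * the virtual-to-physical port map and queue the bond work.
 */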
int mlx4_en_netdev_event(struct notifier_block *this,
			 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	u8 port = 0;
	struct mlx4_en_dev *mdev;
	struct mlx4_dev *dev;
	int i, num_eth_ports = 0;
	bool do_bond = true;
	struct mlx4_en_priv *priv;
	u8 v2p_port1 = 0;
	u8 v2p_port2 = 0;

	if (!net_eq(dev_net(ndev), &init_net))
		return NOTIFY_DONE;

	mdev = container_of(this, struct mlx4_en_dev, nb);
	dev = mdev->dev;

	/* Go into this mode only when two network devices set on two ports
	 * of the same mlx4 device are slaves of the same bonding master
	 */
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		++num_eth_ports;
		if (!port && (mdev->pndev[i] == ndev))
			port = i;
		mdev->upper[i] = mdev->pndev[i] ?
			netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
		/* condition not met: network device is a slave */
		if (!mdev->upper[i])
			do_bond = false;
		if (num_eth_ports < 2)
			continue;
		/* condition not met: same master */
		if (mdev->upper[i] != mdev->upper[i-1])
			do_bond = false;
	}
	/* condition not met: 2 slaves */
	do_bond = (num_eth_ports == 2) ? do_bond : false;

	/* handle only events that come with enough info */
	if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
		return NOTIFY_DONE;

	priv = netdev_priv(ndev);
	if (do_bond) {
		struct netdev_notifier_bonding_info *notifier_info = ptr;
		struct netdev_bonding_info *bonding_info =
			&notifier_info->bonding_info;

		/* required mode 1, 2 or 4 */
		if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
		    (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
		    (bonding_info->master.bond_mode != BOND_MODE_8023AD))
			do_bond = false;

		/* require exactly 2 slaves */
		if (bonding_info->master.num_slaves != 2)
			do_bond = false;

		/* calc v2p */
		if (do_bond) {
			if (bonding_info->master.bond_mode ==
			    BOND_MODE_ACTIVEBACKUP) {
				/* in active-backup mode virtual ports are
				 * mapped to the physical port of the active
				 * slave */
				if (bonding_info->slave.state ==
				    BOND_STATE_BACKUP) {
					if (port == 1) {
						v2p_port1 = 2;
						v2p_port2 = 2;
					} else {
						v2p_port1 = 1;
						v2p_port2 = 1;
					}
				} else { /* active slave */
					if (port == 1) {
						v2p_port1 = 1;
						v2p_port2 = 1;
					} else {
						v2p_port1 = 2;
						v2p_port2 = 2;
					}
				}
			} else { /* modes 2 and 4 */
				/* in xor/802.3ad mode both ports carry
				 * traffic, so the mapping follows the link
				 * state of the reporting slave
				 */
				__s8 link = bonding_info->slave.link;

				if (port == 1)
					v2p_port2 = 2;
				else
					v2p_port1 = 1;
				if ((link == BOND_LINK_UP) ||
				    (link == BOND_LINK_FAIL)) {
					if (port == 1)
						v2p_port1 = 1;
					else
						v2p_port2 = 2;
				} else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
					if (port == 1)
						v2p_port1 = 2;
					else
						v2p_port2 = 1;
				}
			}
		}
	}

	mlx4_en_queue_bond_work(priv, do_bond,
				v2p_port1, v2p_port2);

	return NOTIFY_DONE;
}

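/* Expose flow-control statistics only where they are meaningful: per
 * priority when PFC is on, global only when plain pause is on without PFC.
 */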
void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
				     struct mlx4_en_stats_bitmap *stats_bitmap,
				     u8 rx_ppp, u8 rx_pause,
				     u8 tx_ppp, u8 tx_pause)
{
	int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;

	if (!mlx4_is_slave(dev) &&
	    (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
		mutex_lock(&stats_bitmap->mutex);
		bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS);

		if (rx_ppp)
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_PRIORITY_STATS_RX);
		last_i += NUM_FLOW_PRIORITY_STATS_RX;

		if (rx_pause && !(rx_ppp))
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_STATS_RX);
		last_i += NUM_FLOW_STATS_RX;

		if (tx_ppp)
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_PRIORITY_STATS_TX);
		last_i += NUM_FLOW_PRIORITY_STATS_TX;

		if (tx_pause && !(tx_ppp))
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_STATS_TX);
		last_i += NUM_FLOW_STATS_TX;

		mutex_unlock(&stats_bitmap->mutex);
	}
}

void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
			      struct mlx4_en_stats_bitmap *stats_bitmap,
			      u8 rx_ppp, u8 rx_pause,
			      u8 tx_ppp, u8 tx_pause)
{
	int last_i = 0;

	mutex_init(&stats_bitmap->mutex);
	bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS);

	if (mlx4_is_slave(dev)) {
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(rx_packets), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(tx_packets), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(rx_bytes), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(tx_bytes), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(rx_dropped), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(tx_dropped), 1);
	} else {
		bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS);
	}
	last_i += NUM_MAIN_STATS;

	bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
	last_i += NUM_PORT_STATS;

	if (mlx4_is_master(dev))
		bitmap_set(stats_bitmap->bitmap, last_i,
			   NUM_PF_STATS);
	last_i += NUM_PF_STATS;

	mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
					rx_ppp, rx_pause,
					tx_ppp, tx_pause);
	last_i += NUM_FLOW_STATS;

	if (!mlx4_is_slave(dev))
		bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
	last_i += NUM_PKT_STATS;

	bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS);
	last_i += NUM_XDP_STATS;

	if (!mlx4_is_slave(dev))
		bitmap_set(stats_bitmap->bitmap, last_i, NUM_PHY_STATS);
	last_i += NUM_PHY_STATS;
}

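/* Create and register the netdev for one physical port: allocate the priv
 * and rings, set up netdev ops and feature flags from the device caps,
 * apply the port configuration and start the stats/service tasks.
 */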
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i, t;
	int err;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 MAX_TX_RINGS, MAX_RX_RINGS);
	if (dev == NULL)
		return -ENOMEM;

	netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]);
	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);

	SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
	dev->dev_port = port - 1;

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
	INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
	INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
#ifdef CONFIG_RFS_ACCEL
	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
				       MLX4_WQE_CTRL_SOLICITED);
	priv->num_tx_rings_p_up = mdev->profile.max_num_tx_rings_p_up;
	priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
	netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));

	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		priv->tx_ring_num[t] = prof->tx_ring_num[t];
		if (!priv->tx_ring_num[t])
			continue;

		priv->tx_ring[t] = kcalloc(MAX_TX_RINGS,
					   sizeof(struct mlx4_en_tx_ring *),
					   GFP_KERNEL);
		if (!priv->tx_ring[t]) {
			err = -ENOMEM;
			goto out;
		}
		priv->tx_cq[t] = kcalloc(MAX_TX_RINGS,
					 sizeof(struct mlx4_en_cq *),
					 GFP_KERNEL);
		if (!priv->tx_cq[t]) {
			err = -ENOMEM;
			goto out;
		}
	}
	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->cqe_size = mdev->dev->caps.cqe_size;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		u8 prio;

		for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
			priv->ets.prio_tc[prio] = prio;
			priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR;
		}

		priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
			DCB_CAP_DCBX_VER_IEEE;
		priv->flags |= MLX4_EN_DCB_ENABLED;
		priv->cee_config.pfc_state = false;

		for (i = 0; i < MLX4_EN_NUM_UP_HIGH; i++)
			priv->cee_config.dcb_pfc[i] = pfc_disabled;

		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
		} else {
			en_info(priv, "enabling only PFC DCB ops\n");
			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
		}
	}
#endif

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
		INIT_HLIST_HEAD(&priv->mac_hash[i]);

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];

	if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
	    MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
		priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
	if (!is_valid_ether_addr(dev->dev_addr)) {
		en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
		       priv->port, dev->dev_addr);
		err = -EINVAL;
		goto out;
	} else if (mlx4_is_slave(priv->mdev->dev) &&
		   (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
		/* Random MAC was assigned in mlx4_slave_cap
		 * in mlx4_core module
		 */
		dev->addr_assign_type |= NET_ADDR_RANDOM;
		en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
	}

	memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Initialize time stamping config */
	priv->hwtstamp_config.flags = 0;
	priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
	priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	if (mlx4_is_master(priv->mdev->dev))
		dev->netdev_ops = &mlx4_netdev_ops_master;
	else
		dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	dev->ethtool_ops = &mlx4_en_ethtool_ops;

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_VLAN_CTAG_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
		dev->features |= NETIF_F_HW_VLAN_STAG_RX |
			NETIF_F_HW_VLAN_STAG_FILTER;
		dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
	}

	if (mlx4_is_slave(mdev->dev)) {
		bool vlan_offload_disabled;
		int phv;

		err = get_phv_bit(mdev->dev, port, &phv);
		if (!err && phv) {
			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
			priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
		}
		err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port,
							&vlan_offload_disabled);
		if (!err && vlan_offload_disabled) {
			dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
					      NETIF_F_HW_VLAN_CTAG_RX |
					      NETIF_F_HW_VLAN_STAG_TX |
					      NETIF_F_HW_VLAN_STAG_RX);
			dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
					   NETIF_F_HW_VLAN_CTAG_RX |
					   NETIF_F_HW_VLAN_STAG_TX |
					   NETIF_F_HW_VLAN_STAG_RX);
		}
	} else {
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
		    !(mdev->dev->caps.flags2 &
		      MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
	}

	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
		dev->hw_features |= NETIF_F_RXFCS;

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
		dev->hw_features |= NETIF_F_RXALL;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
		dev->hw_features |= NETIF_F_NTUPLE;

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		dev->priv_flags |= IFF_UNICAST_FLT;

	/* Setting a default hash function value */
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
	} else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
		priv->rss_hash_fn = ETH_RSS_HASH_XOR;
	} else {
		en_warn(priv,
			"No RSS hash capabilities exposed, using Toeplitz\n");
		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
				    NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_PARTIAL;
		dev->features |= NETIF_F_GSO_UDP_TUNNEL |
				 NETIF_F_GSO_UDP_TUNNEL_CSUM |
				 NETIF_F_GSO_PARTIAL;
		dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	/* MTU range: 68 - hw-specific max */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = priv->max_mtu;

	mdev->pndev[port] = dev;
	mdev->upper[port] = NULL;

	netif_carrier_off(dev);
	mlx4_en_set_default_moderation(priv);

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);

	/* Configure port */
	mlx4_en_calc_rx_buf(dev);
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto out;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto out;
		}
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

	/* Initialize time stamp mechanism */
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		mlx4_en_init_timestamp(mdev);

	queue_delayed_work(mdev->workqueue, &priv->service_task,
			   SERVICE_TASK_DELAY);

	mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
				 mdev->profile.prof[priv->port].rx_ppp,
				 mdev->profile.prof[priv->port].rx_pause,
				 mdev->profile.prof[priv->port].tx_ppp,
				 mdev->profile.prof[priv->port].tx_pause);

	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}

	priv->registered = 1;
	devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
				  dev);

	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}

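/* Rebuild the port resources for a new timestamping/VLAN/FCS configuration.
 * RX time-stamping and RX VLAN stripping are mutually exclusive, so the
 * VLAN offload state is adjusted accordingly.
 */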
int mlx4_en_reset_config(struct net_device *dev,
			 struct hwtstamp_config ts_config,
			 netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile new_prof;
	struct mlx4_en_priv *tmp;
	int port_up = 0;
	int err = 0;

	if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
	    priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
	    !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
	    !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
		return 0; /* Nothing to change */

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
	    (features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
		en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
		return -EINVAL;
	}

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&mdev->state_lock);

	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
	memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));

	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
	if (err)
		goto out;

	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_safe_replace_resources(priv, tmp);

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
		else
			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	} else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
		/* RX time-stamping is OFF, update the RX vlan offload
		 * to the latest wanted state
		 */
		if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
		else
			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	}

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
		if (features & NETIF_F_RXFCS)
			dev->features |= NETIF_F_RXFCS;
		else
			dev->features &= ~NETIF_F_RXFCS;
	}

	/* RX vlan offload and RX time-stamping can't co-exist!
	 * Regardless of the caller's choice,
	 * turn off RX vlan offload in case time-stamping is ON
	 */
	if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
			en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	}

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

out:
	mutex_unlock(&mdev->state_lock);
	kfree(tmp);
	if (!err)
		netdev_features_change(dev);
	return err;
}