// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
static int ice_vsi_open(struct ice_vsi *vsi);

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
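
/* Worked example (illustrative, not part of the driver): on a 512-descriptor
 * ring with next_to_clean == 500 and next_to_use == 10, head > tail, so the
 * wrap-around branch yields 10 + 512 - 500 = 22 descriptors still pending.
 */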

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	for (i = 0; i < vsi->num_txq; i++) {
		struct ice_ring *tx_ring = vsi->tx_rings[i];

		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.pkts & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for the PF VSI; configure filters for the
 * permanent address and broadcast address. If an error is encountered,
 * the netdevice will be unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	enum ice_status status;
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
	if (!status)
		return 0;

	/* We aren't useful with no MAC filters, so unregister if we
	 * had an error
	 */
	if (vsi->netdev->reg_state == NETREG_REGISTERED) {
		dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %s. Unregistering device\n",
			ice_stat_str(status));
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}

	return -EIO;
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used to add the MAC filters
 * to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used to delete the MAC
 * filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
}

/**
 * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @set_promisc: enable or disable promisc flag request
 */
static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
{
	struct ice_hw *hw = &vsi->back->hw;
	enum ice_status status = 0;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (vsi->vlan_ena) {
		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
						  set_promisc);
	} else {
		if (set_promisc)
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     0);
		else
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       0);
	}

	if (status)
		return -EIO;

	return 0;
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status = 0;
	u32 changed_flags = 0;
	u8 promisc_m;
	int err = 0;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (status) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (status == ICE_ERR_NO_MEMORY) {
			err = -ENOMEM;
			goto out;
		}
	}

	/* Add MAC addresses in the sync list */
	status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If a filter was added successfully or already exists, do not treat
	 * it as an error; continue processing the rest of the function.
	 */
	if (status && status != ICE_ERR_ALREADY_EXISTS) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, the VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			err = -EIO;
			goto out;
		}
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			if (vsi->vlan_ena)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_cfg_promisc(vsi, promisc_m, true);
			if (err) {
				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			if (vsi->vlan_ena)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_cfg_promisc(vsi, promisc_m, false);
			if (err) {
				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
		clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
				err = ice_set_dflt_vsi(pf->first_sw, vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				ice_cfg_vlan_pruning(vsi, false, false);
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
				err = ice_clear_dflt_vsi(pf->first_sw);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->num_vlan > 1)
					ice_cfg_vlan_pruning(vsi, true, false);
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
	set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
exit:
	clear_bit(__ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);
}

/**
 * ice_prepare_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int i;

	/* already prepared for reset */
	if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
		return;

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	ice_for_each_vf(pf, i)
		ice_set_vf_state_qs_dis(&pf->vf[i]);

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
	WARN_ON(in_interrupt());

	ice_prepare_for_reset(pf);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(__ICE_RESET_FAILED, pf->state);
		clear_bit(__ICE_RESET_OICR_RECV, pf->state);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(__ICE_PFR_REQ, pf->state);
		clear_bit(__ICE_CORER_REQ, pf->state);
		clear_bit(__ICE_GLOBR_REQ, pf->state);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(__ICE_PFR_REQ, pf->state);
		ice_reset_all_vfs(pf, true);
	}
}

/**
 * ice_reset_subtask - check for reset requests and service them
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for the reset now), poll for reset done, rebuild and return.
	 */
	if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(__ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(__ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(__ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(__ICE_RESET_OICR_RECV, pf->state);
			clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(__ICE_PFR_REQ, pf->state);
			clear_bit(__ICE_CORER_REQ, pf->state);
			clear_bit(__ICE_GLOBR_REQ, pf->state);
			ice_reset_all_vfs(pf, true);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(__ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_bit(__ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(__ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;

	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(__ICE_DOWN, pf->state) &&
	    !test_bit(__ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being checked
 * @isup: boolean value indicating if it is up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	enum ice_status status;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_SW_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be treated with a DBG
 * message and continue on with driver rebuild/reenable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw;
	u32 ouisubtype;

	if (!pf) {
		dev_dbg(dev, "%s NULL pf pointer\n", __func__);
		return;
	}

	hw = &pf->hw;
	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf[0] = 0x08;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int result;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events,
	 * don't bail on failure due to other book keeping needed
	 */
	result = ice_update_link_info(pi);
	if (result)
		dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n",
			pi->lport);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);

		result = ice_aq_set_link_restart_an(pi, false, NULL);
		if (result) {
			dev_dbg(dev, "Failed to set link down, VSI %d error %d\n",
				vsi->vsi_num, result);
			return result;
		}
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return result;

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return result;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__ICE_DOWN, pf->state) ||
	    test_bit(__ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever the watchdog task fires
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}
enum ice_aq_task_state {
	ICE_AQ_TASK_WAITING = 0,
	ICE_AQ_TASK_COMPLETE,
	ICE_AQ_TASK_CANCELED,
};

struct ice_aq_task {
	struct hlist_node entry;

	u16 opcode;
	struct ice_rq_event_info *event;
	enum ice_aq_task_state state;
};

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @opcode: the opcode to wait for
 * @timeout: how long to wait, in jiffies
 * @event: storage for the event info
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * To obtain only the descriptor contents, pass an event without an allocated
 * msg_buf. If the complete data buffer is desired, allocate the
 * event->msg_buf with enough space ahead of time.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
			  struct ice_rq_event_info *event)
{
	struct ice_aq_task *task;
	long ret;
	int err;

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->event = event;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
					       timeout);
	switch (task->state) {
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
		err = -EINVAL;
		break;
	}

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);
	kfree(task);

	return err;
}
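
/* Usage sketch (hypothetical caller, not part of this file): wait up to three
 * seconds for firmware to post a completion event for a previously issued
 * AdminQ command. No msg_buf is allocated here, so only the descriptor
 * contents are copied back to the caller.
 */
static int __maybe_unused ice_example_wait_fw_event(struct ice_pf *pf, u16 opcode)
{
	struct ice_rq_event_info event = { 0 };
	int err;

	err = ice_aq_wait_for_event(pf, opcode, 3 * HZ, &event);
	if (err)
		dev_dbg(ice_pf_to_dev(pf), "firmware event wait failed: %d\n",
			err);
	return err;
}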

/**
 * ice_aq_check_events - Check if any thread is waiting on an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state || task->opcode != opcode)
			continue;

		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
		task->event->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was supplied */
		if (task->event->msg_buf &&
		    task->event->buf_len > event->buf_len) {
			memcpy(task->event->msg_buf, event->msg_buf,
			       event->buf_len);
			task->event->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(__ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		enum ice_status ret;
		u16 opcode;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == ICE_ERR_AQ_NO_WORK)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %s\n", qtype,
				ice_stat_str(ret));
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			ice_vc_process_vf_msg(pf, &event);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to the HW struct
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(__ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the __ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(__ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(__ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume works (e.g WoL scenario)
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(__ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}

/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is rate limited, and additional PF and VF log messages
 * are wrapped by netif_msg_[rx|tx]_err.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	unsigned int i;
	u32 reg;

	if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) {
		/* Since the VF MDD event logging is rate limited, check if
		 * there are pending MDD events.
		 */
		ice_print_vfs_mdd_events(pf);
		return;
	}

	/* find what triggered an MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
	}

	/* check to see if this PF caused an MDD event */
	reg = rd32(hw, PF_MDET_TX_PQM);
	if (reg & PF_MDET_TX_PQM_VALID_M) {
		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_TX_TCLAN);
	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_RX);
	if (reg & PF_MDET_RX_VALID_M) {
		wr32(hw, PF_MDET_RX, 0xFFFF);
		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
	}

	/* Check to see if one of the VFs caused an MDD event, and then
	 * increment counters and set print pending
	 */
	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		reg = rd32(hw, VP_MDET_TX_PQM(i));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(i));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_RX(i));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(i), 0xFFFF);
			vf->mdd_rx_events.count++;
			set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_rx_err(pf))
				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
					 i);

			/* Since the queue is disabled on VF Rx MDD events, the
			 * PF can be configured to reset the VF through ethtool
			 * private flag mdd-auto-reset-vf.
			 */
			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
				/* VF MDD event counters will be cleared by
				 * reset, so print the event prior to reset.
				 */
				ice_print_vf_rx_mdd_event(vf);
				ice_reset_vf(&pf->vf[i], false);
			}
		}
	}

	ice_print_vfs_mdd_events(pf);
}

/**
 * ice_force_phys_link_state - Force the physical link state
 * @vsi: VSI to force the physical link state to up/down
 * @link_up: true/false indicates to set the physical link to up/down
 *
 * Force the physical link state by getting the current PHY capabilities from
 * hardware and setting the PHY config based on the determined capabilities. If
 * link changes, a link event will be triggered because both the Enable
 * Automatic Link Update and LESM Enable bits are set when setting the PHY
 * capabilities.
 *
 * Returns 0 on success, negative on failure
 */
static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_port_info *pi;
	struct device *dev;
	int retcode;

	if (!vsi || !vsi->port_info || !vsi->back)
		return -EINVAL;
	if (vsi->type != ICE_VSI_PF)
		return 0;

	dev = ice_pf_to_dev(vsi->back);

	pi = vsi->port_info;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				      NULL);
	if (retcode) {
		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
		goto out;
	}

	/* No change in link */
	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
		goto out;

	/* Use the current user PHY configuration. The current user PHY
	 * configuration is initialized during probe from PHY capabilities
	 * software mode, and updated on set PHY configuration.
	 */
	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
	if (!cfg) {
		retcode = -ENOMEM;
		goto out;
	}

	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
	if (link_up)
		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
	else
		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;

	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
	if (retcode) {
		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
	}

	kfree(cfg);
out:
	kfree(pcaps);
	return retcode;
}

/**
 * ice_init_nvm_phy_type - Initialize the NVM PHY type
 * @pi: port info structure
 *
 * Initialize nvm_phy_type_[low|high] for link lenient mode support
 */
static int ice_init_nvm_phy_type(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_pf *pf = pi->hw->back;
	enum ice_status status;
	int err = 0;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, pcaps,
				     NULL);

	if (status) {
		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
		err = -EIO;
		goto out;
	}

	pf->nvm_phy_type_hi = pcaps->phy_type_high;
	pf->nvm_phy_type_lo = pcaps->phy_type_low;

out:
	kfree(pcaps);
	return err;
}

/**
 * ice_init_link_dflt_override - Initialize link default override
 * @pi: port info structure
 *
 * Initialize link default override and PHY total port shutdown during probe
 */
static void ice_init_link_dflt_override(struct ice_port_info *pi)
{
	struct ice_link_default_override_tlv *ldo;
	struct ice_pf *pf = pi->hw->back;

	ldo = &pf->link_dflt_override;
	if (ice_get_link_default_override(ldo, pi))
		return;

	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
		return;

	/* Enable Total Port Shutdown (override/replace link-down-on-close
	 * ethtool private flag) for ports with Port Disable bit set.
	 */
	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
}

/**
 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
 * @pi: port info structure
 *
 * If default override is enabled, initialize the user PHY cfg speed and FEC
 * settings using the default override mask from the NVM.
 *
 * The PHY should only be configured with the default override settings the
 * first time media is available. The __ICE_LINK_DEFAULT_OVERRIDE_PENDING state
 * is used to indicate that the user PHY cfg default override is initialized
 * and the PHY has not been configured with the default override settings. The
 * state is set here, and cleared in ice_configure_phy the first time the PHY
 * is configured.
 */
static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
{
	struct ice_link_default_override_tlv *ldo;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = pi->hw->back;

	ldo = &pf->link_dflt_override;

	/* If link default override is enabled, use it to mask NVM PHY
	 * capabilities for speed and FEC default configuration.
	 */
	cfg = &phy->curr_user_phy_cfg;

	if (ldo->phy_type_low || ldo->phy_type_high) {
		cfg->phy_type_low = pf->nvm_phy_type_lo &
				    cpu_to_le64(ldo->phy_type_low);
		cfg->phy_type_high = pf->nvm_phy_type_hi &
				     cpu_to_le64(ldo->phy_type_high);
	}
	cfg->link_fec_opt = ldo->fec_options;
	phy->curr_user_fec_req = ICE_FEC_AUTO;

	set_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
}

/**
 * ice_init_phy_user_cfg - Initialize the PHY user configuration
 * @pi: port info structure
 *
 * Initialize the current user PHY configuration and the requested speed, FEC,
 * and FC settings based on the reported PHY capabilities. These values are
 * used later when configuring the PHY, so that user settings survive link
 * events and unintended configuration changes are avoided.
 */
static int ice_init_phy_user_cfg(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = pi->hw->back;
	enum ice_status status;
	struct ice_vsi *vsi;
	int err = 0;

	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
		return -EIO;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
				     NULL);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
		err = -EIO;
		goto err_out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);

	/* check if lenient mode is supported and enabled */
	if (ice_fw_supports_link_override(&vsi->back->hw) &&
	    !(pcaps->module_compliance_enforcement &
	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);

		/* if link default override is enabled, initialize user PHY
		 * configuration with link default override values
		 */
		if (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN) {
			ice_init_phy_cfg_dflt_override(pi);
			goto out;
		}
	}

	/* if link default override is not enabled, initialize requested FEC
	 * and FC from the reported PHY capabilities
	 */
	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
						      pcaps->link_fec_options);
	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);

out:
	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
	set_bit(__ICE_PHY_INIT_COMPLETE, pf->state);
err_out:
	kfree(pcaps);
	return err;
}

/**
 * ice_configure_phy - configure PHY
 * @vsi: VSI of PHY
 *
 * Set the PHY configuration. If the current PHY configuration is the same as
 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
 * configure based on the PHY capabilities reported for the topology with
 * media.
 */
static int ice_configure_phy(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_port_info *pi;
	enum ice_status status;
	int err = 0;

	pi = vsi->port_info;
	if (!pi)
		return -EINVAL;

	/* Ensure we have media as we cannot configure a medialess port */
	if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
		return -EPERM;

	ice_print_topo_conflict(vsi);

	if (vsi->port_info->phy.link_info.topo_media_conflict ==
	    ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
		return -EPERM;

	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
		return ice_force_phys_link_state(vsi, true);

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* Get current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (status) {
		dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
			vsi->vsi_num, ice_stat_str(status));
		err = -EIO;
		goto done;
	}

	/* If PHY enable link is configured and configuration has not changed,
	 * there's nothing to do
	 */
	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
	    ice_phy_caps_equals_cfg(pcaps, &pi->phy.curr_user_phy_cfg))
		goto done;

	/* Use PHY topology as baseline for configuration */
	memset(pcaps, 0, sizeof(*pcaps));
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
				     NULL);
	if (status) {
		dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n",
			vsi->vsi_num, ice_stat_str(status));
		err = -EIO;
		goto done;
	}

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg) {
		err = -ENOMEM;
		goto done;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);

	/* Speed - If default override pending, use curr_user_phy_cfg set in
	 * ice_init_phy_cfg_dflt_override.
	 */
	if (test_and_clear_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING,
			       vsi->back->state)) {
		cfg->phy_type_low = pi->phy.curr_user_phy_cfg.phy_type_low;
		cfg->phy_type_high = pi->phy.curr_user_phy_cfg.phy_type_high;
	} else {
		u64 phy_low = 0, phy_high = 0;

		ice_update_phy_type(&phy_low, &phy_high,
				    pi->phy.curr_user_speed_req);
		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
		cfg->phy_type_high = pcaps->phy_type_high &
				     cpu_to_le64(phy_high);
	}

	/* Can't provide what was requested; use PHY capabilities */
	if (!cfg->phy_type_low && !cfg->phy_type_high) {
		cfg->phy_type_low = pcaps->phy_type_low;
		cfg->phy_type_high = pcaps->phy_type_high;
	}

	/* FEC */
	ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req);

	/* Can't provide what was requested; use PHY capabilities */
	if (cfg->link_fec_opt !=
	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
		cfg->link_fec_opt = pcaps->link_fec_options;
	}

	/* Flow Control - always supported; no need to check against
	 * capabilities
	 */
	ice_cfg_phy_fc(pi, cfg, pi->phy.curr_user_fc_req);

	/* Enable link and link update */
	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;

	status = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
	if (status) {
		dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
			vsi->vsi_num, ice_stat_str(status));
		err = -EIO;
	}

	kfree(cfg);
done:
	kfree(pcaps);
	return err;
}

/**
 * ice_check_media_subtask - Check for media
 * @pf: pointer to PF struct
 *
 * If media is available, then initialize PHY user configuration if it has not
 * been done, and configure the PHY if the interface is up.
 */
static void ice_check_media_subtask(struct ice_pf *pf)
{
	struct ice_port_info *pi;
	struct ice_vsi *vsi;
	int err;

	/* No need to check for media if it's already present */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
		return;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return;

	/* Refresh link info and check if media is present */
	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err)
		return;

	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state))
			ice_init_phy_user_cfg(pi);

		/* PHY settings are reset on media insertion, reconfigure
		 * PHY to make sure settings with media are initialized
		 */
		if (test_bit(__ICE_DOWN, vsi->state) &&
		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
			return;

		err = ice_configure_phy(vsi);
		if (!err)
			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);

		/* A Link Status Event will be generated; the event handler
		 * will complete bringing the interface up
		 */
	}
}

/**
 * ice_service_task - manage and run subtasks
 * @work: pointer to work_struct contained by the PF struct
 */
static void ice_service_task(struct work_struct *work)
{
	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
	unsigned long start_time = jiffies;

	/* subtasks */

	/* process reset requests first */
	ice_reset_subtask(pf);

	/* bail if a reset/recovery cycle is pending or rebuild failed */
	if (ice_is_reset_in_progress(pf->state) ||
	    test_bit(__ICE_SUSPENDED, pf->state) ||
	    test_bit(__ICE_NEEDS_RESTART, pf->state)) {
		ice_service_task_complete(pf);
		return;
	}

	ice_clean_adminq_subtask(pf);
	ice_check_media_subtask(pf);
	ice_check_for_hang_subtask(pf);
	ice_sync_fltr_subtask(pf);
	ice_handle_mdd_event(pf);
	ice_watchdog_subtask(pf);

	if (ice_is_safe_mode(pf)) {
		ice_service_task_complete(pf);
		return;
	}

	ice_process_vflr_event(pf);
	ice_clean_mailboxq_subtask(pf);
	ice_sync_arfs_fltrs(pf);

	/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
	ice_service_task_complete(pf);

	/* If the tasks have taken longer than one service timer period
	 * or there is more work to be done, reset the service timer to
	 * schedule the service task now.
	 */
	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
	    test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
		mod_timer(&pf->serv_tmr, jiffies);
}

/**
 * ice_set_ctrlq_len - configure ctrlq lengths for a device
 * @hw: pointer to the HW instance
 */
static void ice_set_ctrlq_len(struct ice_hw *hw)
{
	hw->adminq.num_rq_entries = ICE_AQ_LEN;
	hw->adminq.num_sq_entries = ICE_AQ_LEN;
	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
}

/**
 * ice_schedule_reset - schedule a reset
 * @pf: board private structure
 * @reset: reset being requested
 */
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
{
	struct device *dev = ice_pf_to_dev(pf);

	/* bail out if earlier reset has failed */
	if (test_bit(__ICE_RESET_FAILED, pf->state)) {
		dev_dbg(dev, "earlier reset has failed\n");
		return -EIO;
	}
	/* bail if reset/recovery already in progress */
	if (ice_is_reset_in_progress(pf->state)) {
		dev_dbg(dev, "Reset already in progress\n");
		return -EBUSY;
	}

	switch (reset) {
	case ICE_RESET_PFR:
		set_bit(__ICE_PFR_REQ, pf->state);
		break;
	case ICE_RESET_CORER:
		set_bit(__ICE_CORER_REQ, pf->state);
		break;
	case ICE_RESET_GLOBR:
		set_bit(__ICE_GLOBR_REQ, pf->state);
		break;
	default:
		return -EINVAL;
	}

	ice_service_task_schedule(pf);
	return 0;
}
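
/* Usage sketch (illustrative): callers that detect a fatal condition queue a
 * PF reset and let the service task perform it asynchronously, e.g.:
 *
 *	if (ice_schedule_reset(pf, ICE_RESET_PFR))
 *		dev_err(ice_pf_to_dev(pf), "failed to schedule PF reset\n");
 *
 * -EBUSY from a reset already in progress is usually benign for such callers.
 */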

/**
 * ice_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 */
static void
ice_irq_affinity_notify(struct irq_affinity_notify *notify,
			const cpumask_t *mask)
{
	struct ice_q_vector *q_vector =
		container_of(notify, struct ice_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * ice_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 */
static void ice_irq_affinity_release(struct kref __always_unused *ref) {}

/**
 * ice_vsi_ena_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 */
static int ice_vsi_ena_irq(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->back->hw;
	int i;

	ice_for_each_q_vector(vsi, i)
		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);

	ice_flush(hw);
	return 0;
}

/**
 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
 * @vsi: the VSI being configured
 * @basename: name for the vector
 */
static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct ice_pf *pf = vsi->back;
	int base = vsi->base_vector;
	struct device *dev;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;
	int irq_num;

	dev = ice_pf_to_dev(pf);
	for (vector = 0; vector < q_vectors; vector++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[vector];

		irq_num = pf->msix_entries[base + vector].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0,
				       q_vector->name, q_vector);
		if (err) {
			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
				   err);
			goto free_q_irqs;
		}

		/* register for affinity change notifications */
		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
			struct irq_affinity_notify *affinity_notify;

			affinity_notify = &q_vector->affinity_notify;
			affinity_notify->notify = ice_irq_affinity_notify;
			affinity_notify->release = ice_irq_affinity_release;
			irq_set_affinity_notifier(irq_num, affinity_notify);
		}

		/* assign the mask for this irq */
		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
	}

	vsi->irqs_ready = true;
	return 0;

free_q_irqs:
	while (vector) {
		vector--;
		irq_num = pf->msix_entries[base + vector].vector;
		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
			irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		/* dev_id must match the cookie passed to devm_request_irq() */
		devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
	}
	return err;
}

/**
 * ice_xdp_alloc_setup_rings - Allocate Tx resources for XDP rings
 * @vsi: VSI to setup Tx rings used by XDP
 *
 * Return 0 on success and negative value on error
 */
static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	int i;

	for (i = 0; i < vsi->num_xdp_txq; i++) {
		u16 xdp_q_idx = vsi->alloc_txq + i;
		struct ice_ring *xdp_ring;

		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);

		if (!xdp_ring)
			goto free_xdp_rings;

		xdp_ring->q_index = xdp_q_idx;
		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
		xdp_ring->ring_active = false;
		xdp_ring->vsi = vsi;
		xdp_ring->netdev = NULL;
		xdp_ring->dev = dev;
		xdp_ring->count = vsi->num_tx_desc;
		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
		if (ice_setup_tx_ring(xdp_ring))
			goto free_xdp_rings;
		ice_set_ring_xdp(xdp_ring);
		xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
	}

	return 0;

free_xdp_rings:
	for (; i >= 0; i--)
		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
			ice_free_tx_ring(vsi->xdp_rings[i]);
	return -ENOMEM;
}

/**
 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
 * @vsi: VSI to set the bpf prog on
 * @prog: the bpf prog pointer
 */
static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
{
	struct bpf_prog *old_prog;
	int i;

	old_prog = xchg(&vsi->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	ice_for_each_rxq(vsi, i)
		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
}

/**
 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
 * @vsi: VSI to bring up Tx rings used by XDP
 * @prog: bpf program that will be assigned to VSI
 *
 * Return 0 on success and negative value on error
 */
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	int xdp_rings_rem = vsi->num_xdp_txq;
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg xdp_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->num_xdp_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = vsi->alloc_txq,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	enum ice_status status;
	struct device *dev;
	int i, v_idx;

	dev = ice_pf_to_dev(pf);
	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
	if (!vsi->xdp_rings)
		return -ENOMEM;

	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
	if (__ice_vsi_get_qs(&xdp_qs_cfg))
		goto err_map_xdp;

	if (ice_xdp_alloc_setup_rings(vsi))
		goto clear_xdp_rings;

	/* follow the logic from ice_vsi_map_rings_to_vectors */
	ice_for_each_q_vector(vsi, v_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
		int xdp_rings_per_v, q_id, q_base;

		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
					       vsi->num_q_vectors - v_idx);
		q_base = vsi->num_xdp_txq - xdp_rings_rem;

		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
			struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];

			xdp_ring->q_vector = q_vector;
			xdp_ring->next = q_vector->tx.ring;
			q_vector->tx.ring = xdp_ring;
		}
		xdp_rings_rem -= xdp_rings_per_v;
	}

	/* omit the scheduler update if in reset path; XDP queues will be
	 * taken into account at the end of ice_vsi_rebuild, where
	 * ice_cfg_vsi_lan is being called
	 */
	if (ice_is_reset_in_progress(pf->state))
		return 0;

	/* tell the Tx scheduler that right now we have
	 * additional queues
	 */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
			ice_stat_str(status));
		goto clear_xdp_rings;
	}
	ice_vsi_assign_bpf_prog(vsi, prog);

	return 0;
clear_xdp_rings:
	for (i = 0; i < vsi->num_xdp_txq; i++)
		if (vsi->xdp_rings[i]) {
			kfree_rcu(vsi->xdp_rings[i], rcu);
			vsi->xdp_rings[i] = NULL;
		}

err_map_xdp:
	mutex_lock(&pf->avail_q_mutex);
	for (i = 0; i < vsi->num_xdp_txq; i++) {
		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
	}
	mutex_unlock(&pf->avail_q_mutex);

	devm_kfree(dev, vsi->xdp_rings);
	return -ENOMEM;
}

/**
 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
 * @vsi: VSI to remove XDP rings
 *
 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
 * resources
 */
int ice_destroy_xdp_rings(struct ice_vsi *vsi)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_pf *pf = vsi->back;
	int i, v_idx;

	/* q_vectors are freed in reset path so there's no point in detaching
	 * rings; in case of rebuild being triggered not from reset bits
	 * in pf->state won't be set, so additionally check first q_vector
	 * against NULL
	 */
	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
		goto free_qmap;

	ice_for_each_q_vector(vsi, v_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
		struct ice_ring *ring;

		ice_for_each_ring(ring, q_vector->tx)
			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
				break;

		/* restore the value of last node prior to XDP setup */
		q_vector->tx.ring = ring;
	}

free_qmap:
	mutex_lock(&pf->avail_q_mutex);
	for (i = 0; i < vsi->num_xdp_txq; i++) {
		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
	}
	mutex_unlock(&pf->avail_q_mutex);

	for (i = 0; i < vsi->num_xdp_txq; i++)
		if (vsi->xdp_rings[i]) {
			if (vsi->xdp_rings[i]->desc)
				ice_free_tx_ring(vsi->xdp_rings[i]);
			kfree_rcu(vsi->xdp_rings[i], rcu);
			vsi->xdp_rings[i] = NULL;
		}

	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
	vsi->xdp_rings = NULL;

	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
		return 0;

	ice_vsi_assign_bpf_prog(vsi, NULL);

	/* notify Tx scheduler that we destroyed XDP queues and bring
	 * back the old number of child nodes
	 */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->num_txq;

	/* change number of XDP Tx queues to 0 */
	vsi->num_xdp_txq = 0;

	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
			       max_txqs);
}
2475
2476
2477
2478
2479
2480
2481
2482static int
2483ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2484 struct netlink_ext_ack *extack)
2485{
2486 int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2487 bool if_running = netif_running(vsi->netdev);
2488 int ret = 0, xdp_ring_err = 0;
2489
2490 if (frame_size > vsi->rx_buf_len) {
2491 NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2492 return -EOPNOTSUPP;
2493 }
2494
2495
2496 if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) {
2497 ret = ice_down(vsi);
2498 if (ret) {
2499 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2500 return ret;
2501 }
2502 }
2503
2504 if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2505 vsi->num_xdp_txq = vsi->alloc_rxq;
2506 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2507 if (xdp_ring_err)
2508 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2509 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2510 xdp_ring_err = ice_destroy_xdp_rings(vsi);
2511 if (xdp_ring_err)
2512 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2513 } else {
2514 ice_vsi_assign_bpf_prog(vsi, prog);
2515 }
2516
2517 if (if_running)
2518 ret = ice_up(vsi);
2519
2520 if (!ret && prog && vsi->xsk_umems) {
2521 int i;
2522
2523 ice_for_each_rxq(vsi, i) {
2524 struct ice_ring *rx_ring = vsi->rx_rings[i];
2525
2526 if (rx_ring->xsk_umem)
2527 napi_schedule(&rx_ring->q_vector->napi);
2528 }
2529 }
2530
2531 return (ret || xdp_ring_err) ? -ENOMEM : 0;
2532}
2533
2534
2535
2536
2537
2538
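/**
 * ice_xdp - implements ndo_bpf for XDP
 * @dev: netdevice
 * @xdp: XDP command
 */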
2539static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2540{
2541 struct ice_netdev_priv *np = netdev_priv(dev);
2542 struct ice_vsi *vsi = np->vsi;
2543
2544 if (vsi->type != ICE_VSI_PF) {
2545 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2546 return -EINVAL;
2547 }
2548
2549 switch (xdp->command) {
2550 case XDP_SETUP_PROG:
2551 return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2552 case XDP_SETUP_XSK_UMEM:
2553 return ice_xsk_umem_setup(vsi, xdp->xsk.umem,
2554 xdp->xsk.queue_id);
2555 default:
2556 return -EINVAL;
2557 }
2558}
2559
2560
2561
2562
2563
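/**
 * ice_ena_misc_vector - enable the non-queue interrupts
 * @pf: board private structure
 */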
2564static void ice_ena_misc_vector(struct ice_pf *pf)
2565{
2566 struct ice_hw *hw = &pf->hw;
2567 u32 val;
2568
2569
2570
2571
2572
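/* Disable anti-spoof detection interrupt to prevent spurious event
 * interrupts during a function reset. Anti-spoof functionality is
 * still supported.
 */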
2573 val = rd32(hw, GL_MDCK_TX_TDPU);
2574 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2575 wr32(hw, GL_MDCK_TX_TDPU, val);
2576
2577
2578 wr32(hw, PFINT_OICR_ENA, 0);
2579 rd32(hw, PFINT_OICR);
2580
2581 val = (PFINT_OICR_ECC_ERR_M |
2582 PFINT_OICR_MAL_DETECT_M |
2583 PFINT_OICR_GRST_M |
2584 PFINT_OICR_PCI_EXCEPTION_M |
2585 PFINT_OICR_VFLR_M |
2586 PFINT_OICR_HMC_ERR_M |
2587 PFINT_OICR_PE_CRITERR_M);
2588
2589 wr32(hw, PFINT_OICR_ENA, val);
2590
2591
2592 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
2593 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
2594}
2595
2596
2597
2598
2599
2600
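/**
 * ice_misc_intr - misc interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure
 */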
2601static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
2602{
2603 struct ice_pf *pf = (struct ice_pf *)data;
2604 struct ice_hw *hw = &pf->hw;
2605 irqreturn_t ret = IRQ_NONE;
2606 struct device *dev;
2607 u32 oicr, ena_mask;
2608
2609 dev = ice_pf_to_dev(pf);
2610 set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
2611 set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
2612
2613 oicr = rd32(hw, PFINT_OICR);
2614 ena_mask = rd32(hw, PFINT_OICR_ENA);
2615
2616 if (oicr & PFINT_OICR_SWINT_M) {
2617 ena_mask &= ~PFINT_OICR_SWINT_M;
2618 pf->sw_int_count++;
2619 }
2620
2621 if (oicr & PFINT_OICR_MAL_DETECT_M) {
2622 ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
2623 set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
2624 }
2625 if (oicr & PFINT_OICR_VFLR_M) {
2626
2627 if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
2628 u32 reg = rd32(hw, PFINT_OICR_ENA);
2629
2630 reg &= ~PFINT_OICR_VFLR_M;
2631 wr32(hw, PFINT_OICR_ENA, reg);
2632 } else {
2633 ena_mask &= ~PFINT_OICR_VFLR_M;
2634 set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
2635 }
2636 }
2637
2638 if (oicr & PFINT_OICR_GRST_M) {
2639 u32 reset;
2640
2641
2642 ena_mask &= ~PFINT_OICR_GRST_M;
2643 reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
2644 GLGEN_RSTAT_RESET_TYPE_S;
2645
2646 if (reset == ICE_RESET_CORER)
2647 pf->corer_count++;
2648 else if (reset == ICE_RESET_GLOBR)
2649 pf->globr_count++;
2650 else if (reset == ICE_RESET_EMPR)
2651 pf->empr_count++;
2652 else
2653 dev_dbg(dev, "Invalid reset type %d\n", reset);
2654
2655
2656
2657
2658
2659
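/* If a reset cycle isn't already in progress, we set a bit in
 * pf->state so that the service task can start a reset/rebuild.
 */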
2660 if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) {
2661 if (reset == ICE_RESET_CORER)
2662 set_bit(__ICE_CORER_RECV, pf->state);
2663 else if (reset == ICE_RESET_GLOBR)
2664 set_bit(__ICE_GLOBR_RECV, pf->state);
2665 else
2666 set_bit(__ICE_EMPR_RECV, pf->state);
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
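/* hw->reset_ongoing indicates whether the hardware is in reset;
 * it is set here at the start of the cycle and cleared again once
 * the rebuild path determines the hardware is out of reset
 */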
2681 hw->reset_ongoing = true;
2682 }
2683 }
2684
2685 if (oicr & PFINT_OICR_HMC_ERR_M) {
2686 ena_mask &= ~PFINT_OICR_HMC_ERR_M;
2687 dev_dbg(dev, "HMC Error interrupt - info 0x%x, data 0x%x\n",
2688 rd32(hw, PFHMC_ERRORINFO),
2689 rd32(hw, PFHMC_ERRORDATA));
2690 }
2691
2692
2693 oicr &= ena_mask;
2694 if (oicr) {
2695 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
2696
2697
2698
2699 if (oicr & (PFINT_OICR_PE_CRITERR_M |
2700 PFINT_OICR_PCI_EXCEPTION_M |
2701 PFINT_OICR_ECC_ERR_M)) {
2702 set_bit(__ICE_PFR_REQ, pf->state);
2703 ice_service_task_schedule(pf);
2704 }
2705 }
2706 ret = IRQ_HANDLED;
2707
2708 ice_service_task_schedule(pf);
2709 ice_irq_dynamic_ena(hw, NULL, NULL);
2710
2711 return ret;
2712}
2713
2714
2715
2716
2717
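/**
 * ice_dis_ctrlq_interrupts - disable control queue interrupts
 * @hw: pointer to HW structure
 */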
2718static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
2719{
2720
2721 wr32(hw, PFINT_FW_CTL,
2722 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
2723
2724
2725 wr32(hw, PFINT_MBX_CTL,
2726 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
2727
2728
2729 wr32(hw, PFINT_OICR_CTL,
2730 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
2731
2732 ice_flush(hw);
2733}
2734
2735
2736
2737
2738
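/**
 * ice_free_irq_msix_misc - Unroll misc vector setup
 * @pf: board private structure
 */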
2739static void ice_free_irq_msix_misc(struct ice_pf *pf)
2740{
2741 struct ice_hw *hw = &pf->hw;
2742
2743 ice_dis_ctrlq_interrupts(hw);
2744
2745
2746 wr32(hw, PFINT_OICR_ENA, 0);
2747 ice_flush(hw);
2748
2749 if (pf->msix_entries) {
2750 synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
2751 devm_free_irq(ice_pf_to_dev(pf),
2752 pf->msix_entries[pf->oicr_idx].vector, pf);
2753 }
2754
2755 pf->num_avail_sw_msix += 1;
2756 ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
2757}
2758
2759
2760
2761
2762
2763
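/**
 * ice_ena_ctrlq_interrupts - enable control queue interrupts
 * @hw: pointer to HW structure
 * @reg_idx: HW vector index to associate the control queue interrupts with
 */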
2764static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
2765{
2766 u32 val;
2767
2768 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
2769 PFINT_OICR_CTL_CAUSE_ENA_M);
2770 wr32(hw, PFINT_OICR_CTL, val);
2771
2772
2773 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
2774 PFINT_FW_CTL_CAUSE_ENA_M);
2775 wr32(hw, PFINT_FW_CTL, val);
2776
2777
2778 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
2779 PFINT_MBX_CTL_CAUSE_ENA_M);
2780 wr32(hw, PFINT_MBX_CTL, val);
2781
2782 ice_flush(hw);
2783}
2784
2785
2786
2787
2788
2789
2790
2791
2792
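/**
 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors. This is not used
 * when in MSI or Legacy interrupt mode.
 */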
2793static int ice_req_irq_msix_misc(struct ice_pf *pf)
2794{
2795 struct device *dev = ice_pf_to_dev(pf);
2796 struct ice_hw *hw = &pf->hw;
2797 int oicr_idx, err = 0;
2798
2799 if (!pf->int_name[0])
2800 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
2801 dev_driver_string(dev), dev_name(dev));
2802
2803
2804
2805
2806
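/* Do not request IRQ but do enable OICR interrupt since settings are
 * lost during reset. Note that this function is called only during
 * rebuild path and not while reset is in progress.
 */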
2807 if (ice_is_reset_in_progress(pf->state))
2808 goto skip_req_irq;
2809
2810
2811 oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2812 if (oicr_idx < 0)
2813 return oicr_idx;
2814
2815 pf->num_avail_sw_msix -= 1;
2816 pf->oicr_idx = (u16)oicr_idx;
2817
2818 err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
2819 ice_misc_intr, 0, pf->int_name, pf);
2820 if (err) {
2821 dev_err(dev, "devm_request_irq for %s failed: %d\n",
2822 pf->int_name, err);
2823 ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
2824 pf->num_avail_sw_msix += 1;
2825 return err;
2826 }
2827
2828skip_req_irq:
2829 ice_ena_misc_vector(pf);
2830
2831 ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
2832 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
2833 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
2834
2835 ice_flush(hw);
2836 ice_irq_dynamic_ena(hw, NULL, NULL);
2837
2838 return 0;
2839}
2840
2841
2842
2843
2844
2845
2846
2847
2848
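/**
 * ice_napi_add - register NAPI handler for the VSI
 * @vsi: VSI for which NAPI handler is to be registered
 *
 * Registers the NAPI poll routine for every q_vector of a VSI that has a
 * netdev attached.
 */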
2849static void ice_napi_add(struct ice_vsi *vsi)
2850{
2851 int v_idx;
2852
2853 if (!vsi->netdev)
2854 return;
2855
2856 ice_for_each_q_vector(vsi, v_idx)
2857 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
2858 ice_napi_poll, NAPI_POLL_WEIGHT);
2859}
2860
2861
2862
2863
2864
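/**
 * ice_set_ops - set netdev and ethtool ops for the given netdev
 * @netdev: netdev instance
 */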
2865static void ice_set_ops(struct net_device *netdev)
2866{
2867 struct ice_pf *pf = ice_netdev_to_pf(netdev);
2868
2869 if (ice_is_safe_mode(pf)) {
2870 netdev->netdev_ops = &ice_netdev_safe_mode_ops;
2871 ice_set_ethtool_safe_mode_ops(netdev);
2872 return;
2873 }
2874
2875 netdev->netdev_ops = &ice_netdev_ops;
2876 ice_set_ethtool_ops(netdev);
2877}
2878
2879
2880
2881
2882
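/**
 * ice_set_netdev_features - set features for the given netdev
 * @netdev: netdev instance
 */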
2883static void ice_set_netdev_features(struct net_device *netdev)
2884{
2885 struct ice_pf *pf = ice_netdev_to_pf(netdev);
2886 netdev_features_t csumo_features;
2887 netdev_features_t vlano_features;
2888 netdev_features_t dflt_features;
2889 netdev_features_t tso_features;
2890
2891 if (ice_is_safe_mode(pf)) {
2892
2893 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
2894 netdev->hw_features = netdev->features;
2895 return;
2896 }
2897
2898 dflt_features = NETIF_F_SG |
2899 NETIF_F_HIGHDMA |
2900 NETIF_F_NTUPLE |
2901 NETIF_F_RXHASH;
2902
2903 csumo_features = NETIF_F_RXCSUM |
2904 NETIF_F_IP_CSUM |
2905 NETIF_F_SCTP_CRC |
2906 NETIF_F_IPV6_CSUM;
2907
2908 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
2909 NETIF_F_HW_VLAN_CTAG_TX |
2910 NETIF_F_HW_VLAN_CTAG_RX;
2911
2912 tso_features = NETIF_F_TSO |
2913 NETIF_F_TSO_ECN |
2914 NETIF_F_TSO6 |
2915 NETIF_F_GSO_GRE |
2916 NETIF_F_GSO_UDP_TUNNEL |
2917 NETIF_F_GSO_GRE_CSUM |
2918 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2919 NETIF_F_GSO_PARTIAL |
2920 NETIF_F_GSO_IPXIP4 |
2921 NETIF_F_GSO_IPXIP6 |
2922 NETIF_F_GSO_UDP_L4;
2923
2924 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
2925 NETIF_F_GSO_GRE_CSUM;
2926
2927 netdev->hw_features = dflt_features | csumo_features |
2928 vlano_features | tso_features;
2929
2930
2931 netdev->mpls_features = NETIF_F_HW_CSUM;
2932
2933
2934 netdev->features |= netdev->hw_features;
2935
2936 netdev->hw_enc_features |= dflt_features | csumo_features |
2937 tso_features;
2938 netdev->vlan_features |= dflt_features | csumo_features |
2939 tso_features;
2940}
2941
2942
2943
2944
2945
2946
2947
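/**
 * ice_cfg_netdev - Allocate, configure and register a netdev
 * @vsi: the VSI associated with the new netdev
 *
 * Returns 0 on success, negative value on failure
 */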
2948static int ice_cfg_netdev(struct ice_vsi *vsi)
2949{
2950 struct ice_pf *pf = vsi->back;
2951 struct ice_netdev_priv *np;
2952 struct net_device *netdev;
2953 u8 mac_addr[ETH_ALEN];
2954 int err;
2955
2956 err = ice_devlink_create_port(pf);
2957 if (err)
2958 return err;
2959
2960 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
2961 vsi->alloc_rxq);
2962 if (!netdev) {
2963 err = -ENOMEM;
2964 goto err_destroy_devlink_port;
2965 }
2966
2967 vsi->netdev = netdev;
2968 np = netdev_priv(netdev);
2969 np->vsi = vsi;
2970
2971 ice_set_netdev_features(netdev);
2972
2973 ice_set_ops(netdev);
2974
2975 if (vsi->type == ICE_VSI_PF) {
2976 SET_NETDEV_DEV(netdev, ice_pf_to_dev(pf));
2977 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
2978 ether_addr_copy(netdev->dev_addr, mac_addr);
2979 ether_addr_copy(netdev->perm_addr, mac_addr);
2980 }
2981
2982 netdev->priv_flags |= IFF_UNICAST_FLT;
2983
2984
2985 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
2986
2987
2988 netdev->watchdog_timeo = 5 * HZ;
2989
2990 netdev->min_mtu = ETH_MIN_MTU;
2991 netdev->max_mtu = ICE_MAX_MTU;
2992
2993 err = register_netdev(vsi->netdev);
2994 if (err)
2995 goto err_free_netdev;
2996
2997 devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
2998
2999 netif_carrier_off(vsi->netdev);
3000
3001
3002 netif_tx_stop_all_queues(vsi->netdev);
3003
3004 return 0;
3005
3006err_free_netdev:
3007 free_netdev(vsi->netdev);
3008 vsi->netdev = NULL;
3009err_destroy_devlink_port:
3010 ice_devlink_destroy_port(pf);
3011 return err;
3012}
3013
3014
3015
3016
3017
3018
3019
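/**
 * ice_fill_rss_lut - Fill the RSS lookup table with default values
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 */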
3020void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3021{
3022 u16 i;
3023
3024 for (i = 0; i < rss_table_size; i++)
3025 lut[i] = i % rss_size;
3026}
3027
3028
3029
3030
3031
3032
3033
3034
3035
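/**
 * ice_pf_vsi_setup - Set up a PF VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */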
3036static struct ice_vsi *
3037ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3038{
3039 return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
3040}
3041
3042
3043
3044
3045
3046
3047
3048
3049
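/**
 * ice_ctrl_vsi_setup - Set up a control VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */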
3050static struct ice_vsi *
3051ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3052{
3053 return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
3054}
3055
3056
3057
3058
3059
3060
3061
3062
3063
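/**
 * ice_lb_vsi_setup - Set up a loopback VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */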
3064struct ice_vsi *
3065ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3066{
3067 return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
3068}
3069
3070
3071
3072
3073
3074
3075
3076
3077
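/**
 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: VLAN TPID (unused)
 * @vid: VLAN ID to be added
 *
 * net_device_ops implementation for adding VLAN IDs
 */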
3078static int
3079ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
3080 u16 vid)
3081{
3082 struct ice_netdev_priv *np = netdev_priv(netdev);
3083 struct ice_vsi *vsi = np->vsi;
3084 int ret;
3085
3086 if (vid >= VLAN_N_VID) {
3087 netdev_err(netdev, "VLAN ID %d requested is out of range; must be less than %d\n",
3088 vid, VLAN_N_VID);
3089 return -EINVAL;
3090 }
3091
3092 if (vsi->info.pvid)
3093 return -EINVAL;
3094
3095
3096 if (!vid)
3097 return 0;
3098
3099
3100 if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
3101 ret = ice_cfg_vlan_pruning(vsi, true, false);
3102 if (ret)
3103 return ret;
3104 }
3105
3106
3107
3108
3109 ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3110 if (!ret) {
3111 vsi->vlan_ena = true;
3112 set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
3113 }
3114
3115 return ret;
3116}
3117
3118
3119
3120
3121
3122
3123
3124
3125
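/**
 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
 * @netdev: network interface to be adjusted
 * @proto: VLAN TPID (unused)
 * @vid: VLAN ID to be removed
 *
 * net_device_ops implementation for removing VLAN IDs
 */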
3126static int
3127ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
3128 u16 vid)
3129{
3130 struct ice_netdev_priv *np = netdev_priv(netdev);
3131 struct ice_vsi *vsi = np->vsi;
3132 int ret;
3133
3134 if (vsi->info.pvid)
3135 return -EINVAL;
3136
3137
3138 if (!vid)
3139 return 0;
3140
3141
3142
3143
3144 ret = ice_vsi_kill_vlan(vsi, vid);
3145 if (ret)
3146 return ret;
3147
3148
3149 if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
3150 ret = ice_cfg_vlan_pruning(vsi, false, false);
3151
3152 vsi->vlan_ena = false;
3153 set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
3154 return ret;
3155}
3156
3157
3158
3159
3160
3161
3162
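/**
 * ice_setup_pf_sw - Setup the HW switch on top of HW for PF
 * @pf: board private structure
 *
 * Returns 0 on success, negative value on failure
 */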
3163static int ice_setup_pf_sw(struct ice_pf *pf)
3164{
3165 struct ice_vsi *vsi;
3166 int status = 0;
3167
3168 if (ice_is_reset_in_progress(pf->state))
3169 return -EBUSY;
3170
3171 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3172 if (!vsi)
3173 return -ENOMEM;
3174
3175 status = ice_cfg_netdev(vsi);
3176 if (status) {
3177 status = -ENODEV;
3178 goto unroll_vsi_setup;
3179 }
3180
3181 ice_vsi_cfg_frame_size(vsi);
3182
3183
3184 ice_dcbnl_setup(vsi);
3185
3186
3187
3188
3189
3190 ice_napi_add(vsi);
3191
3192 status = ice_set_cpu_rx_rmap(vsi);
3193 if (status) {
3194 dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
3195 vsi->vsi_num, status);
3196 status = -EINVAL;
3197 goto unroll_napi_add;
3198 }
3199 status = ice_init_mac_fltr(pf);
3200 if (status)
3201 goto free_cpu_rx_map;
3202
3203 return status;
3204
3205free_cpu_rx_map:
3206 ice_free_cpu_rx_rmap(vsi);
3207
3208unroll_napi_add:
3209 if (vsi) {
3210 ice_napi_del(vsi);
3211 if (vsi->netdev) {
3212 if (vsi->netdev->reg_state == NETREG_REGISTERED)
3213 unregister_netdev(vsi->netdev);
3214 free_netdev(vsi->netdev);
3215 vsi->netdev = NULL;
3216 }
3217 }
3218
3219unroll_vsi_setup:
3220 ice_vsi_release(vsi);
3221 return status;
3222}
3223
3224
3225
3226
3227
3228
3229
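/**
 * ice_get_avail_q_count - Get count of queues not in use
 * @pf_qmap: bitmap to get queue use count from
 * @lock: pointer to a mutex that protects access to pf_qmap
 * @size: size of the bitmap
 */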
3230static u16
3231ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3232{
3233 unsigned long bit;
3234 u16 count = 0;
3235
3236 mutex_lock(lock);
3237 for_each_clear_bit(bit, pf_qmap, size)
3238 count++;
3239 mutex_unlock(lock);
3240
3241 return count;
3242}
3243
3244
3245
3246
3247
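/**
 * ice_get_avail_txq_count - Get count of Tx queues available for use
 * @pf: pointer to an ice_pf instance
 */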
3248u16 ice_get_avail_txq_count(struct ice_pf *pf)
3249{
3250 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3251 pf->max_pf_txqs);
3252}
3253
3254
3255
3256
3257
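/**
 * ice_get_avail_rxq_count - Get count of Rx queues available for use
 * @pf: pointer to an ice_pf instance
 */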
3258u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3259{
3260 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3261 pf->max_pf_rxqs);
3262}
3263
3264
3265
3266
3267
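/**
 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
 * @pf: board private structure to de-initialize
 */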
3268static void ice_deinit_pf(struct ice_pf *pf)
3269{
3270 ice_service_task_stop(pf);
3271 mutex_destroy(&pf->sw_mutex);
3272 mutex_destroy(&pf->tc_mutex);
3273 mutex_destroy(&pf->avail_q_mutex);
3274
3275 if (pf->avail_txqs) {
3276 bitmap_free(pf->avail_txqs);
3277 pf->avail_txqs = NULL;
3278 }
3279
3280 if (pf->avail_rxqs) {
3281 bitmap_free(pf->avail_rxqs);
3282 pf->avail_rxqs = NULL;
3283 }
3284}
3285
3286
3287
3288
3289
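/**
 * ice_set_pf_caps - set PF capability flags based on HW capabilities
 * @pf: pointer to the PF instance
 */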
3290static void ice_set_pf_caps(struct ice_pf *pf)
3291{
3292 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3293
3294 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3295 if (func_caps->common_cap.dcb)
3296 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3297 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3298 if (func_caps->common_cap.sr_iov_1_1) {
3299 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3300 pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
3301 ICE_MAX_VF_COUNT);
3302 }
3303 clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3304 if (func_caps->common_cap.rss_table_size)
3305 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3306
3307 clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3308 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3309 u16 unused;
3310
3311
3312
3313
3314 pf->ctrl_vsi_idx = ICE_NO_VSI;
3315 set_bit(ICE_FLAG_FD_ENA, pf->flags);
3316
3317 ice_alloc_fd_guar_item(&pf->hw, &unused,
3318 func_caps->fd_fltr_guar);
3319
3320 ice_alloc_fd_shrd_item(&pf->hw, &unused,
3321 func_caps->fd_fltr_best_effort);
3322 }
3323
3324 pf->max_pf_txqs = func_caps->common_cap.num_txq;
3325 pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3326}
3327
3328
3329
3330
3331
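/**
 * ice_init_pf - Initialize general software structures (struct ice_pf)
 * @pf: board private structure to initialize
 */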
3332static int ice_init_pf(struct ice_pf *pf)
3333{
3334 ice_set_pf_caps(pf);
3335
3336 mutex_init(&pf->sw_mutex);
3337 mutex_init(&pf->tc_mutex);
3338
3339 INIT_HLIST_HEAD(&pf->aq_wait_list);
3340 spin_lock_init(&pf->aq_wait_lock);
3341 init_waitqueue_head(&pf->aq_wait_queue);
3342
3343
3344 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3345 pf->serv_tmr_period = HZ;
3346 INIT_WORK(&pf->serv_task, ice_service_task);
3347 clear_bit(__ICE_SERVICE_SCHED, pf->state);
3348
3349 mutex_init(&pf->avail_q_mutex);
3350 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3351 if (!pf->avail_txqs)
3352 return -ENOMEM;
3353
3354 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3355 if (!pf->avail_rxqs) {
3356 bitmap_free(pf->avail_txqs);
3357 pf->avail_txqs = NULL;
3358 return -ENOMEM;
3359 }
3360
3361 return 0;
3362}
3363
3364
3365
3366
3367
3368
3369
3370
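/**
 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
 * @pf: board private structure
 *
 * Compute the number of MSIX vectors required (v_budget) and request
 * them from the OS. Return the number of vectors reserved or a negative
 * error code on failure.
 */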
3371static int ice_ena_msix_range(struct ice_pf *pf)
3372{
3373 struct device *dev = ice_pf_to_dev(pf);
3374 int v_left, v_actual, v_budget = 0;
3375 int needed, err, i;
3376
3377 v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3378
3379
3380 needed = 1;
3381 if (v_left < needed)
3382 goto no_hw_vecs_left_err;
3383 v_budget += needed;
3384 v_left -= needed;
3385
3386
3387 needed = min_t(int, num_online_cpus(), v_left);
3388 if (v_left < needed)
3389 goto no_hw_vecs_left_err;
3390 pf->num_lan_msix = needed;
3391 v_budget += needed;
3392 v_left -= needed;
3393
3394
3395 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3396 needed = ICE_FDIR_MSIX;
3397 if (v_left < needed)
3398 goto no_hw_vecs_left_err;
3399 v_budget += needed;
3400 v_left -= needed;
3401 }
3402
3403 pf->msix_entries = devm_kcalloc(dev, v_budget,
3404 sizeof(*pf->msix_entries), GFP_KERNEL);
3405
3406 if (!pf->msix_entries) {
3407 err = -ENOMEM;
3408 goto exit_err;
3409 }
3410
3411 for (i = 0; i < v_budget; i++)
3412 pf->msix_entries[i].entry = i;
3413
3414
3415 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3416 ICE_MIN_MSIX, v_budget);
3417
3418 if (v_actual < 0) {
3419 dev_err(dev, "unable to reserve MSI-X vectors\n");
3420 err = v_actual;
3421 goto msix_err;
3422 }
3423
3424 if (v_actual < v_budget) {
3425 dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
3426 v_budget, v_actual);
3427
3428#define ICE_MIN_LAN_VECS 2
3429#define ICE_MIN_RDMA_VECS 2
3430#define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1)
3431
3432 if (v_actual < ICE_MIN_LAN_VECS) {
3433
3434 pci_disable_msix(pf->pdev);
3435 err = -ERANGE;
3436 goto msix_err;
3437 } else {
3438 pf->num_lan_msix = ICE_MIN_LAN_VECS;
3439 }
3440 }
3441
3442 return v_actual;
3443
3444msix_err:
3445 devm_kfree(dev, pf->msix_entries);
3446 goto exit_err;
3447
3448no_hw_vecs_left_err:
3449 dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
3450 needed, v_left);
3451 err = -ERANGE;
3452exit_err:
3453 pf->num_lan_msix = 0;
3454 return err;
3455}
3456
3457
3458
3459
3460
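/**
 * ice_dis_msix - Disable MSI-X interrupt setup in OS
 * @pf: board private structure
 */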
3461static void ice_dis_msix(struct ice_pf *pf)
3462{
3463 pci_disable_msix(pf->pdev);
3464 devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
3465 pf->msix_entries = NULL;
3466}
3467
3468
3469
3470
3471
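/**
 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
 * @pf: board private structure
 */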
3472static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3473{
3474 ice_dis_msix(pf);
3475
3476 if (pf->irq_tracker) {
3477 devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
3478 pf->irq_tracker = NULL;
3479 }
3480}
3481
3482
3483
3484
3485
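/**
 * ice_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 */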
3486static int ice_init_interrupt_scheme(struct ice_pf *pf)
3487{
3488 int vectors;
3489
3490 vectors = ice_ena_msix_range(pf);
3491
3492 if (vectors < 0)
3493 return vectors;
3494
3495
3496 pf->irq_tracker =
3497 devm_kzalloc(ice_pf_to_dev(pf), sizeof(*pf->irq_tracker) +
3498 (sizeof(u16) * vectors), GFP_KERNEL);
3499 if (!pf->irq_tracker) {
3500 ice_dis_msix(pf);
3501 return -ENOMEM;
3502 }
3503
3504
3505 pf->num_avail_sw_msix = (u16)vectors;
3506 pf->irq_tracker->num_entries = (u16)vectors;
3507 pf->irq_tracker->end = pf->irq_tracker->num_entries;
3508
3509 return 0;
3510}
3511
3512
3513
3514
3515
3516
3517
3518
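/**
 * ice_is_wol_supported - check if Wake on LAN is supported
 * @pf: board private structure
 *
 * Check the NVM WoL control word; returns true if WoL is supported and
 * enabled for this port, false otherwise.
 */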
3519bool ice_is_wol_supported(struct ice_pf *pf)
3520{
3521 struct ice_hw *hw = &pf->hw;
3522 u16 wol_ctrl;
3523
3524
3525
3526
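/* A bit set to 1 in the NVM WoL control word indicates WoL is not
 * supported on the corresponding PF ID.
 */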
3527 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3528 return false;
3529
3530 return !(BIT(hw->pf_id) & wol_ctrl);
3531}
3532
3533
3534
3535
3536
3537
3538
3539
3540
3541
3542
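/**
 * ice_vsi_recfg_qs - Change the number of queues on a VSI
 * @vsi: VSI being changed
 * @new_rx: new number of Rx queues
 * @new_tx: new number of Tx queues
 *
 * Only change the number of queues if new_tx, or new_rx is non-0.
 *
 * Returns 0 on success.
 */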
3543int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
3544{
3545 struct ice_pf *pf = vsi->back;
3546 int err = 0, timeout = 50;
3547
3548 if (!new_rx && !new_tx)
3549 return -EINVAL;
3550
3551 while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
3552 timeout--;
3553 if (!timeout)
3554 return -EBUSY;
3555 usleep_range(1000, 2000);
3556 }
3557
3558 if (new_tx)
3559 vsi->req_txq = (u16)new_tx;
3560 if (new_rx)
3561 vsi->req_rxq = (u16)new_rx;
3562
3563
3564 if (!netif_running(vsi->netdev)) {
3565 ice_vsi_rebuild(vsi, false);
3566 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
3567 goto done;
3568 }
3569
3570 ice_vsi_close(vsi);
3571 ice_vsi_rebuild(vsi, false);
3572 ice_pf_dcb_recfg(pf);
3573 ice_vsi_open(vsi);
3574done:
3575 clear_bit(__ICE_CFG_BUSY, pf->state);
3576 return err;
3577}
3578
3579
3580
3581
3582
3583
3584
3585
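/**
 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
 * @pf: PF to configure
 *
 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
 * VSI can still Tx/Rx VLAN tagged packets
 */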
3586static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
3587{
3588 struct ice_vsi *vsi = ice_get_main_vsi(pf);
3589 struct ice_vsi_ctx *ctxt;
3590 enum ice_status status;
3591 struct ice_hw *hw;
3592
3593 if (!vsi)
3594 return;
3595
3596 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
3597 if (!ctxt)
3598 return;
3599
3600 hw = &pf->hw;
3601 ctxt->info = vsi->info;
3602
3603 ctxt->info.valid_sections =
3604 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
3605 ICE_AQ_VSI_PROP_SECURITY_VALID |
3606 ICE_AQ_VSI_PROP_SW_VALID);
3607
3608
3609 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3610 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
3611
3612
3613 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
3614
3615
3616 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
3617 ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3618
3619 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
3620 if (status) {
3621 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
3622 ice_stat_str(status),
3623 ice_aq_str(hw->adminq.sq_last_status));
3624 } else {
3625 vsi->info.sec_flags = ctxt->info.sec_flags;
3626 vsi->info.sw_flags2 = ctxt->info.sw_flags2;
3627 vsi->info.vlan_flags = ctxt->info.vlan_flags;
3628 }
3629
3630 kfree(ctxt);
3631}
3632
3633
3634
3635
3636
3637
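/**
 * ice_log_pkg_init - log result of DDP package load
 * @hw: pointer to hardware info
 * @status: status of package load, forced to an error value when the
 *          loaded package cannot be used
 */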
3638static void
3639ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
3640{
3641 struct ice_pf *pf = (struct ice_pf *)hw->back;
3642 struct device *dev = ice_pf_to_dev(pf);
3643
3644 switch (*status) {
3645 case ICE_SUCCESS:
3646
3647
3648
3649
3650 if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
3651 hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
3652 hw->pkg_ver.update == hw->active_pkg_ver.update &&
3653 hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
3654 !memcmp(hw->pkg_name, hw->active_pkg_name,
3655 sizeof(hw->pkg_name))) {
3656 if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
3657 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
3658 hw->active_pkg_name,
3659 hw->active_pkg_ver.major,
3660 hw->active_pkg_ver.minor,
3661 hw->active_pkg_ver.update,
3662 hw->active_pkg_ver.draft);
3663 else
3664 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
3665 hw->active_pkg_name,
3666 hw->active_pkg_ver.major,
3667 hw->active_pkg_ver.minor,
3668 hw->active_pkg_ver.update,
3669 hw->active_pkg_ver.draft);
3670 } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
3671 hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
3672 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
3673 hw->active_pkg_name,
3674 hw->active_pkg_ver.major,
3675 hw->active_pkg_ver.minor,
3676 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3677 *status = ICE_ERR_NOT_SUPPORTED;
3678 } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3679 hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
3680 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
3681 hw->active_pkg_name,
3682 hw->active_pkg_ver.major,
3683 hw->active_pkg_ver.minor,
3684 hw->active_pkg_ver.update,
3685 hw->active_pkg_ver.draft,
3686 hw->pkg_name,
3687 hw->pkg_ver.major,
3688 hw->pkg_ver.minor,
3689 hw->pkg_ver.update,
3690 hw->pkg_ver.draft);
3691 } else {
3692 dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n");
3693 *status = ICE_ERR_NOT_SUPPORTED;
3694 }
3695 break;
3696 case ICE_ERR_FW_DDP_MISMATCH:
3697 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
3698 break;
3699 case ICE_ERR_BUF_TOO_SHORT:
3700 case ICE_ERR_CFG:
3701 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
3702 break;
3703 case ICE_ERR_NOT_SUPPORTED:
3704
3705 if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
3706 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3707 hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
3708 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
3709 else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
3710 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3711 hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
3712 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
3713 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3714 break;
3715 case ICE_ERR_AQ_ERROR:
3716 switch (hw->pkg_dwnld_status) {
3717 case ICE_AQ_RC_ENOSEC:
3718 case ICE_AQ_RC_EBADSIG:
3719 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
3720 return;
3721 case ICE_AQ_RC_ESVN:
3722 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
3723 return;
3724 case ICE_AQ_RC_EBADMAN:
3725 case ICE_AQ_RC_EBADBUF:
3726 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
3727
3728 if (ice_check_reset(hw))
3729 dev_err(dev, "Error resetting device. Please reload the driver\n");
3730 return;
3731 default:
3732 break;
3733 }
3734 fallthrough;
3735 default:
3736 dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
3737 *status);
3738 break;
3739 }
3740}
3741
3742
3743
3744
3745
3746
3747
3748
3749
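/**
 * ice_load_pkg - load/reload the DDP Package file
 * @firmware: firmware structure when firmware requested or NULL for reload
 * @pf: pointer to the PF instance
 *
 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
 * initialize HW tables.
 */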
3750static void
3751ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
3752{
3753 enum ice_status status = ICE_ERR_PARAM;
3754 struct device *dev = ice_pf_to_dev(pf);
3755 struct ice_hw *hw = &pf->hw;
3756
3757
3758 if (firmware && !hw->pkg_copy) {
3759 status = ice_copy_and_init_pkg(hw, firmware->data,
3760 firmware->size);
3761 ice_log_pkg_init(hw, &status);
3762 } else if (!firmware && hw->pkg_copy) {
3763
3764 status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
3765 ice_log_pkg_init(hw, &status);
3766 } else {
3767 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
3768 }
3769
3770 if (status) {
3771
3772 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3773 return;
3774 }
3775
3776
3777
3778
3779 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3780}
3781
3782
3783
3784
3785
3786
3787
3788
3789
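/**
 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
 * @pf: pointer to the PF structure
 *
 * There is no error returned here because the driver should be able to handle
 * 128 Byte cache lines, so we only print a warning in case of error
 */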
3790static void ice_verify_cacheline_size(struct ice_pf *pf)
3791{
3792 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
3793 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
3794 ICE_CACHE_LINE_BYTES);
3795}
3796
3797
3798
3799
3800
3801
3802
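/**
 * ice_send_version - update firmware with driver version
 * @pf: PF struct
 *
 * Returns 0 (ICE_SUCCESS) on success, else error code
 */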
3803static enum ice_status ice_send_version(struct ice_pf *pf)
3804{
3805 struct ice_driver_ver dv;
3806
3807 dv.major_ver = 0xff;
3808 dv.minor_ver = 0xff;
3809 dv.build_ver = 0xff;
3810 dv.subbuild_ver = 0;
3811 strscpy((char *)dv.driver_string, UTS_RELEASE,
3812 sizeof(dv.driver_string));
3813 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
3814}
3815
3816
3817
3818
3819
3820
3821
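/**
 * ice_init_fdir - setup the flow director resources
 * @pf: pointer to the PF instance
 *
 * Returns 0 on success, negative on error
 */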
3822static int ice_init_fdir(struct ice_pf *pf)
3823{
3824 struct device *dev = ice_pf_to_dev(pf);
3825 struct ice_vsi *ctrl_vsi;
3826 int err;
3827
3828
3829
3830
3831 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
3832 if (!ctrl_vsi) {
3833 dev_dbg(dev, "could not create control VSI\n");
3834 return -ENOMEM;
3835 }
3836
3837 err = ice_vsi_open_ctrl(ctrl_vsi);
3838 if (err) {
3839 dev_dbg(dev, "could not open control VSI\n");
3840 goto err_vsi_open;
3841 }
3842
3843 mutex_init(&pf->hw.fdir_fltr_lock);
3844
3845 err = ice_fdir_create_dflt_rules(pf);
3846 if (err)
3847 goto err_fdir_rule;
3848
3849 return 0;
3850
3851err_fdir_rule:
3852 ice_fdir_release_flows(&pf->hw);
3853 ice_vsi_close(ctrl_vsi);
3854err_vsi_open:
3855 ice_vsi_release(ctrl_vsi);
3856 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
3857 pf->vsi[pf->ctrl_vsi_idx] = NULL;
3858 pf->ctrl_vsi_idx = ICE_NO_VSI;
3859 }
3860 return err;
3861}
3862
3863
3864
3865
3866
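/**
 * ice_get_opt_fw_name - return optional firmware file name or NULL
 * @pf: pointer to the PF instance
 */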
3867static char *ice_get_opt_fw_name(struct ice_pf *pf)
3868{
3869
3870
3871
3872 struct pci_dev *pdev = pf->pdev;
3873 char *opt_fw_filename;
3874 u64 dsn;
3875
3876
3877
3878
3879 dsn = pci_get_dsn(pdev);
3880 if (!dsn)
3881 return NULL;
3882
3883 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
3884 if (!opt_fw_filename)
3885 return NULL;
3886
3887 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
3888 ICE_DDP_PKG_PATH, dsn);
3889
3890 return opt_fw_filename;
3891}
3892
3893
3894
3895
3896
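/**
 * ice_request_fw - request the DDP package file and load it
 * @pf: pointer to the PF instance
 */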
3897static void ice_request_fw(struct ice_pf *pf)
3898{
3899 char *opt_fw_filename = ice_get_opt_fw_name(pf);
3900 const struct firmware *firmware = NULL;
3901 struct device *dev = ice_pf_to_dev(pf);
3902 int err = 0;
3903
3904
3905
3906
3907
3908 if (opt_fw_filename) {
3909 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
3910 if (err) {
3911 kfree(opt_fw_filename);
3912 goto dflt_pkg_load;
3913 }
3914
3915
3916 ice_load_pkg(firmware, pf);
3917 kfree(opt_fw_filename);
3918 release_firmware(firmware);
3919 return;
3920 }
3921
3922dflt_pkg_load:
3923 err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
3924 if (err) {
3925 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
3926 return;
3927 }
3928
3929
3930 ice_load_pkg(firmware, pf);
3931 release_firmware(firmware);
3932}
3933
3934
3935
3936
3937
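/**
 * ice_print_wake_reason - show the wake up cause in the log
 * @pf: pointer to the PF struct
 */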
3938static void ice_print_wake_reason(struct ice_pf *pf)
3939{
3940 u32 wus = pf->wakeup_reason;
3941 const char *wake_str;
3942
3943
3944 if (!wus)
3945 return;
3946
3947 if (wus & PFPM_WUS_LNKC_M)
3948 wake_str = "Link\n";
3949 else if (wus & PFPM_WUS_MAG_M)
3950 wake_str = "Magic Packet\n";
3951 else if (wus & PFPM_WUS_MNG_M)
3952 wake_str = "Management\n";
3953 else if (wus & PFPM_WUS_FW_RST_WK_M)
3954 wake_str = "Firmware Reset\n";
3955 else
3956 wake_str = "Unknown\n";
3957
3958 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
3959}
3960
3961
3962
3963
3964
3965
3966
3967
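/**
 * ice_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in ice_pci_tbl
 *
 * Returns 0 on success, negative on failure
 */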
3968static int
3969ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
3970{
3971 struct device *dev = &pdev->dev;
3972 struct ice_pf *pf;
3973 struct ice_hw *hw;
3974 int err;
3975
3976
3977
3978
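/* this driver uses devres, see
 * Documentation/driver-api/driver-model/devres.rst
 */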
3979 err = pcim_enable_device(pdev);
3980 if (err)
3981 return err;
3982
3983 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
3984 if (err) {
3985 dev_err(dev, "BAR0 I/O map error %d\n", err);
3986 return err;
3987 }
3988
3989 pf = ice_allocate_pf(dev);
3990 if (!pf)
3991 return -ENOMEM;
3992
3993
3994 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3995 if (err)
3996 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3997 if (err) {
3998 dev_err(dev, "DMA configuration failed: 0x%x\n", err);
3999 return err;
4000 }
4001
4002 pci_enable_pcie_error_reporting(pdev);
4003 pci_set_master(pdev);
4004
4005 pf->pdev = pdev;
4006 pci_set_drvdata(pdev, pf);
4007 set_bit(__ICE_DOWN, pf->state);
4008
4009 set_bit(__ICE_SERVICE_DIS, pf->state);
4010
4011 hw = &pf->hw;
4012 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4013 pci_save_state(pdev);
4014
4015 hw->back = pf;
4016 hw->vendor_id = pdev->vendor;
4017 hw->device_id = pdev->device;
4018 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4019 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4020 hw->subsystem_device_id = pdev->subsystem_device;
4021 hw->bus.device = PCI_SLOT(pdev->devfn);
4022 hw->bus.func = PCI_FUNC(pdev->devfn);
4023 ice_set_ctrlq_len(hw);
4024
4025 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4026
4027 err = ice_devlink_register(pf);
4028 if (err) {
4029 dev_err(dev, "ice_devlink_register failed: %d\n", err);
4030 goto err_exit_unroll;
4031 }
4032
4033#ifndef CONFIG_DYNAMIC_DEBUG
4034 if (debug < -1)
4035 hw->debug_mask = debug;
4036#endif
4037
4038 err = ice_init_hw(hw);
4039 if (err) {
4040 dev_err(dev, "ice_init_hw failed: %d\n", err);
4041 err = -EIO;
4042 goto err_exit_unroll;
4043 }
4044
4045 ice_request_fw(pf);
4046
4047
4048
4049
4050
4051 if (ice_is_safe_mode(pf)) {
4052 dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
4053
4054
4055
4056
4057
4058 ice_set_safe_mode_caps(hw);
4059 }
4060
4061 err = ice_init_pf(pf);
4062 if (err) {
4063 dev_err(dev, "ice_init_pf failed: %d\n", err);
4064 goto err_init_pf_unroll;
4065 }
4066
4067 ice_devlink_init_regions(pf);
4068
4069 pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4070 if (!pf->num_alloc_vsi) {
4071 err = -EIO;
4072 goto err_init_pf_unroll;
4073 }
4074
4075 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4076 GFP_KERNEL);
4077 if (!pf->vsi) {
4078 err = -ENOMEM;
4079 goto err_init_pf_unroll;
4080 }
4081
4082 err = ice_init_interrupt_scheme(pf);
4083 if (err) {
4084 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4085 err = -EIO;
4086 goto err_init_vsi_unroll;
4087 }
4088
4089
4090
4091
4092
4093
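/* In case of MSIX we are going to setup the misc vector right here
 * to handle admin queue events etc. In case of legacy and MSI
 * the misc functionality and queue processing is combined in
 * the same vector and that gets setup at open.
 */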
4094 err = ice_req_irq_msix_misc(pf);
4095 if (err) {
4096 dev_err(dev, "setup of misc vector failed: %d\n", err);
4097 goto err_init_interrupt_unroll;
4098 }
4099
4100
4101 pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4102 if (!pf->first_sw) {
4103 err = -ENOMEM;
4104 goto err_msix_misc_unroll;
4105 }
4106
4107 if (hw->evb_veb)
4108 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4109 else
4110 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4111
4112 pf->first_sw->pf = pf;
4113
4114
4115 pf->first_sw->sw_id = hw->port_info->sw_id;
4116
4117 err = ice_setup_pf_sw(pf);
4118 if (err) {
4119 dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4120 goto err_alloc_sw_unroll;
4121 }
4122
4123 clear_bit(__ICE_SERVICE_DIS, pf->state);
4124
4125
4126 err = ice_send_version(pf);
4127 if (err) {
4128 dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4129 UTS_RELEASE, err);
4130 goto err_send_version_unroll;
4131 }
4132
4133
4134 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4135
4136 err = ice_init_link_events(pf->hw.port_info);
4137 if (err) {
4138 dev_err(dev, "ice_init_link_events failed: %d\n", err);
4139 goto err_send_version_unroll;
4140 }
4141
4142 err = ice_init_nvm_phy_type(pf->hw.port_info);
4143 if (err) {
4144 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4145 goto err_send_version_unroll;
4146 }
4147
4148 err = ice_update_link_info(pf->hw.port_info);
4149 if (err) {
4150 dev_err(dev, "ice_update_link_info failed: %d\n", err);
4151 goto err_send_version_unroll;
4152 }
4153
4154 ice_init_link_dflt_override(pf->hw.port_info);
4155
4156
4157 if (pf->hw.port_info->phy.link_info.link_info &
4158 ICE_AQ_MEDIA_AVAILABLE) {
4159 err = ice_init_phy_user_cfg(pf->hw.port_info);
4160 if (err) {
4161 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4162 goto err_send_version_unroll;
4163 }
4164
4165 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4166 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4167
4168 if (vsi)
4169 ice_configure_phy(vsi);
4170 }
4171 } else {
4172 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4173 }
4174
4175 ice_verify_cacheline_size(pf);
4176
4177
4178 pf->wakeup_reason = rd32(hw, PFPM_WUS);
4179
4180
4181 ice_print_wake_reason(pf);
4182
4183
4184 wr32(hw, PFPM_WUS, U32_MAX);
4185
4186
4187 device_set_wakeup_enable(dev, false);
4188
4189 if (ice_is_safe_mode(pf)) {
4190 ice_set_safe_mode_vlan_cfg(pf);
4191 goto probe_done;
4192 }
4193
4194
4195
4196
4197 if (ice_init_fdir(pf))
4198 dev_err(dev, "could not initialize flow director\n");
4199
4200
4201 if (ice_init_pf_dcb(pf, false)) {
4202 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4203 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4204 } else {
4205 ice_cfg_lldp_mib_change(&pf->hw, true);
4206 }
4207
4208
4209 pcie_print_link_status(pf->pdev);
4210
4211probe_done:
4212
4213 clear_bit(__ICE_DOWN, pf->state);
4214 return 0;
4215
4216err_send_version_unroll:
4217 ice_vsi_release_all(pf);
4218err_alloc_sw_unroll:
4219 ice_devlink_destroy_port(pf);
4220 set_bit(__ICE_SERVICE_DIS, pf->state);
4221 set_bit(__ICE_DOWN, pf->state);
4222 devm_kfree(dev, pf->first_sw);
4223err_msix_misc_unroll:
4224 ice_free_irq_msix_misc(pf);
4225err_init_interrupt_unroll:
4226 ice_clear_interrupt_scheme(pf);
4227err_init_vsi_unroll:
4228 devm_kfree(dev, pf->vsi);
4229err_init_pf_unroll:
4230 ice_deinit_pf(pf);
4231 ice_devlink_destroy_regions(pf);
4232 ice_deinit_hw(hw);
4233err_exit_unroll:
4234 ice_devlink_unregister(pf);
4235 pci_disable_pcie_error_reporting(pdev);
4236 pci_disable_device(pdev);
4237 return err;
4238}
4239
4240
4241
4242
4243
4244
4245
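/**
 * ice_set_wake - enable or disable Wake on LAN
 * @pf: pointer to the PF struct
 *
 * Simple helper for WoL control
 */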
4246static void ice_set_wake(struct ice_pf *pf)
4247{
4248 struct ice_hw *hw = &pf->hw;
4249 bool wol = pf->wol_ena;
4250
4251
4252 wr32(hw, PFPM_WUS, U32_MAX);
4253
4254
4255 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4256
4257
4258 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
4259}
4260
4261
4262
4263
4264
4265
4266
4267
4268
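/**
 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
 * @pf: pointer to the PF struct
 *
 * Issue firmware command to enable multicast magic wake, making
 * sure that any locally administered address (LAA) is used for
 * wake, and that PF reset doesn't undo the LAA.
 */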
4269static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4270{
4271 struct device *dev = ice_pf_to_dev(pf);
4272 struct ice_hw *hw = &pf->hw;
4273 enum ice_status status;
4274 u8 mac_addr[ETH_ALEN];
4275 struct ice_vsi *vsi;
4276 u8 flags;
4277
4278 if (!pf->wol_ena)
4279 return;
4280
4281 vsi = ice_get_main_vsi(pf);
4282 if (!vsi)
4283 return;
4284
4285
4286 if (vsi->netdev)
4287 ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4288 else
4289 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4290
4291 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4292 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4293 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4294
4295 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4296 if (status)
4297 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
4298 ice_stat_str(status),
4299 ice_aq_str(hw->adminq.sq_last_status));
4300}
4301
4302
4303
4304
4305
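/**
 * ice_remove - Device removal routine
 * @pdev: PCI device information struct
 */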
4306static void ice_remove(struct pci_dev *pdev)
4307{
4308 struct ice_pf *pf = pci_get_drvdata(pdev);
4309 int i;
4310
4311 if (!pf)
4312 return;
4313
4314 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
4315 if (!ice_is_reset_in_progress(pf->state))
4316 break;
4317 msleep(100);
4318 }
4319
4320 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
4321 set_bit(__ICE_VF_RESETS_DISABLED, pf->state);
4322 ice_free_vfs(pf);
4323 }
4324
4325 set_bit(__ICE_DOWN, pf->state);
4326 ice_service_task_stop(pf);
4327
4328 ice_aq_cancel_waiting_tasks(pf);
4329
4330 mutex_destroy(&pf->hw.fdir_fltr_lock);
4331 if (!ice_is_safe_mode(pf))
4332 ice_remove_arfs(pf);
4333 ice_setup_mc_magic_wake(pf);
4334 ice_devlink_destroy_port(pf);
4335 ice_vsi_release_all(pf);
4336 ice_set_wake(pf);
4337 ice_free_irq_msix_misc(pf);
4338 ice_for_each_vsi(pf, i) {
4339 if (!pf->vsi[i])
4340 continue;
4341 ice_vsi_free_q_vectors(pf->vsi[i]);
4342 }
4343 ice_deinit_pf(pf);
4344 ice_devlink_destroy_regions(pf);
4345 ice_deinit_hw(&pf->hw);
4346 ice_devlink_unregister(pf);
4347
4348
4349
4350
4351
4352 ice_reset(&pf->hw, ICE_RESET_PFR);
4353 pci_wait_for_pending_transaction(pdev);
4354 ice_clear_interrupt_scheme(pf);
4355 pci_disable_pcie_error_reporting(pdev);
4356 pci_disable_device(pdev);
4357}
4358
4359
4360
4361
4362
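/**
 * ice_shutdown - PCI callback for shutting down device
 * @pdev: PCI device information struct
 */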
4363static void ice_shutdown(struct pci_dev *pdev)
4364{
4365 struct ice_pf *pf = pci_get_drvdata(pdev);
4366
4367 ice_remove(pdev);
4368
4369 if (system_state == SYSTEM_POWER_OFF) {
4370 pci_wake_from_d3(pdev, pf->wol_ena);
4371 pci_set_power_state(pdev, PCI_D3hot);
4372 }
4373}
4374
4375#ifdef CONFIG_PM
4376
4377
4378
4379
4380
4381
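/**
 * ice_prepare_for_shutdown - prep for PCI shutdown
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for PCI device shutdown
 */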
4382static void ice_prepare_for_shutdown(struct ice_pf *pf)
4383{
4384 struct ice_hw *hw = &pf->hw;
4385 u32 v;
4386
4387
4388 if (ice_check_sq_alive(hw, &hw->mailboxq))
4389 ice_vc_notify_reset(pf);
4390
4391 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
4392
4393
4394 ice_pf_dis_all_vsi(pf, false);
4395
4396 ice_for_each_vsi(pf, v)
4397 if (pf->vsi[v])
4398 pf->vsi[v]->vsi_num = 0;
4399
4400 ice_shutdown_all_ctrlq(hw);
4401}
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
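/**
 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
 * @pf: board private structure to reinitialize
 *
 * This routine reinitializes the interrupt scheme that was cleared during
 * power management. It should be called during resume routine to re-allocate
 * the q_vectors and reacquire interrupts.
 */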
4413static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
4414{
4415 struct device *dev = ice_pf_to_dev(pf);
4416 int ret, v;
4417
4418
4419
4420
4421
4422 ret = ice_init_interrupt_scheme(pf);
4423 if (ret) {
4424 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
4425 return ret;
4426 }
4427
4428
4429 ice_for_each_vsi(pf, v) {
4430 if (!pf->vsi[v])
4431 continue;
4432
4433 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
4434 if (ret)
4435 goto err_reinit;
4436 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
4437 }
4438
4439 ret = ice_req_irq_msix_misc(pf);
4440 if (ret) {
4441 dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
4442 ret);
4443 goto err_reinit;
4444 }
4445
4446 return 0;
4447
4448err_reinit:
4449 while (v--)
4450 if (pf->vsi[v])
4451 ice_vsi_free_q_vectors(pf->vsi[v]);
4452
4453 return ret;
4454}
4455
4456
4457
4458
4459
4460
4461
4462
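/**
 * ice_suspend - PM callback for suspending the device
 * @dev: generic device information structure
 *
 * Quiesce the device and prepare for a D3 transition.
 */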
4463static int __maybe_unused ice_suspend(struct device *dev)
4464{
4465 struct pci_dev *pdev = to_pci_dev(dev);
4466 struct ice_pf *pf;
4467 int disabled, v;
4468
4469 pf = pci_get_drvdata(pdev);
4470
4471 if (!ice_pf_state_is_nominal(pf)) {
4472 dev_err(dev, "Device is not ready, no need to suspend it\n");
4473 return -EBUSY;
4474 }
4475
4476
4477
4478
4479
4480
4481
4482 disabled = ice_service_task_stop(pf);
4483
4484
4485 if (test_and_set_bit(__ICE_SUSPENDED, pf->state)) {
4486 if (!disabled)
4487 ice_service_task_restart(pf);
4488 return 0;
4489 }
4490
4491 if (test_bit(__ICE_DOWN, pf->state) ||
4492 ice_is_reset_in_progress(pf->state)) {
4493 dev_err(dev, "can't suspend device in reset or already down\n");
4494 if (!disabled)
4495 ice_service_task_restart(pf);
4496 return 0;
4497 }
4498
4499 ice_setup_mc_magic_wake(pf);
4500
4501 ice_prepare_for_shutdown(pf);
4502
4503 ice_set_wake(pf);
4504
4505
4506
4507
4508
4509
4510 ice_free_irq_msix_misc(pf);
4511 ice_for_each_vsi(pf, v) {
4512 if (!pf->vsi[v])
4513 continue;
4514 ice_vsi_free_q_vectors(pf->vsi[v]);
4515 }
4516 ice_clear_interrupt_scheme(pf);
4517
4518 pci_save_state(pdev);
4519 pci_wake_from_d3(pdev, pf->wol_ena);
4520 pci_set_power_state(pdev, PCI_D3hot);
4521 return 0;
4522}
4523
4524
4525
4526
4527
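/**
 * ice_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 */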
4528static int __maybe_unused ice_resume(struct device *dev)
4529{
4530 struct pci_dev *pdev = to_pci_dev(dev);
4531 enum ice_reset_req reset_type;
4532 struct ice_pf *pf;
4533 struct ice_hw *hw;
4534 int ret;
4535
4536 pci_set_power_state(pdev, PCI_D0);
4537 pci_restore_state(pdev);
4538 pci_save_state(pdev);
4539
4540 if (!pci_device_is_present(pdev))
4541 return -ENODEV;
4542
4543 ret = pci_enable_device_mem(pdev);
4544 if (ret) {
4545 dev_err(dev, "Cannot enable device after suspend\n");
4546 return ret;
4547 }
4548
4549 pf = pci_get_drvdata(pdev);
4550 hw = &pf->hw;
4551
4552 pf->wakeup_reason = rd32(hw, PFPM_WUS);
4553 ice_print_wake_reason(pf);
4554
4555
4556
4557
4558 ret = ice_reinit_interrupt_scheme(pf);
4559 if (ret)
4560 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
4561
4562 clear_bit(__ICE_DOWN, pf->state);
4563
4564 reset_type = ICE_RESET_PFR;
4565
4566 clear_bit(__ICE_SERVICE_DIS, pf->state);
4567
4568 if (ice_schedule_reset(pf, reset_type))
4569 dev_err(dev, "Reset during resume failed.\n");
4570
4571 clear_bit(__ICE_SUSPENDED, pf->state);
4572 ice_service_task_restart(pf);
4573
4574
4575 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4576
4577 return 0;
4578}
4579#endif
4580
4581
4582
4583
4584
4585
4586
4587
4588
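/**
 * ice_pci_err_detected - warning that PCI error has been detected
 * @pdev: PCI device information struct
 * @err: the type of PCI error
 *
 * Called to warn that something happened on the PCI bus and the error handling
 * is in progress. Allows the driver to gracefully prepare/handle PCI error.
 */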
4589static pci_ers_result_t
4590ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
4591{
4592 struct ice_pf *pf = pci_get_drvdata(pdev);
4593
4594 if (!pf) {
4595 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
4596 __func__, err);
4597 return PCI_ERS_RESULT_DISCONNECT;
4598 }
4599
4600 if (!test_bit(__ICE_SUSPENDED, pf->state)) {
4601 ice_service_task_stop(pf);
4602
4603 if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
4604 set_bit(__ICE_PFR_REQ, pf->state);
4605 ice_prepare_for_reset(pf);
4606 }
4607 }
4608
4609 return PCI_ERS_RESULT_NEED_RESET;
4610}
4611
4612
4613
4614
4615
4616
4617
4618
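/**
 * ice_pci_err_slot_reset - a PCI slot reset has just happened
 * @pdev: PCI device information struct
 *
 * Called to determine if the driver can recover from the PCI slot reset by
 * using a register read to determine if the device is recoverable.
 */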
4619static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
4620{
4621 struct ice_pf *pf = pci_get_drvdata(pdev);
4622 pci_ers_result_t result;
4623 int err;
4624 u32 reg;
4625
4626 err = pci_enable_device_mem(pdev);
4627 if (err) {
4628 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
4629 err);
4630 result = PCI_ERS_RESULT_DISCONNECT;
4631 } else {
4632 pci_set_master(pdev);
4633 pci_restore_state(pdev);
4634 pci_save_state(pdev);
4635 pci_wake_from_d3(pdev, false);
4636
4637
4638 reg = rd32(&pf->hw, GLGEN_RTRIG);
4639 if (!reg)
4640 result = PCI_ERS_RESULT_RECOVERED;
4641 else
4642 result = PCI_ERS_RESULT_DISCONNECT;
4643 }
4644
4645 err = pci_aer_clear_nonfatal_status(pdev);
4646 if (err)
4647 dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
4648 err);
4649
4650
4651 return result;
4652}
4653
4654
4655
4656
4657
4658
4659
4660
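/**
 * ice_pci_err_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error and/or
 * reset recovery have finished
 */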
4661static void ice_pci_err_resume(struct pci_dev *pdev)
4662{
4663 struct ice_pf *pf = pci_get_drvdata(pdev);
4664
4665 if (!pf) {
4666 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
4667 __func__);
4668 return;
4669 }
4670
4671 if (test_bit(__ICE_SUSPENDED, pf->state)) {
4672 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
4673 __func__);
4674 return;
4675 }
4676
4677 ice_restore_all_vfs_msi_state(pdev);
4678
4679 ice_do_reset(pf, ICE_RESET_PFR);
4680 ice_service_task_restart(pf);
4681 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4682}
4683
4684
4685
4686
4687
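/**
 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
 * @pdev: PCI device information struct
 */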
4688static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
4689{
4690 struct ice_pf *pf = pci_get_drvdata(pdev);
4691
4692 if (!test_bit(__ICE_SUSPENDED, pf->state)) {
4693 ice_service_task_stop(pf);
4694
4695 if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
4696 set_bit(__ICE_PFR_REQ, pf->state);
4697 ice_prepare_for_reset(pf);
4698 }
4699 }
4700}
4701
4702
4703
4704
4705
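/**
 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
 * @pdev: PCI device information struct
 */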
4706static void ice_pci_err_reset_done(struct pci_dev *pdev)
4707{
4708 ice_pci_err_resume(pdev);
4709}
4710
4711
4712
4713
4714
4715
4716
4717
4718
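/* ice_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */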
4719static const struct pci_device_id ice_pci_tbl[] = {
4720 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
4721 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
4722 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
4723 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
4724 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
4725 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
4726 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
4727 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
4728 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
4729 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
4730 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
4731 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
4732 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
4733 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
4734 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
4735 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
4736 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
4737 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
4738 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
4739 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
4740 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
4741 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
4742 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
4743
4744 { 0, }
4745};
4746MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
4747
4748static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
4749
4750static const struct pci_error_handlers ice_pci_err_handler = {
4751 .error_detected = ice_pci_err_detected,
4752 .slot_reset = ice_pci_err_slot_reset,
4753 .reset_prepare = ice_pci_err_reset_prepare,
4754 .reset_done = ice_pci_err_reset_done,
4755 .resume = ice_pci_err_resume
4756};
4757
4758static struct pci_driver ice_driver = {
4759 .name = KBUILD_MODNAME,
4760 .id_table = ice_pci_tbl,
4761 .probe = ice_probe,
4762 .remove = ice_remove,
4763#ifdef CONFIG_PM
4764 .driver.pm = &ice_pm_ops,
4765#endif
4766 .shutdown = ice_shutdown,
4767 .sriov_configure = ice_sriov_configure,
4768 .err_handler = &ice_pci_err_handler
4769};
4770
4771
4772
4773
4774
4775
4776
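/**
 * ice_module_init - Driver registration routine
 *
 * ice_module_init is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */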
4777static int __init ice_module_init(void)
4778{
4779 int status;
4780
4781 pr_info("%s\n", ice_driver_string);
4782 pr_info("%s\n", ice_copyright);
4783
4784 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
4785 if (!ice_wq) {
4786 pr_err("Failed to create workqueue\n");
4787 return -ENOMEM;
4788 }
4789
4790 status = pci_register_driver(&ice_driver);
4791 if (status) {
4792 pr_err("failed to register PCI driver, err %d\n", status);
4793 destroy_workqueue(ice_wq);
4794 }
4795
4796 return status;
4797}
4798module_init(ice_module_init);
4799
4800
4801
4802
4803
4804
4805
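/**
 * ice_module_exit - Driver exit cleanup routine
 *
 * ice_module_exit is called just before the driver is removed
 * from memory.
 */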
4806static void __exit ice_module_exit(void)
4807{
4808 pci_unregister_driver(&ice_driver);
4809 destroy_workqueue(ice_wq);
4810 pr_info("module unloaded\n");
4811}
4812module_exit(ice_module_exit);
4813
4814
4815
4816
4817
4818
4819
4820
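/**
 * ice_set_mac_address - NDO callback to set MAC address
 * @netdev: network interface device structure
 * @pi: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */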
4821static int ice_set_mac_address(struct net_device *netdev, void *pi)
4822{
4823 struct ice_netdev_priv *np = netdev_priv(netdev);
4824 struct ice_vsi *vsi = np->vsi;
4825 struct ice_pf *pf = vsi->back;
4826 struct ice_hw *hw = &pf->hw;
4827 struct sockaddr *addr = pi;
4828 enum ice_status status;
4829 u8 flags = 0;
4830 int err = 0;
4831 u8 *mac;
4832
4833 mac = (u8 *)addr->sa_data;
4834
4835 if (!is_valid_ether_addr(mac))
4836 return -EADDRNOTAVAIL;
4837
4838 if (ether_addr_equal(netdev->dev_addr, mac)) {
4839 netdev_warn(netdev, "already using mac %pM\n", mac);
4840 return 0;
4841 }
4842
4843 if (test_bit(__ICE_DOWN, pf->state) ||
4844 ice_is_reset_in_progress(pf->state)) {
4845 netdev_err(netdev, "can't set mac %pM. device not ready\n",
4846 mac);
4847 return -EBUSY;
4848 }
4849
4850
4851 status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
4852 if (status && status != ICE_ERR_DOES_NOT_EXIST) {
4853 err = -EADDRNOTAVAIL;
4854 goto err_update_filters;
4855 }
4856
4857
4858 status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
4859 if (status == ICE_ERR_ALREADY_EXISTS) {
4860 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
4861 return 0;
4862 }
4863
4864
4865 if (status)
4866 err = -EADDRNOTAVAIL;
4867
4868err_update_filters:
4869 if (err) {
4870 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
4871 mac);
4872 return err;
4873 }
4874
4875
4876 memcpy(netdev->dev_addr, mac, netdev->addr_len);
4877 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
4878 netdev->dev_addr);
4879
4880
4881 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
4882 status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
4883 if (status) {
4884 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
4885 mac, ice_stat_str(status));
4886 }
4887 return 0;
4888}
4889
4890
4891
4892
4893
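/**
 * ice_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 */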
4894static void ice_set_rx_mode(struct net_device *netdev)
4895{
4896 struct ice_netdev_priv *np = netdev_priv(netdev);
4897 struct ice_vsi *vsi = np->vsi;
4898
4899 if (!vsi)
4900 return;
4901
4902
4903
4904
4905
4906 set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
4907 set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
4908 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
4909
4910
4911
4912
4913 ice_service_task_schedule(vsi->back);
4914}
4915
4916
4917
4918
4919
4920
4921
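/**
 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
 * @netdev: network interface device structure
 * @queue_index: Queue ID
 * @maxrate: maximum bandwidth in Mbps
 */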
4922static int
4923ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
4924{
4925 struct ice_netdev_priv *np = netdev_priv(netdev);
4926 struct ice_vsi *vsi = np->vsi;
4927 enum ice_status status;
4928 u16 q_handle;
4929 u8 tc;
4930
4931
4932 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
4933 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
4934 maxrate, queue_index);
4935 return -EINVAL;
4936 }
4937
4938 q_handle = vsi->tx_rings[queue_index]->q_handle;
4939 tc = ice_dcb_get_tc(vsi, queue_index);
4940
4941
4942 if (!maxrate)
4943 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
4944 q_handle, ICE_MAX_BW);
4945 else
4946 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
4947 q_handle, ICE_MAX_BW, maxrate * 1000);
4948 if (status) {
4949 netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
4950 ice_stat_str(status));
4951 return -EIO;
4952 }
4953
4954 return 0;
4955}
4956
4957
4958
4959
4960
4961
4962
4963
4964
4965
4966
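/**
 * ice_fdb_add - add an entry to the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @vid: VLAN ID
 * @flags: instructions from stack about fdb operation
 * @extack: netlink extended ack
 */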
4967static int
4968ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
4969 struct net_device *dev, const unsigned char *addr, u16 vid,
4970 u16 flags, struct netlink_ext_ack __always_unused *extack)
4971{
4972 int err;
4973
4974 if (vid) {
4975 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
4976 return -EINVAL;
4977 }
4978 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
4979 netdev_err(dev, "FDB only supports static addresses\n");
4980 return -EINVAL;
4981 }
4982
4983 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4984 err = dev_uc_add_excl(dev, addr);
4985 else if (is_multicast_ether_addr(addr))
4986 err = dev_mc_add_excl(dev, addr);
4987 else
4988 err = -EINVAL;
4989
4990
4991 if (err == -EEXIST && !(flags & NLM_F_EXCL))
4992 err = 0;
4993
4994 return err;
4995}
4996
4997
4998
4999
5000
5001
5002
5003
5004
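/**
 * ice_fdb_del - delete an entry from the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being removed
 * @vid: VLAN ID
 */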
5005static int
5006ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5007 struct net_device *dev, const unsigned char *addr,
5008 __always_unused u16 vid)
5009{
5010 int err;
5011
5012 if (ndm->ndm_state & NUD_PERMANENT) {
5013 netdev_err(dev, "FDB only supports static addresses\n");
5014 return -EINVAL;
5015 }
5016
5017 if (is_unicast_ether_addr(addr))
5018 err = dev_uc_del(dev, addr);
5019 else if (is_multicast_ether_addr(addr))
5020 err = dev_mc_del(dev, addr);
5021 else
5022 err = -EINVAL;
5023
5024 return err;
5025}
5026
5027
5028
5029
5030
5031
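/**
 * ice_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 */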
5032static int
5033ice_set_features(struct net_device *netdev, netdev_features_t features)
5034{
5035 struct ice_netdev_priv *np = netdev_priv(netdev);
5036 struct ice_vsi *vsi = np->vsi;
5037 struct ice_pf *pf = vsi->back;
5038 int ret = 0;
5039
5040
5041 if (ice_is_safe_mode(vsi->back)) {
5042 dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
5043 return ret;
5044 }
5045
5046
5047 if (ice_is_reset_in_progress(pf->state)) {
		dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features is temporarily unavailable.\n");
5049 return -EBUSY;
5050 }
5051
5052
5053
5054
5055 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
5056 ret = ice_vsi_manage_rss_lut(vsi, true);
5057 else if (!(features & NETIF_F_RXHASH) &&
5058 netdev->features & NETIF_F_RXHASH)
5059 ret = ice_vsi_manage_rss_lut(vsi, false);
5060
5061 if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
5062 !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5063 ret = ice_vsi_manage_vlan_stripping(vsi, true);
5064 else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
5065 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5066 ret = ice_vsi_manage_vlan_stripping(vsi, false);
5067
5068 if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
5069 !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5070 ret = ice_vsi_manage_vlan_insertion(vsi);
5071 else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
5072 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5073 ret = ice_vsi_manage_vlan_insertion(vsi);
5074
5075 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5076 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5077 ret = ice_cfg_vlan_pruning(vsi, true, false);
5078 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5079 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5080 ret = ice_cfg_vlan_pruning(vsi, false, false);
5081
5082 if ((features & NETIF_F_NTUPLE) &&
5083 !(netdev->features & NETIF_F_NTUPLE)) {
5084 ice_vsi_manage_fdir(vsi, true);
5085 ice_init_arfs(vsi);
5086 } else if (!(features & NETIF_F_NTUPLE) &&
5087 (netdev->features & NETIF_F_NTUPLE)) {
5088 ice_vsi_manage_fdir(vsi, false);
5089 ice_clear_arfs(vsi);
5090 }
5091
5092 return ret;
5093}
5094
5095
5096
5097
5098
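/**
 * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
 * @vsi: VSI to setup VLAN properties for
 */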
5099static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
5100{
5101 int ret = 0;
5102
5103 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
5104 ret = ice_vsi_manage_vlan_stripping(vsi, true);
5105 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
5106 ret = ice_vsi_manage_vlan_insertion(vsi);
5107
5108 return ret;
5109}
5110
5111
5112
5113
5114
5115
5116
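/**
 * ice_vsi_cfg - Setup the VSI
 * @vsi: the VSI being configured
 *
 * Return 0 on success and negative value on error
 */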
5117int ice_vsi_cfg(struct ice_vsi *vsi)
5118{
5119 int err;
5120
5121 if (vsi->netdev) {
5122 ice_set_rx_mode(vsi->netdev);
5123
5124 err = ice_vsi_vlan_setup(vsi);
5125
5126 if (err)
5127 return err;
5128 }
5129 ice_vsi_cfg_dcb_rings(vsi);
5130
5131 err = ice_vsi_cfg_lan_txqs(vsi);
5132 if (!err && ice_is_xdp_ena_vsi(vsi))
5133 err = ice_vsi_cfg_xdp_txqs(vsi);
5134 if (!err)
5135 err = ice_vsi_cfg_rxqs(vsi);
5136
5137 return err;
5138}
5139
5140
5141
5142
5143
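/**
 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 */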
5144static void ice_napi_enable_all(struct ice_vsi *vsi)
5145{
5146 int q_idx;
5147
5148 if (!vsi->netdev)
5149 return;
5150
5151 ice_for_each_q_vector(vsi, q_idx) {
5152 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5153
5154 if (q_vector->rx.ring || q_vector->tx.ring)
5155 napi_enable(&q_vector->napi);
5156 }
5157}
5158
5159
5160
5161
5162
5163
5164
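/**
 * ice_up_complete - Finish the last steps of bringing up a connection
 * @vsi: The VSI being configured
 *
 * Return 0 on success and negative value on error
 */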
5165static int ice_up_complete(struct ice_vsi *vsi)
5166{
5167 struct ice_pf *pf = vsi->back;
5168 int err;
5169
5170 ice_vsi_cfg_msix(vsi);
5171
5172
5173
5174
5175
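	/* Enable only Rx rings, Tx rings were enabled by the FW when the
	 * Tx queue group list was configured and the context bits were
	 * programmed using ice_vsi_cfg_txqs
	 */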
5176 err = ice_vsi_start_all_rx_rings(vsi);
5177 if (err)
5178 return err;
5179
5180 clear_bit(__ICE_DOWN, vsi->state);
5181 ice_napi_enable_all(vsi);
5182 ice_vsi_ena_irq(vsi);
5183
5184 if (vsi->port_info &&
5185 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
5186 vsi->netdev) {
5187 ice_print_link_msg(vsi, true);
5188 netif_tx_start_all_queues(vsi->netdev);
5189 netif_carrier_on(vsi->netdev);
5190 }
5191
5192 ice_service_task_schedule(pf);
5193
5194 return 0;
5195}
5196
5197
5198
5199
5200
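/**
 * ice_up - Bring the connection back up after being down
 * @vsi: VSI being configured
 */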
5201int ice_up(struct ice_vsi *vsi)
5202{
5203 int err;
5204
5205 err = ice_vsi_cfg(vsi);
5206 if (!err)
5207 err = ice_up_complete(vsi);
5208
5209 return err;
5210}
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220
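/**
 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
 * @ring: Tx or Rx ring to read stats from
 * @pkts: packets stats counter
 * @bytes: bytes stats counter
 *
 * This function fetches stats from the ring considering the atomic operations
 * that are needed to read u64 values in 32 bit machines.
 */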
5221static void
5222ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
5223{
5224 unsigned int start;
5225 *pkts = 0;
5226 *bytes = 0;
5227
5228 if (!ring)
5229 return;
5230 do {
5231 start = u64_stats_fetch_begin_irq(&ring->syncp);
5232 *pkts = ring->stats.pkts;
5233 *bytes = ring->stats.bytes;
5234 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
5235}
5236
5237
5238
5239
5240
5241
5242
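/**
 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
 * @vsi: the VSI to be updated
 * @rings: rings to work on
 * @count: number of rings
 */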
5243static void
5244ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
5245 u16 count)
5246{
5247 struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5248 u16 i;
5249
5250 for (i = 0; i < count; i++) {
5251 struct ice_ring *ring;
5252 u64 pkts, bytes;
5253
5254 ring = READ_ONCE(rings[i]);
5255 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5256 vsi_stats->tx_packets += pkts;
5257 vsi_stats->tx_bytes += bytes;
5258 vsi->tx_restart += ring->tx_stats.restart_q;
5259 vsi->tx_busy += ring->tx_stats.tx_busy;
5260 vsi->tx_linearize += ring->tx_stats.tx_linearize;
5261 }
5262}
5263
5264
5265
5266
5267
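/**
 * ice_update_vsi_ring_stats - Update VSI stats counters
 * @vsi: the VSI to be updated
 */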
5268static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
5269{
5270 struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5271 struct ice_ring *ring;
5272 u64 pkts, bytes;
5273 int i;
5274
5275
5276 vsi_stats->tx_packets = 0;
5277 vsi_stats->tx_bytes = 0;
5278 vsi_stats->rx_packets = 0;
5279 vsi_stats->rx_bytes = 0;
5280
5281
5282 vsi->tx_restart = 0;
5283 vsi->tx_busy = 0;
5284 vsi->tx_linearize = 0;
5285 vsi->rx_buf_failed = 0;
5286 vsi->rx_page_failed = 0;
5287 vsi->rx_gro_dropped = 0;
5288
5289 rcu_read_lock();
5290
5291
5292 ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
5293
5294
5295 ice_for_each_rxq(vsi, i) {
5296 ring = READ_ONCE(vsi->rx_rings[i]);
5297 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5298 vsi_stats->rx_packets += pkts;
5299 vsi_stats->rx_bytes += bytes;
5300 vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
5301 vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
5302 vsi->rx_gro_dropped += ring->rx_stats.gro_dropped;
5303 }
5304
5305
5306 if (ice_is_xdp_ena_vsi(vsi))
5307 ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
5308 vsi->num_xdp_txq);
5309
5310 rcu_read_unlock();
5311}
5312
5313
5314
5315
5316
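/**
 * ice_update_vsi_stats - Update VSI stats counters
 * @vsi: the VSI to be updated
 */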
5317void ice_update_vsi_stats(struct ice_vsi *vsi)
5318{
5319 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
5320 struct ice_eth_stats *cur_es = &vsi->eth_stats;
5321 struct ice_pf *pf = vsi->back;
5322
5323 if (test_bit(__ICE_DOWN, vsi->state) ||
5324 test_bit(__ICE_CFG_BUSY, pf->state))
5325 return;
5326
5327
5328 ice_update_vsi_ring_stats(vsi);
5329
5330
5331 ice_update_eth_stats(vsi);
5332
5333 cur_ns->tx_errors = cur_es->tx_errors;
5334 cur_ns->rx_dropped = cur_es->rx_discards + vsi->rx_gro_dropped;
5335 cur_ns->tx_dropped = cur_es->tx_discards;
5336 cur_ns->multicast = cur_es->rx_multicast;
5337
5338
5339 if (vsi->type == ICE_VSI_PF) {
5340 cur_ns->rx_crc_errors = pf->stats.crc_errors;
5341 cur_ns->rx_errors = pf->stats.crc_errors +
5342 pf->stats.illegal_bytes +
5343 pf->stats.rx_len_errors +
5344 pf->stats.rx_undersize +
5345 pf->hw_csum_rx_error +
5346 pf->stats.rx_jabber +
5347 pf->stats.rx_fragments +
5348 pf->stats.rx_oversize;
5349 cur_ns->rx_length_errors = pf->stats.rx_len_errors;
5350
5351 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
5352 }
5353}
5354
5355
5356
5357
5358
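/**
 * ice_update_pf_stats - Update PF port stats counters
 * @pf: PF whose stats need to be updated
 */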
5359void ice_update_pf_stats(struct ice_pf *pf)
5360{
5361 struct ice_hw_port_stats *prev_ps, *cur_ps;
5362 struct ice_hw *hw = &pf->hw;
5363 u16 fd_ctr_base;
5364 u8 port;
5365
5366 port = hw->port_info->lport;
5367 prev_ps = &pf->stats_prev;
5368 cur_ps = &pf->stats;
5369
5370 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
5371 &prev_ps->eth.rx_bytes,
5372 &cur_ps->eth.rx_bytes);
5373
5374 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
5375 &prev_ps->eth.rx_unicast,
5376 &cur_ps->eth.rx_unicast);
5377
5378 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
5379 &prev_ps->eth.rx_multicast,
5380 &cur_ps->eth.rx_multicast);
5381
5382 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
5383 &prev_ps->eth.rx_broadcast,
5384 &cur_ps->eth.rx_broadcast);
5385
5386 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
5387 &prev_ps->eth.rx_discards,
5388 &cur_ps->eth.rx_discards);
5389
5390 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
5391 &prev_ps->eth.tx_bytes,
5392 &cur_ps->eth.tx_bytes);
5393
5394 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
5395 &prev_ps->eth.tx_unicast,
5396 &cur_ps->eth.tx_unicast);
5397
5398 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
5399 &prev_ps->eth.tx_multicast,
5400 &cur_ps->eth.tx_multicast);
5401
5402 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
5403 &prev_ps->eth.tx_broadcast,
5404 &cur_ps->eth.tx_broadcast);
5405
5406 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
5407 &prev_ps->tx_dropped_link_down,
5408 &cur_ps->tx_dropped_link_down);
5409
5410 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
5411 &prev_ps->rx_size_64, &cur_ps->rx_size_64);
5412
5413 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
5414 &prev_ps->rx_size_127, &cur_ps->rx_size_127);
5415
5416 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
5417 &prev_ps->rx_size_255, &cur_ps->rx_size_255);
5418
5419 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
5420 &prev_ps->rx_size_511, &cur_ps->rx_size_511);
5421
5422 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
5423 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
5424
5425 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
5426 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
5427
5428 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
5429 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
5430
5431 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
5432 &prev_ps->tx_size_64, &cur_ps->tx_size_64);
5433
5434 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
5435 &prev_ps->tx_size_127, &cur_ps->tx_size_127);
5436
5437 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
5438 &prev_ps->tx_size_255, &cur_ps->tx_size_255);
5439
5440 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
5441 &prev_ps->tx_size_511, &cur_ps->tx_size_511);
5442
5443 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
5444 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
5445
5446 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
5447 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
5448
5449 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
5450 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
5451
5452 fd_ctr_base = hw->fd_ctr_base;
5453
5454 ice_stat_update40(hw,
5455 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
5456 pf->stat_prev_loaded, &prev_ps->fd_sb_match,
5457 &cur_ps->fd_sb_match);
5458 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
5459 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
5460
5461 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
5462 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
5463
5464 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
5465 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
5466
5467 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
5468 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
5469
5470 ice_update_dcb_stats(pf);
5471
5472 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
5473 &prev_ps->crc_errors, &cur_ps->crc_errors);
5474
5475 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
5476 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
5477
5478 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
5479 &prev_ps->mac_local_faults,
5480 &cur_ps->mac_local_faults);
5481
5482 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
5483 &prev_ps->mac_remote_faults,
5484 &cur_ps->mac_remote_faults);
5485
5486 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
5487 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
5488
5489 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
5490 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
5491
5492 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
5493 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
5494
5495 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
5496 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
5497
5498 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
5499 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
5500
5501 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
5502
5503 pf->stat_prev_loaded = true;
5504}
5505
5506
5507
5508
5509
5510
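/**
 * ice_get_stats64 - get statistics for network device structure
 * @netdev: network interface device structure
 * @stats: main device statistics structure
 */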
5511static
5512void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
5513{
5514 struct ice_netdev_priv *np = netdev_priv(netdev);
5515 struct rtnl_link_stats64 *vsi_stats;
5516 struct ice_vsi *vsi = np->vsi;
5517
5518 vsi_stats = &vsi->net_stats;
5519
5520 if (!vsi->num_txq || !vsi->num_rxq)
5521 return;
5522
5523
5524
5525
5526
5527
5528 if (!test_bit(__ICE_DOWN, vsi->state))
5529 ice_update_vsi_ring_stats(vsi);
5530 stats->tx_packets = vsi_stats->tx_packets;
5531 stats->tx_bytes = vsi_stats->tx_bytes;
5532 stats->rx_packets = vsi_stats->rx_packets;
5533 stats->rx_bytes = vsi_stats->rx_bytes;
5534
5535
5536
5537
5538
5539 stats->multicast = vsi_stats->multicast;
5540 stats->tx_errors = vsi_stats->tx_errors;
5541 stats->tx_dropped = vsi_stats->tx_dropped;
5542 stats->rx_errors = vsi_stats->rx_errors;
5543 stats->rx_dropped = vsi_stats->rx_dropped;
5544 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
5545 stats->rx_length_errors = vsi_stats->rx_length_errors;
5546}
5547
5548
5549
5550
5551
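/**
 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
 * @vsi: VSI having NAPI disabled
 */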
5552static void ice_napi_disable_all(struct ice_vsi *vsi)
5553{
5554 int q_idx;
5555
5556 if (!vsi->netdev)
5557 return;
5558
5559 ice_for_each_q_vector(vsi, q_idx) {
5560 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5561
5562 if (q_vector->rx.ring || q_vector->tx.ring)
5563 napi_disable(&q_vector->napi);
5564 }
5565}
5566
5567
5568
5569
5570
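/**
 * ice_down - Shutdown the connection
 * @vsi: The VSI being stopped
 */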
5571int ice_down(struct ice_vsi *vsi)
5572{
5573 int i, tx_err, rx_err, link_err = 0;
5574
5575
5576
5577
5578 if (vsi->netdev) {
5579 netif_carrier_off(vsi->netdev);
5580 netif_tx_disable(vsi->netdev);
5581 }
5582
5583 ice_vsi_dis_irq(vsi);
5584
5585 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
5586 if (tx_err)
		netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
			   vsi->vsi_num, tx_err);
5589 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
5590 tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
5591 if (tx_err)
			netdev_err(vsi->netdev, "Failed to stop XDP rings, VSI %d error %d\n",
				   vsi->vsi_num, tx_err);
5594 }
5595
5596 rx_err = ice_vsi_stop_all_rx_rings(vsi);
5597 if (rx_err)
		netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
			   vsi->vsi_num, rx_err);
5600
5601 ice_napi_disable_all(vsi);
5602
5603 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
5604 link_err = ice_force_phys_link_state(vsi, false);
5605 if (link_err)
5606 netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
5607 vsi->vsi_num, link_err);
5608 }
5609
5610 ice_for_each_txq(vsi, i)
5611 ice_clean_tx_ring(vsi->tx_rings[i]);
5612
5613 ice_for_each_rxq(vsi, i)
5614 ice_clean_rx_ring(vsi->rx_rings[i]);
5615
5616 if (tx_err || rx_err || link_err) {
5617 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
5618 vsi->vsi_num, vsi->vsw->sw_id);
5619 return -EIO;
5620 }
5621
5622 return 0;
5623}
5624
5625
5626
5627
5628
5629
5630
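/**
 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
 * @vsi: VSI having resources allocated
 *
 * Return 0 on success, negative on failure
 */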
5631int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
5632{
5633 int i, err = 0;
5634
5635 if (!vsi->num_txq) {
5636 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
5637 vsi->vsi_num);
5638 return -EINVAL;
5639 }
5640
5641 ice_for_each_txq(vsi, i) {
5642 struct ice_ring *ring = vsi->tx_rings[i];
5643
5644 if (!ring)
5645 return -EINVAL;
5646
5647 ring->netdev = vsi->netdev;
5648 err = ice_setup_tx_ring(ring);
5649 if (err)
5650 break;
5651 }
5652
5653 return err;
5654}
5655
5656
5657
5658
5659
5660
5661
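/**
 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
 * @vsi: VSI having resources allocated
 *
 * Return 0 on success, negative on failure
 */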
5662int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
5663{
5664 int i, err = 0;
5665
5666 if (!vsi->num_rxq) {
5667 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
5668 vsi->vsi_num);
5669 return -EINVAL;
5670 }
5671
5672 ice_for_each_rxq(vsi, i) {
5673 struct ice_ring *ring = vsi->rx_rings[i];
5674
5675 if (!ring)
5676 return -EINVAL;
5677
5678 ring->netdev = vsi->netdev;
5679 err = ice_setup_rx_ring(ring);
5680 if (err)
5681 break;
5682 }
5683
5684 return err;
5685}
5686
5687
5688
5689
5690
5691
5692
5693
5694
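/**
 * ice_vsi_open_ctrl - open control VSI for use
 * @vsi: the VSI to open
 *
 * Initialization of the Control VSI
 *
 * Returns 0 on success, negative value on error
 */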
5695int ice_vsi_open_ctrl(struct ice_vsi *vsi)
5696{
5697 char int_name[ICE_INT_NAME_STR_LEN];
5698 struct ice_pf *pf = vsi->back;
5699 struct device *dev;
5700 int err;
5701
5702 dev = ice_pf_to_dev(pf);
5703
5704 err = ice_vsi_setup_tx_rings(vsi);
5705 if (err)
5706 goto err_setup_tx;
5707
5708 err = ice_vsi_setup_rx_rings(vsi);
5709 if (err)
5710 goto err_setup_rx;
5711
5712 err = ice_vsi_cfg(vsi);
5713 if (err)
5714 goto err_setup_rx;
5715
5716 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
5717 dev_driver_string(dev), dev_name(dev));
5718 err = ice_vsi_req_irq_msix(vsi, int_name);
5719 if (err)
5720 goto err_setup_rx;
5721
5722 ice_vsi_cfg_msix(vsi);
5723
5724 err = ice_vsi_start_all_rx_rings(vsi);
5725 if (err)
5726 goto err_up_complete;
5727
5728 clear_bit(__ICE_DOWN, vsi->state);
5729 ice_vsi_ena_irq(vsi);
5730
5731 return 0;
5732
5733err_up_complete:
5734 ice_down(vsi);
5735err_setup_rx:
5736 ice_vsi_free_rx_rings(vsi);
5737err_setup_tx:
5738 ice_vsi_free_tx_rings(vsi);
5739
5740 return err;
5741}
5742
5743
5744
5745
5746
5747
5748
5749
5750
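/**
 * ice_vsi_open - Called when a network interface is made active
 * @vsi: the VSI to open
 *
 * Initialization of the VSI
 *
 * Returns 0 on success, negative value on error
 */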
5751static int ice_vsi_open(struct ice_vsi *vsi)
5752{
5753 char int_name[ICE_INT_NAME_STR_LEN];
5754 struct ice_pf *pf = vsi->back;
5755 int err;
5756
5757
5758 err = ice_vsi_setup_tx_rings(vsi);
5759 if (err)
5760 goto err_setup_tx;
5761
5762 err = ice_vsi_setup_rx_rings(vsi);
5763 if (err)
5764 goto err_setup_rx;
5765
5766 err = ice_vsi_cfg(vsi);
5767 if (err)
5768 goto err_setup_rx;
5769
5770 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5771 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
5772 err = ice_vsi_req_irq_msix(vsi, int_name);
5773 if (err)
5774 goto err_setup_rx;
5775
5776
5777 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
5778 if (err)
5779 goto err_set_qs;
5780
5781 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
5782 if (err)
5783 goto err_set_qs;
5784
5785 err = ice_up_complete(vsi);
5786 if (err)
5787 goto err_up_complete;
5788
5789 return 0;
5790
5791err_up_complete:
5792 ice_down(vsi);
5793err_set_qs:
5794 ice_vsi_free_irq(vsi);
5795err_setup_rx:
5796 ice_vsi_free_rx_rings(vsi);
5797err_setup_tx:
5798 ice_vsi_free_tx_rings(vsi);
5799
5800 return err;
5801}
5802
5803
5804
5805
5806
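/**
 * ice_vsi_release_all - Delete all VSIs
 * @pf: PF from which all VSIs are being removed
 */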
5807static void ice_vsi_release_all(struct ice_pf *pf)
5808{
5809 int err, i;
5810
5811 if (!pf->vsi)
5812 return;
5813
5814 ice_for_each_vsi(pf, i) {
5815 if (!pf->vsi[i])
5816 continue;
5817
5818 err = ice_vsi_release(pf->vsi[i]);
5819 if (err)
5820 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
5821 i, err, pf->vsi[i]->vsi_num);
5822 }
5823}
5824
5825
5826
5827
5828
5829
5830
5831
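/**
 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
 * @pf: pointer to the PF instance
 * @type: VSI type to rebuild
 *
 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
 */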
5832static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
5833{
5834 struct device *dev = ice_pf_to_dev(pf);
5835 enum ice_status status;
5836 int i, err;
5837
5838 ice_for_each_vsi(pf, i) {
5839 struct ice_vsi *vsi = pf->vsi[i];
5840
5841 if (!vsi || vsi->type != type)
5842 continue;
5843
5844
5845 err = ice_vsi_rebuild(vsi, true);
5846 if (err) {
5847 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
5848 err, vsi->idx, ice_vsi_type_str(type));
5849 return err;
5850 }
5851
5852
5853 status = ice_replay_vsi(&pf->hw, vsi->idx);
5854 if (status) {
5855 dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
5856 ice_stat_str(status), vsi->idx,
5857 ice_vsi_type_str(type));
5858 return -EIO;
5859 }
5860
5861
5862
5863
5864 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
5865
5866
5867 err = ice_ena_vsi(vsi, false);
5868 if (err) {
5869 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
5870 err, vsi->idx, ice_vsi_type_str(type));
5871 return err;
5872 }
5873
5874 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
5875 ice_vsi_type_str(type));
5876 }
5877
5878 return 0;
5879}
5880
5881
5882
5883
5884
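/**
 * ice_update_pf_netdev_link - Update PF netdev link status
 * @pf: pointer to the PF instance
 */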
5885static void ice_update_pf_netdev_link(struct ice_pf *pf)
5886{
5887 bool link_up;
5888 int i;
5889
5890 ice_for_each_vsi(pf, i) {
5891 struct ice_vsi *vsi = pf->vsi[i];
5892
5893 if (!vsi || vsi->type != ICE_VSI_PF)
5894 return;
5895
5896 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
5897 if (link_up) {
5898 netif_carrier_on(pf->vsi[i]->netdev);
5899 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
5900 } else {
5901 netif_carrier_off(pf->vsi[i]->netdev);
5902 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
5903 }
5904 }
5905}
5906
5907
5908
5909
5910
5911
5912
5913
5914
5915
5916
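/**
 * ice_rebuild - rebuild after reset
 * @pf: PF to rebuild
 * @reset_type: type of reset
 */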
5917static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
5918{
5919 struct device *dev = ice_pf_to_dev(pf);
5920 struct ice_hw *hw = &pf->hw;
5921 enum ice_status ret;
5922 int err;
5923
5924 if (test_bit(__ICE_DOWN, pf->state))
5925 goto clear_recovery;
5926
5927 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
5928
5929 ret = ice_init_all_ctrlq(hw);
5930 if (ret) {
5931 dev_err(dev, "control queues init failed %s\n",
5932 ice_stat_str(ret));
5933 goto err_init_ctrlq;
5934 }
5935
5936
5937 if (!ice_is_safe_mode(pf)) {
5938
5939 if (reset_type == ICE_RESET_PFR)
5940 ice_fill_blk_tbls(hw);
5941 else
5942
5943 ice_load_pkg(NULL, pf);
5944 }
5945
5946 ret = ice_clear_pf_cfg(hw);
5947 if (ret) {
5948 dev_err(dev, "clear PF configuration failed %s\n",
5949 ice_stat_str(ret));
5950 goto err_init_ctrlq;
5951 }
5952
5953 if (pf->first_sw->dflt_vsi_ena)
5954 dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
5955
5956 pf->first_sw->dflt_vsi = NULL;
5957 pf->first_sw->dflt_vsi_ena = false;
5958
5959 ice_clear_pxe_mode(hw);
5960
5961 ret = ice_get_caps(hw);
5962 if (ret) {
5963 dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
5964 goto err_init_ctrlq;
5965 }
5966
5967 ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
5968 if (ret) {
5969 dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
5970 goto err_init_ctrlq;
5971 }
5972
5973 err = ice_sched_init_port(hw->port_info);
5974 if (err)
5975 goto err_sched_init_port;
5976
5977
5978 err = ice_req_irq_msix_misc(pf);
5979 if (err) {
5980 dev_err(dev, "misc vector setup failed: %d\n", err);
5981 goto err_sched_init_port;
5982 }
5983
5984 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
5985 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
5986 if (!rd32(hw, PFQF_FD_SIZE)) {
5987 u16 unused, guar, b_effort;
5988
5989 guar = hw->func_caps.fd_fltr_guar;
5990 b_effort = hw->func_caps.fd_fltr_best_effort;
5991
5992
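			/* force guaranteed filter pool for PF */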
5993 ice_alloc_fd_guar_item(hw, &unused, guar);
5994
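			/* force shared filter pool for PF */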
5995 ice_alloc_fd_shrd_item(hw, &unused, b_effort);
5996 }
5997 }
5998
5999 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
6000 ice_dcb_rebuild(pf);
6001
6002
6003 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
6004 if (err) {
6005 dev_err(dev, "PF VSI rebuild failed: %d\n", err);
6006 goto err_vsi_rebuild;
6007 }
6008
6009
6010 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6011 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
6012 if (err) {
6013 dev_err(dev, "control VSI rebuild failed: %d\n", err);
6014 goto err_vsi_rebuild;
6015 }
6016
6017
6018 if (hw->fdir_prof)
6019 ice_fdir_replay_flows(hw);
6020
6021
6022 ice_fdir_replay_fltrs(pf);
6023
6024 ice_rebuild_arfs(pf);
6025 }
6026
6027 ice_update_pf_netdev_link(pf);
6028
6029
6030 ret = ice_send_version(pf);
6031 if (ret) {
6032 dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
6033 ice_stat_str(ret));
6034 goto err_vsi_rebuild;
6035 }
6036
6037 ice_replay_post(hw);
6038
6039
6040 clear_bit(__ICE_RESET_FAILED, pf->state);
6041 return;
6042
6043err_vsi_rebuild:
6044err_sched_init_port:
6045 ice_sched_cleanup_all(hw);
6046err_init_ctrlq:
6047 ice_shutdown_all_ctrlq(hw);
6048 set_bit(__ICE_RESET_FAILED, pf->state);
6049clear_recovery:
6050
6051 set_bit(__ICE_NEEDS_RESTART, pf->state);
6052 dev_err(dev, "Rebuild failed, unload and reload driver\n");
6053}
6054
6055
6056
6057
6058
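/**
 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
 * @vsi: the VSI being configured
 */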
6059static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
6060{
6061 if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
6062 return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
6063 else
6064 return ICE_RXBUF_3072;
6065}
6066
6067
6068
6069
6070
6071
6072
6073
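/**
 * ice_change_mtu - NDO callback to change the MTU
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */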
6074static int ice_change_mtu(struct net_device *netdev, int new_mtu)
6075{
6076 struct ice_netdev_priv *np = netdev_priv(netdev);
6077 struct ice_vsi *vsi = np->vsi;
6078 struct ice_pf *pf = vsi->back;
6079 u8 count = 0;
6080
6081 if (new_mtu == (int)netdev->mtu) {
6082 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
6083 return 0;
6084 }
6085
6086 if (ice_is_xdp_ena_vsi(vsi)) {
6087 int frame_size = ice_max_xdp_frame_size(vsi);
6088
6089 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
6090 netdev_err(netdev, "max MTU for XDP usage is %d\n",
6091 frame_size - ICE_ETH_PKT_HDR_PAD);
6092 return -EINVAL;
6093 }
6094 }
6095
6096 if (new_mtu < (int)netdev->min_mtu) {
6097 netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
6098 netdev->min_mtu);
6099 return -EINVAL;
6100 } else if (new_mtu > (int)netdev->max_mtu) {
		netdev_err(netdev, "new MTU invalid. max_mtu is %d\n",
			   netdev->max_mtu);
6103 return -EINVAL;
6104 }
6105
6106 do {
6107 if (ice_is_reset_in_progress(pf->state)) {
6108 count++;
6109 usleep_range(1000, 2000);
6110 } else {
6111 break;
6112 }
6113
6114 } while (count < 100);
6115
6116 if (count == 100) {
6117 netdev_err(netdev, "can't change MTU. Device is busy\n");
6118 return -EBUSY;
6119 }
6120
6121 netdev->mtu = (unsigned int)new_mtu;
6122
6123
6124 if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
6125 int err;
6126
6127 err = ice_down(vsi);
6128 if (err) {
			netdev_err(netdev, "change MTU if_down err %d\n", err);
6130 return err;
6131 }
6132
6133 err = ice_up(vsi);
6134 if (err) {
6135 netdev_err(netdev, "change MTU if_up err %d\n", err);
6136 return err;
6137 }
6138 }
6139
6140 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
6141 return 0;
6142}
6143
6144
6145
6146
6147
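/**
 * ice_aq_str - convert AQ err code to a string
 * @aq_err: the AQ error code to convert
 */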
6148const char *ice_aq_str(enum ice_aq_err aq_err)
6149{
6150 switch (aq_err) {
6151 case ICE_AQ_RC_OK:
6152 return "OK";
6153 case ICE_AQ_RC_EPERM:
6154 return "ICE_AQ_RC_EPERM";
6155 case ICE_AQ_RC_ENOENT:
6156 return "ICE_AQ_RC_ENOENT";
6157 case ICE_AQ_RC_ENOMEM:
6158 return "ICE_AQ_RC_ENOMEM";
6159 case ICE_AQ_RC_EBUSY:
6160 return "ICE_AQ_RC_EBUSY";
6161 case ICE_AQ_RC_EEXIST:
6162 return "ICE_AQ_RC_EEXIST";
6163 case ICE_AQ_RC_EINVAL:
6164 return "ICE_AQ_RC_EINVAL";
6165 case ICE_AQ_RC_ENOSPC:
6166 return "ICE_AQ_RC_ENOSPC";
6167 case ICE_AQ_RC_ENOSYS:
6168 return "ICE_AQ_RC_ENOSYS";
6169 case ICE_AQ_RC_EMODE:
6170 return "ICE_AQ_RC_EMODE";
6171 case ICE_AQ_RC_ENOSEC:
6172 return "ICE_AQ_RC_ENOSEC";
6173 case ICE_AQ_RC_EBADSIG:
6174 return "ICE_AQ_RC_EBADSIG";
6175 case ICE_AQ_RC_ESVN:
6176 return "ICE_AQ_RC_ESVN";
6177 case ICE_AQ_RC_EBADMAN:
6178 return "ICE_AQ_RC_EBADMAN";
6179 case ICE_AQ_RC_EBADBUF:
6180 return "ICE_AQ_RC_EBADBUF";
6181 }
6182
6183 return "ICE_AQ_RC_UNKNOWN";
6184}
6185
6186
6187
6188
6189
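/**
 * ice_stat_str - convert status err code to a string
 * @stat_err: the status error code to convert
 */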
6190const char *ice_stat_str(enum ice_status stat_err)
6191{
6192 switch (stat_err) {
6193 case ICE_SUCCESS:
6194 return "OK";
6195 case ICE_ERR_PARAM:
6196 return "ICE_ERR_PARAM";
6197 case ICE_ERR_NOT_IMPL:
6198 return "ICE_ERR_NOT_IMPL";
6199 case ICE_ERR_NOT_READY:
6200 return "ICE_ERR_NOT_READY";
6201 case ICE_ERR_NOT_SUPPORTED:
6202 return "ICE_ERR_NOT_SUPPORTED";
6203 case ICE_ERR_BAD_PTR:
6204 return "ICE_ERR_BAD_PTR";
6205 case ICE_ERR_INVAL_SIZE:
6206 return "ICE_ERR_INVAL_SIZE";
6207 case ICE_ERR_DEVICE_NOT_SUPPORTED:
6208 return "ICE_ERR_DEVICE_NOT_SUPPORTED";
6209 case ICE_ERR_RESET_FAILED:
6210 return "ICE_ERR_RESET_FAILED";
6211 case ICE_ERR_FW_API_VER:
6212 return "ICE_ERR_FW_API_VER";
6213 case ICE_ERR_NO_MEMORY:
6214 return "ICE_ERR_NO_MEMORY";
6215 case ICE_ERR_CFG:
6216 return "ICE_ERR_CFG";
6217 case ICE_ERR_OUT_OF_RANGE:
6218 return "ICE_ERR_OUT_OF_RANGE";
6219 case ICE_ERR_ALREADY_EXISTS:
6220 return "ICE_ERR_ALREADY_EXISTS";
6221 case ICE_ERR_NVM_CHECKSUM:
6222 return "ICE_ERR_NVM_CHECKSUM";
6223 case ICE_ERR_BUF_TOO_SHORT:
6224 return "ICE_ERR_BUF_TOO_SHORT";
6225 case ICE_ERR_NVM_BLANK_MODE:
6226 return "ICE_ERR_NVM_BLANK_MODE";
6227 case ICE_ERR_IN_USE:
6228 return "ICE_ERR_IN_USE";
6229 case ICE_ERR_MAX_LIMIT:
6230 return "ICE_ERR_MAX_LIMIT";
6231 case ICE_ERR_RESET_ONGOING:
6232 return "ICE_ERR_RESET_ONGOING";
6233 case ICE_ERR_HW_TABLE:
6234 return "ICE_ERR_HW_TABLE";
6235 case ICE_ERR_DOES_NOT_EXIST:
6236 return "ICE_ERR_DOES_NOT_EXIST";
6237 case ICE_ERR_FW_DDP_MISMATCH:
6238 return "ICE_ERR_FW_DDP_MISMATCH";
6239 case ICE_ERR_AQ_ERROR:
6240 return "ICE_ERR_AQ_ERROR";
6241 case ICE_ERR_AQ_TIMEOUT:
6242 return "ICE_ERR_AQ_TIMEOUT";
6243 case ICE_ERR_AQ_FULL:
6244 return "ICE_ERR_AQ_FULL";
6245 case ICE_ERR_AQ_NO_WORK:
6246 return "ICE_ERR_AQ_NO_WORK";
6247 case ICE_ERR_AQ_EMPTY:
6248 return "ICE_ERR_AQ_EMPTY";
6249 case ICE_ERR_AQ_FW_CRITICAL:
6250 return "ICE_ERR_AQ_FW_CRITICAL";
6251 }
6252
6253 return "ICE_ERR_UNKNOWN";
6254}
6255
6256
6257
6258
6259
6260
6261
6262
6263
6264
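/**
 * ice_set_rss - Set RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 */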
6265int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
6266{
6267 struct ice_pf *pf = vsi->back;
6268 struct ice_hw *hw = &pf->hw;
6269 enum ice_status status;
6270 struct device *dev;
6271
6272 dev = ice_pf_to_dev(pf);
6273 if (seed) {
6274 struct ice_aqc_get_set_rss_keys *buf =
6275 (struct ice_aqc_get_set_rss_keys *)seed;
6276
6277 status = ice_aq_set_rss_key(hw, vsi->idx, buf);
6278
6279 if (status) {
6280 dev_err(dev, "Cannot set RSS key, err %s aq_err %s\n",
6281 ice_stat_str(status),
6282 ice_aq_str(hw->adminq.sq_last_status));
6283 return -EIO;
6284 }
6285 }
6286
6287 if (lut) {
6288 status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
6289 lut, lut_size);
6290 if (status) {
6291 dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n",
6292 ice_stat_str(status),
6293 ice_aq_str(hw->adminq.sq_last_status));
6294 return -EIO;
6295 }
6296 }
6297
6298 return 0;
6299}
6300
6301
6302
6303
6304
6305
6306
6307
6308
6309
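/**
 * ice_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */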
6310int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
6311{
6312 struct ice_pf *pf = vsi->back;
6313 struct ice_hw *hw = &pf->hw;
6314 enum ice_status status;
6315 struct device *dev;
6316
6317 dev = ice_pf_to_dev(pf);
6318 if (seed) {
6319 struct ice_aqc_get_set_rss_keys *buf =
6320 (struct ice_aqc_get_set_rss_keys *)seed;
6321
6322 status = ice_aq_get_rss_key(hw, vsi->idx, buf);
6323 if (status) {
6324 dev_err(dev, "Cannot get RSS key, err %s aq_err %s\n",
6325 ice_stat_str(status),
6326 ice_aq_str(hw->adminq.sq_last_status));
6327 return -EIO;
6328 }
6329 }
6330
6331 if (lut) {
6332 status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
6333 lut, lut_size);
6334 if (status) {
6335 dev_err(dev, "Cannot get RSS lut, err %s aq_err %s\n",
6336 ice_stat_str(status),
6337 ice_aq_str(hw->adminq.sq_last_status));
6338 return -EIO;
6339 }
6340 }
6341
6342 return 0;
6343}
6344
6345
6346
6347
6348
6349
6350
6351
6352
6353
6354
6355
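/**
 * ice_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process ID
 * @seq: RTNL message seq
 * @dev: the netdev being configured
 * @filter_mask: filter mask passed in
 * @nlflags: netlink flags passed in
 *
 * Return the bridge mode (VEB/VEPA)
 */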
6356static int
6357ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
6358 struct net_device *dev, u32 filter_mask, int nlflags)
6359{
6360 struct ice_netdev_priv *np = netdev_priv(dev);
6361 struct ice_vsi *vsi = np->vsi;
6362 struct ice_pf *pf = vsi->back;
6363 u16 bmode;
6364
6365 bmode = pf->first_sw->bridge_mode;
6366
6367 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
6368 filter_mask, NULL);
6369}
6370
6371
6372
6373
6374
6375
6376
6377
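/**
 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
 * @vsi: Pointer to VSI structure
 * @bmode: Hardware bridge mode (VEB/VEPA)
 *
 * Returns 0 on success, negative on failure
 */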
6378static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
6379{
6380 struct ice_aqc_vsi_props *vsi_props;
6381 struct ice_hw *hw = &vsi->back->hw;
6382 struct ice_vsi_ctx *ctxt;
6383 enum ice_status status;
6384 int ret = 0;
6385
6386 vsi_props = &vsi->info;
6387
6388 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
6389 if (!ctxt)
6390 return -ENOMEM;
6391
6392 ctxt->info = vsi->info;
6393
6394 if (bmode == BRIDGE_MODE_VEB)
6395
6396 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6397 else
6398
6399 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6400 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
6401
6402 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
6403 if (status) {
6404 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
6405 bmode, ice_stat_str(status),
6406 ice_aq_str(hw->adminq.sq_last_status));
6407 ret = -EIO;
6408 goto out;
6409 }
6410
6411 vsi_props->sw_flags = ctxt->info.sw_flags;
6412
6413out:
6414 kfree(ctxt);
6415 return ret;
6416}
6417
6418
6419
6420
6421
6422
6423
6424
6425
6426
6427
6428
6429
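/**
 * ice_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge setlink flags
 * @extack: netlink extended ack
 *
 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
 * hooked up to. Iterates through the PF VSI list and sets the loopback mode
 * (if not already set) for all VSIs connected to this switch, and also updates
 * the unicast switch filter rules for the corresponding switch of the netdev.
 */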
6430static int
6431ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
6432 u16 __always_unused flags,
6433 struct netlink_ext_ack __always_unused *extack)
6434{
6435 struct ice_netdev_priv *np = netdev_priv(dev);
6436 struct ice_pf *pf = np->vsi->back;
6437 struct nlattr *attr, *br_spec;
6438 struct ice_hw *hw = &pf->hw;
6439 enum ice_status status;
6440 struct ice_sw *pf_sw;
6441 int rem, v, err = 0;
6442
6443 pf_sw = pf->first_sw;
6444
6445 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
6446
6447 nla_for_each_nested(attr, br_spec, rem) {
6448 __u16 mode;
6449
6450 if (nla_type(attr) != IFLA_BRIDGE_MODE)
6451 continue;
6452 mode = nla_get_u16(attr);
6453 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
6454 return -EINVAL;
6455
6456 if (mode == pf_sw->bridge_mode)
6457 continue;
6458
6459
6460
6461 ice_for_each_vsi(pf, v) {
6462 if (!pf->vsi[v])
6463 continue;
6464 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
6465 if (err)
6466 return err;
6467 }
6468
6469 hw->evb_veb = (mode == BRIDGE_MODE_VEB);
6470
6471
6472
6473 status = ice_update_sw_rule_bridge_mode(hw);
6474 if (status) {
6475 netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
6476 mode, ice_stat_str(status),
6477 ice_aq_str(hw->adminq.sq_last_status));
6478
6479 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
6480 return -EIO;
6481 }
6482
6483 pf_sw->bridge_mode = mode;
6484 }
6485
6486 return 0;
6487}
6488
6489
6490
6491
6492
6493
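/**
 * ice_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: Tx queue
 */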
6494static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
6495{
6496 struct ice_netdev_priv *np = netdev_priv(netdev);
6497 struct ice_ring *tx_ring = NULL;
6498 struct ice_vsi *vsi = np->vsi;
6499 struct ice_pf *pf = vsi->back;
6500 u32 i;
6501
6502 pf->tx_timeout_count++;
6503
6504
6505
6506
6507
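	/* Check if PFC is enabled for the TC to which the queue belongs
	 * to. If yes then Tx timeout is not caused by a hung queue, no
	 * need to reset and rebuild
	 */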
6508 if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
6509 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
6510 txqueue);
6511 return;
6512 }
6513
6514
6515 for (i = 0; i < vsi->num_txq; i++)
6516 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
6517 if (txqueue == vsi->tx_rings[i]->q_index) {
6518 tx_ring = vsi->tx_rings[i];
6519 break;
6520 }
6521
6522
6523
6524
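	/* Reset recovery level if enough time has elapsed after last timeout.
	 * Also ensure no new reset action happens before next timeout period.
	 */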
6525 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
6526 pf->tx_timeout_recovery_level = 1;
6527 else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
6528 netdev->watchdog_timeo)))
6529 return;
6530
6531 if (tx_ring) {
6532 struct ice_hw *hw = &pf->hw;
6533 u32 head, val = 0;
6534
6535 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
6536 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
6537
6538 val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
6539
6540 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
6541 vsi->vsi_num, txqueue, tx_ring->next_to_clean,
6542 head, tx_ring->next_to_use, val);
6543 }
6544
6545 pf->tx_timeout_last_recovery = jiffies;
6546 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
6547 pf->tx_timeout_recovery_level, txqueue);
6548
6549 switch (pf->tx_timeout_recovery_level) {
6550 case 1:
6551 set_bit(__ICE_PFR_REQ, pf->state);
6552 break;
6553 case 2:
6554 set_bit(__ICE_CORER_REQ, pf->state);
6555 break;
6556 case 3:
6557 set_bit(__ICE_GLOBR_REQ, pf->state);
6558 break;
6559 default:
6560 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
6561 set_bit(__ICE_DOWN, pf->state);
6562 set_bit(__ICE_NEEDS_RESTART, vsi->state);
6563 set_bit(__ICE_SERVICE_DIS, pf->state);
6564 break;
6565 }
6566
6567 ice_service_task_schedule(pf);
6568 pf->tx_timeout_recovery_level++;
6569}
6570
6571
6572
6573
6574
6575
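/**
 * ice_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 */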
6576static void
6577ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti)
6578{
6579 struct ice_netdev_priv *np = netdev_priv(netdev);
6580 struct ice_vsi *vsi = np->vsi;
6581 struct ice_pf *pf = vsi->back;
6582 enum ice_tunnel_type tnl_type;
6583 u16 port = ntohs(ti->port);
6584 enum ice_status status;
6585
6586 switch (ti->type) {
6587 case UDP_TUNNEL_TYPE_VXLAN:
6588 tnl_type = TNL_VXLAN;
6589 break;
6590 case UDP_TUNNEL_TYPE_GENEVE:
6591 tnl_type = TNL_GENEVE;
6592 break;
6593 default:
6594 netdev_err(netdev, "Unknown tunnel type\n");
6595 return;
6596 }
6597
6598 status = ice_create_tunnel(&pf->hw, tnl_type, port);
6599 if (status == ICE_ERR_OUT_OF_RANGE)
6600 netdev_info(netdev, "Max tunneled UDP ports reached, port %d not added\n",
6601 port);
6602 else if (status)
6603 netdev_err(netdev, "Error adding UDP tunnel - %s\n",
6604 ice_stat_str(status));
6605}
6606
6607
6608
6609
6610
6611
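/**
 * ice_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 */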
6612static void
6613ice_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti)
6614{
6615 struct ice_netdev_priv *np = netdev_priv(netdev);
6616 struct ice_vsi *vsi = np->vsi;
6617 struct ice_pf *pf = vsi->back;
6618 u16 port = ntohs(ti->port);
6619 enum ice_status status;
6620 bool retval;
6621
6622 retval = ice_tunnel_port_in_use(&pf->hw, port, NULL);
6623 if (!retval) {
6624 netdev_info(netdev, "port %d not found in UDP tunnels list\n",
6625 port);
6626 return;
6627 }
6628
6629 status = ice_destroy_tunnel(&pf->hw, port, false);
6630 if (status)
6631 netdev_err(netdev, "error deleting port %d from UDP tunnels list\n",
6632 port);
6633}
6634
6635
6636
6637
6638
6639
6640
6641
6642
6643
6644
6645
6646
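/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IP on). At this point all resources needed
 * for transmit and receive operations are allocated, an interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */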
6647int ice_open(struct net_device *netdev)
6648{
6649 struct ice_netdev_priv *np = netdev_priv(netdev);
6650 struct ice_vsi *vsi = np->vsi;
6651 struct ice_pf *pf = vsi->back;
6652 struct ice_port_info *pi;
6653 int err;
6654
6655 if (test_bit(__ICE_NEEDS_RESTART, pf->state)) {
6656 netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
6657 return -EIO;
6658 }
6659
6660 if (test_bit(__ICE_DOWN, pf->state)) {
6661 netdev_err(netdev, "device is not ready yet\n");
6662 return -EBUSY;
6663 }
6664
6665 netif_carrier_off(netdev);
6666
6667 pi = vsi->port_info;
6668 err = ice_update_link_info(pi);
6669 if (err) {
6670 netdev_err(netdev, "Failed to get link info, error %d\n",
6671 err);
6672 return err;
6673 }
6674
6675
6676 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
6677 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
6678 if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) {
6679 err = ice_init_phy_user_cfg(pi);
6680 if (err) {
6681 netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
6682 err);
6683 return err;
6684 }
6685 }
6686
6687 err = ice_configure_phy(vsi);
6688 if (err) {
6689 netdev_err(netdev, "Failed to set physical link up, error %d\n",
6690 err);
6691 return err;
6692 }
6693 } else {
6694 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
6695 err = ice_aq_set_link_restart_an(pi, false, NULL);
6696 if (err) {
6697 netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n",
6698 vsi->vsi_num, err);
6699 return err;
6700 }
6701 }
6702
6703 err = ice_vsi_open(vsi);
6704 if (err)
6705 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
6706 vsi->vsi_num, vsi->vsw->sw_id);
6707
6708
6709 udp_tunnel_get_rx_info(netdev);
6710
6711 return err;
6712}
6713
6714
6715
6716
6717
6718
6719
6720
6721
6722
6723
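/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdev queue is shut down.
 *
 * Returns success only - not allowed to fail
 */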
6724int ice_stop(struct net_device *netdev)
6725{
6726 struct ice_netdev_priv *np = netdev_priv(netdev);
6727 struct ice_vsi *vsi = np->vsi;
6728
6729 ice_vsi_close(vsi);
6730
6731 return 0;
6732}
6733
6734
6735
6736
6737
6738
6739
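/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */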
6740static netdev_features_t
6741ice_features_check(struct sk_buff *skb,
6742 struct net_device __always_unused *netdev,
6743 netdev_features_t features)
6744{
6745 size_t len;
6746
6747
6748
6749
6750
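	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */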
6751 if (skb->ip_summed != CHECKSUM_PARTIAL)
6752 return features;
6753
6754
6755
6756
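	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */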
6757 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
6758 features &= ~NETIF_F_GSO_MASK;
6759
6760 len = skb_network_header(skb) - skb->data;
6761 if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
6762 goto out_rm_features;
6763
6764 len = skb_transport_header(skb) - skb_network_header(skb);
6765 if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
6766 goto out_rm_features;
6767
6768 if (skb->encapsulation) {
6769 len = skb_inner_network_header(skb) - skb_transport_header(skb);
6770 if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
6771 goto out_rm_features;
6772
6773 len = skb_inner_transport_header(skb) -
6774 skb_inner_network_header(skb);
6775 if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
6776 goto out_rm_features;
6777 }
6778
6779 return features;
6780out_rm_features:
6781 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
6782}
6783
6784static const struct net_device_ops ice_netdev_safe_mode_ops = {
6785 .ndo_open = ice_open,
6786 .ndo_stop = ice_stop,
6787 .ndo_start_xmit = ice_start_xmit,
6788 .ndo_set_mac_address = ice_set_mac_address,
6789 .ndo_validate_addr = eth_validate_addr,
6790 .ndo_change_mtu = ice_change_mtu,
6791 .ndo_get_stats64 = ice_get_stats64,
6792 .ndo_tx_timeout = ice_tx_timeout,
6793};
6794
6795static const struct net_device_ops ice_netdev_ops = {
6796 .ndo_open = ice_open,
6797 .ndo_stop = ice_stop,
6798 .ndo_start_xmit = ice_start_xmit,
6799 .ndo_features_check = ice_features_check,
6800 .ndo_set_rx_mode = ice_set_rx_mode,
6801 .ndo_set_mac_address = ice_set_mac_address,
6802 .ndo_validate_addr = eth_validate_addr,
6803 .ndo_change_mtu = ice_change_mtu,
6804 .ndo_get_stats64 = ice_get_stats64,
6805 .ndo_set_tx_maxrate = ice_set_tx_maxrate,
6806 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
6807 .ndo_set_vf_mac = ice_set_vf_mac,
6808 .ndo_get_vf_config = ice_get_vf_cfg,
6809 .ndo_set_vf_trust = ice_set_vf_trust,
6810 .ndo_set_vf_vlan = ice_set_vf_port_vlan,
6811 .ndo_set_vf_link_state = ice_set_vf_link_state,
6812 .ndo_get_vf_stats = ice_get_vf_stats,
6813 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
6814 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
6815 .ndo_set_features = ice_set_features,
6816 .ndo_bridge_getlink = ice_bridge_getlink,
6817 .ndo_bridge_setlink = ice_bridge_setlink,
6818 .ndo_fdb_add = ice_fdb_add,
6819 .ndo_fdb_del = ice_fdb_del,
6820#ifdef CONFIG_RFS_ACCEL
6821 .ndo_rx_flow_steer = ice_rx_flow_steer,
6822#endif
6823 .ndo_tx_timeout = ice_tx_timeout,
6824 .ndo_bpf = ice_xdp,
6825 .ndo_xdp_xmit = ice_xdp_xmit,
6826 .ndo_xsk_wakeup = ice_xsk_wakeup,
6827 .ndo_udp_tunnel_add = ice_udp_tunnel_add,
6828 .ndo_udp_tunnel_del = ice_udp_tunnel_del,
6829};
6830