// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

#define DRV_VERSION	"0.7.4-k"
#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf);

static void ice_vsi_release_all(struct ice_pf *pf);
static void ice_update_vsi_stats(struct ice_vsi *vsi);
static void ice_update_pf_stats(struct ice_pf *pf);

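/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */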
static u32 ice_get_tx_pending(struct ice_ring *ring)
{
	u32 head, tail;

	head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}

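/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */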
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	for (i = 0; i < vsi->num_txq; i++) {
		struct ice_ring *tx_ring = vsi->tx_rings[i];

		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.pkts & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

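/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set up the initial MAC filters for the PF VSI: one for the port's
 * permanent address and one for broadcast. If this fails, the netdev
 * is unregistered since the VSI is unusable without MAC filters.
 */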
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	LIST_HEAD(tmp_add_list);
	u8 broadcast[ETH_ALEN];
	struct ice_vsi *vsi;
	int status;

	vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
	if (!vsi)
		return -EINVAL;

	/* To add a MAC filter, first add the MAC to a list and then
	 * pass the list to ice_add_mac.
	 */

	/* Add a unicast MAC filter so the VSI can get its packets */
	status = ice_add_mac_to_list(vsi, &tmp_add_list,
				     vsi->port_info->mac.perm_addr);
	if (status)
		goto unregister;

	/* VSI needs to receive broadcast traffic, so add the broadcast
	 * MAC address to the list as well.
	 */
	eth_broadcast_addr(broadcast);
	status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
	if (status)
		goto free_mac_list;

	/* Program MAC filters for all VSI MACs */
	status = ice_add_mac(&pf->hw, &tmp_add_list);
	if (status)
		status = -ENOMEM;

free_mac_list:
	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);

unregister:
	/* We aren't useful with no MAC filters, so unregister if we
	 * had an error
	 */
	if (status && vsi->netdev->reg_state == NETREG_REGISTERED) {
		dev_err(&pf->pdev->dev,
			"Could not add MAC filters error %d. Unregistering device\n",
			status);
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}

	return status;
}

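/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add
 * the MAC filters to the hardware.
 */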
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
}

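/**
 * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @set_promisc: enable or disable promisc flag request
 */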
static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
{
	struct ice_hw *hw = &vsi->back->hw;
	enum ice_status status = 0;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (vsi->vlan_ena) {
		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
						  set_promisc);
	} else {
		if (set_promisc)
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     0);
		else
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       0);
	}

	if (status)
		return -EIO;

	return 0;
}

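/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */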
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status = 0;
	u32 changed_flags = 0;
	u8 promisc_m;
	int err = 0;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
	ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
	if (status) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (status == ICE_ERR_NO_MEMORY) {
			err = -ENOMEM;
			goto out;
		}
	}

	/* Add MAC addresses in the sync list */
	status = ice_add_mac(hw, &vsi->tmp_sync_list);
	ice_free_fltr_list(dev, &vsi->tmp_sync_list);
	/* If filter is added successfully or already exists, do not go into
	 * 'if' condition and report it as error. Instead continue processing
	 * rest of the function.
	 */
	if (status && status != ICE_ERR_ALREADY_EXISTS) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev,
				    "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			err = -EIO;
			goto out;
		}
	}

	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			if (vsi->vlan_ena)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_cfg_promisc(vsi, promisc_m, true);
			if (err) {
				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
			if (vsi->vlan_ena)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_cfg_promisc(vsi, promisc_m, false);
			if (err) {
				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
		clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
						  ICE_FLTR_RX);
			if (status) {
				netdev_err(netdev, "Error setting default VSI %i Rx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
						  ICE_FLTR_RX);
			if (status) {
				netdev_err(netdev, "Error clearing default VSI %i Rx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
	set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
exit:
	clear_bit(__ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_dis_vsi - pause a VSI
 * @vsi: the VSI being paused
 * @locked: is the rtnl_lock already held
 */
static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
	if (test_bit(__ICE_DOWN, vsi->state))
		return;

	set_bit(__ICE_NEEDS_RESTART, vsi->state);

	if (vsi->type == ICE_VSI_PF && vsi->netdev) {
		if (netif_running(vsi->netdev)) {
			if (!locked) {
				rtnl_lock();
				vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
				rtnl_unlock();
			} else {
				vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
			}
		} else {
			ice_vsi_close(vsi);
		}
	}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
#ifdef CONFIG_DCB
void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
#else
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
#endif /* CONFIG_DCB */
{
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);
}

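/**
 * ice_prepare_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for reset.
 */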
static void
ice_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* already prepared for reset */
	if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
		return;

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
}

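/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested (PFR, CORER, or GLOBR)
 */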
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = &pf->pdev->dev;
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
	WARN_ON(in_interrupt());

	ice_prepare_for_reset(pf);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(__ICE_RESET_FAILED, pf->state);
		clear_bit(__ICE_RESET_OICR_RECV, pf->state);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(__ICE_PFR_REQ, pf->state);
		clear_bit(__ICE_CORER_REQ, pf->state);
		clear_bit(__ICE_GLOBR_REQ, pf->state);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(__ICE_PFR_REQ, pf->state);
		ice_reset_all_vfs(pf, true);
	}
}

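/**
 * ice_reset_subtask - check for and handle reset requests
 * @pf: board private structure
 */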
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for the reset now), poll for reset done, rebuild and return.
	 */
	if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(__ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(__ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(__ICE_RESET_OICR_RECV, pf->state);
			clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(__ICE_PFR_REQ, pf->state);
			clear_bit(__ICE_CORER_REQ, pf->state);
			clear_bit(__ICE_GLOBR_REQ, pf->state);
			ice_reset_all_vfs(pf, true);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(__ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_bit(__ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(__ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(__ICE_DOWN, pf->state) &&
	    !test_bit(__ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

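/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */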
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	enum ice_status status;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
		/* fall through */
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_SW_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	devm_kfree(&vsi->back->pdev->dev, caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Flow Control: %s\n",
		    speed, fec_req, fec, fc);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

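/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */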
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int result;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events,
	 * don't bail on failure due to other book keeping needed
	 */
	result = ice_update_link_info(pi);
	if (result)
		dev_dbg(&pf->pdev->dev,
			"Failed to update link status and re-enable link events for port %d\n",
			pi->lport);

	/* if the link state hasn't changed, there is nothing to do */
	if (link_up == old_link && link_speed == old_link_speed)
		return result;

	vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	if (pf->num_alloc_vfs)
		ice_vc_notify_link_state(pf);

	return result;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__ICE_DOWN, pf->state) ||
	    test_bit(__ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw),
			"Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw),
			"Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(&pf->pdev->dev,
			"Could not process link event, error %d\n", status);

	return status;
}

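/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */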
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(__ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
			 q_type);
		return 0;
	}

	/* check for error indications on the receive queue and clear
	 * any error bits found
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue VF Error detected\n", qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue VF Error detected\n", qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len,
				     GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		enum ice_status ret;
		u16 opcode;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == ICE_ERR_AQ_NO_WORK)
			break;
		if (ret) {
			dev_err(&pf->pdev->dev,
				"%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(&pf->pdev->dev,
					"Could not handle link event\n");
			break;
		case ice_mbx_opc_send_msg_to_pf:
			ice_vc_process_vf_msg(pf, &event);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	devm_kfree(&pf->pdev->dev, event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
static void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(__ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 */
static void ice_service_task_stop(struct ice_pf *pf)
{
	set_bit(__ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume works (e.g WoL scenario)
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(__ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}

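/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from the service task. The OICR interrupt handler indicates that an
 * MDD event occurred by setting __ICE_MDD_EVENT_PENDING.
 */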
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool mdd_detected = false;
	u32 reg;
	int i;

	if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
		mdd_detected = true;
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
		mdd_detected = true;
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		bool pf_mdd_detected = false;

		reg = rd32(hw, PF_MDET_TX_PQM);
		if (reg & PF_MDET_TX_PQM_VALID_M) {
			wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}

		reg = rd32(hw, PF_MDET_TX_TCLAN);
		if (reg & PF_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}

		reg = rd32(hw, PF_MDET_RX);
		if (reg & PF_MDET_RX_VALID_M) {
			wr32(hw, PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}

		/* Queue an unload and reload of the PF if an MDD was caught */
		if (pf_mdd_detected) {
			set_bit(__ICE_NEEDS_RESTART, pf->state);
			ice_service_task_schedule(pf);
		}
	}

	/* check to see if one of the VFs caused the MDD */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		struct ice_vf *vf = &pf->vf[i];
		bool vf_mdd_detected = false;

		reg = rd32(hw, VP_MDET_TX_PQM(i));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
			vf_mdd_detected = true;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
			vf_mdd_detected = true;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(i));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
			vf_mdd_detected = true;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, VP_MDET_RX(i));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(i), 0xFFFF);
			vf_mdd_detected = true;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
		}

		if (vf_mdd_detected) {
			vf->num_mdd_events++;
			if (vf->num_mdd_events > 1)
				dev_info(&pf->pdev->dev, "VF %d has had %llu MDD events since last boot\n",
					 i, vf->num_mdd_events);
		}
	}
}

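/**
 * ice_service_task - manage and run subtasks
 * @work: pointer to work_struct contained by the PF struct
 */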
static void ice_service_task(struct work_struct *work)
{
	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
	unsigned long start_time = jiffies;

	/* process reset requests first */
	ice_reset_subtask(pf);

	/* bail if a reset/recovery cycle is pending or rebuild failed */
	if (ice_is_reset_in_progress(pf->state) ||
	    test_bit(__ICE_SUSPENDED, pf->state) ||
	    test_bit(__ICE_NEEDS_RESTART, pf->state)) {
		ice_service_task_complete(pf);
		return;
	}

	ice_check_for_hang_subtask(pf);
	ice_sync_fltr_subtask(pf);
	ice_handle_mdd_event(pf);
	ice_process_vflr_event(pf);
	ice_watchdog_subtask(pf);
	ice_clean_adminq_subtask(pf);
	ice_clean_mailboxq_subtask(pf);

	/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
	ice_service_task_complete(pf);

	/* If the tasks have taken longer than one service timer period
	 * or there is more work to be done, reset the service timer to
	 * schedule the service task now.
	 */
	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
	    test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
		mod_timer(&pf->serv_tmr, jiffies);
}

/**
 * ice_set_ctrlq_len - set control queue lengths
 * @hw: pointer to the HW instance
 */
static void ice_set_ctrlq_len(struct ice_hw *hw)
{
	hw->adminq.num_rq_entries = ICE_AQ_LEN;
	hw->adminq.num_sq_entries = ICE_AQ_LEN;
	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
}

/**
 * ice_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 */
static void
ice_irq_affinity_notify(struct irq_affinity_notify *notify,
			const cpumask_t *mask)
{
	struct ice_q_vector *q_vector =
		container_of(notify, struct ice_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * ice_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 */
static void ice_irq_affinity_release(struct kref __always_unused *ref) {}

/**
 * ice_vsi_ena_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 */
static int ice_vsi_ena_irq(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
		int i;

		ice_for_each_q_vector(vsi, i)
			ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
	}

	ice_flush(hw);
	return 0;
}

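/**
 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
 * @vsi: the VSI being configured
 * @basename: name for the vector
 */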
static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct ice_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;
	int irq_num;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[vector];

		irq_num = pf->msix_entries[base + vector].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = devm_request_irq(&pf->pdev->dev, irq_num,
				       vsi->irq_handler, 0,
				       q_vector->name, q_vector);
		if (err) {
			netdev_err(vsi->netdev,
				   "MSIX request_irq failed, error: %d\n", err);
			goto free_q_irqs;
		}

		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = ice_irq_affinity_notify;
		q_vector->affinity_notify.release = ice_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);

		/* assign the mask for this irq */
		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
	}

	vsi->irqs_ready = true;
	return 0;

free_q_irqs:
	while (vector) {
		vector--;
		irq_num = pf->msix_entries[base + vector].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		/* free with the same dev_id that was used in the request */
		devm_free_irq(&pf->pdev->dev, irq_num, vsi->q_vectors[vector]);
	}
	return err;
}

/**
 * ice_ena_misc_vector - enable the non-queue interrupts
 * @pf: board private structure
 */
static void ice_ena_misc_vector(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
	rd32(hw, PFINT_OICR);		/* read to clear */

	val = (PFINT_OICR_ECC_ERR_M |
	       PFINT_OICR_MAL_DETECT_M |
	       PFINT_OICR_GRST_M |
	       PFINT_OICR_PCI_EXCEPTION_M |
	       PFINT_OICR_VFLR_M |
	       PFINT_OICR_HMC_ERR_M |
	       PFINT_OICR_PE_CRITERR_M);

	wr32(hw, PFINT_OICR_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}

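/**
 * ice_misc_intr - misc interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure
 */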
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
	struct ice_pf *pf = (struct ice_pf *)data;
	struct ice_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 oicr, ena_mask;

	set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
	set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	oicr = rd32(hw, PFINT_OICR);
	ena_mask = rd32(hw, PFINT_OICR_ENA);

	if (oicr & PFINT_OICR_SWINT_M) {
		ena_mask &= ~PFINT_OICR_SWINT_M;
		pf->sw_int_count++;
	}

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
		set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
	}
	if (oicr & PFINT_OICR_VFLR_M) {
		ena_mask &= ~PFINT_OICR_VFLR_M;
		set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
	}

	if (oicr & PFINT_OICR_GRST_M) {
		u32 reset;

		/* we have a reset warning */
		ena_mask &= ~PFINT_OICR_GRST_M;
		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
			GLGEN_RSTAT_RESET_TYPE_S;

		if (reset == ICE_RESET_CORER)
			pf->corer_count++;
		else if (reset == ICE_RESET_GLOBR)
			pf->globr_count++;
		else if (reset == ICE_RESET_EMPR)
			pf->empr_count++;
		else
			dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n",
				reset);

		/* If a reset cycle isn't already in progress, we set a bit in
		 * pf->state so that the service task can start a reset/rebuild.
		 * We also make note of which reset happened so that peer
		 * devices/drivers can be informed.
		 */
		if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) {
			if (reset == ICE_RESET_CORER)
				set_bit(__ICE_CORER_RECV, pf->state);
			else if (reset == ICE_RESET_GLOBR)
				set_bit(__ICE_GLOBR_RECV, pf->state);
			else
				set_bit(__ICE_EMPR_RECV, pf->state);

			/* There are a couple of different bits at play here.
			 * hw->reset_ongoing indicates whether the hardware is
			 * in reset. This is set to true when a reset interrupt
			 * is received and remains set until
			 * ice_rebuild() has completed its sequence of events
			 * following the reset interrupt. During this time a
			 * variety of functions are not accessible.
			 *
			 * __ICE_RESET_OICR_RECV in pf->state indicates
			 * that a post OICR reset has been requested and the
			 * flow is in progress through ice_reset_subtask().
			 * Before rebuild is started in ice_rebuild, the state
			 * bit is cleared.
			 */
			hw->reset_ongoing = true;
		}
	}

	if (oicr & PFINT_OICR_HMC_ERR_M) {
		ena_mask &= ~PFINT_OICR_HMC_ERR_M;
		dev_dbg(&pf->pdev->dev,
			"HMC Error interrupt - info 0x%x, data 0x%x\n",
			rd32(hw, PFHMC_ERRORINFO),
			rd32(hw, PFHMC_ERRORDATA));
	}

	/* Report any remaining unexpected interrupts */
	oicr &= ena_mask;
	if (oicr) {
		dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
			oicr);
		/* If a critical error is pending there is no choice but to
		 * reset the device.
		 */
		if (oicr & (PFINT_OICR_PE_CRITERR_M |
			    PFINT_OICR_PCI_EXCEPTION_M |
			    PFINT_OICR_ECC_ERR_M)) {
			set_bit(__ICE_PFR_REQ, pf->state);
			ice_service_task_schedule(pf);
		}
	}
	ret = IRQ_HANDLED;

	if (!test_bit(__ICE_DOWN, pf->state)) {
		ice_service_task_schedule(pf);
		ice_irq_dynamic_ena(hw, NULL, NULL);
	}

	return ret;
}

/**
 * ice_dis_ctrlq_interrupts - disable control queue interrupts
 * @hw: pointer to HW structure
 */
static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
{
	/* disable Admin queue Interrupt causes */
	wr32(hw, PFINT_FW_CTL,
	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);

	/* disable Mailbox queue Interrupt causes */
	wr32(hw, PFINT_MBX_CTL,
	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);

	/* disable Control queue Interrupt causes */
	wr32(hw, PFINT_OICR_CTL,
	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);

	ice_flush(hw);
}

/**
 * ice_free_irq_msix_misc - Unroll misc vector setup
 * @pf: board private structure
 */
static void ice_free_irq_msix_misc(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	ice_dis_ctrlq_interrupts(hw);

	/* disable OICR interrupt */
	wr32(hw, PFINT_OICR_ENA, 0);
	ice_flush(hw);

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
		devm_free_irq(&pf->pdev->dev,
			      pf->msix_entries[pf->oicr_idx].vector, pf);
	}

	pf->num_avail_sw_msix += 1;
	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
}

/**
 * ice_ena_ctrlq_interrupts - enable control queue interrupts
 * @hw: pointer to HW structure
 * @reg_idx: HW vector index to associate the control queue interrupts with
 */
static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
{
	u32 val;

	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
	       PFINT_OICR_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_OICR_CTL, val);

	/* enable Admin queue Interrupt causes */
	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
	       PFINT_FW_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_FW_CTL, val);

	/* enable Mailbox queue Interrupt causes */
	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
	       PFINT_MBX_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_MBX_CTL, val);

	ice_flush(hw);
}

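/**
 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors. This is not used
 * when in MSI or Legacy interrupt mode.
 */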
static int ice_req_irq_msix_misc(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	int oicr_idx, err = 0;

	if (!pf->int_name[0])
		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
			 dev_driver_string(&pf->pdev->dev),
			 dev_name(&pf->pdev->dev));

	/* Do not request IRQ but do enable OICR interrupt since settings are
	 * lost during reset. Note that this function is called only during
	 * rebuild path and not while reset is in progress.
	 */
	if (ice_is_reset_in_progress(pf->state))
		goto skip_req_irq;

	/* reserve one vector in irq_tracker for misc interrupt */
	oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
	if (oicr_idx < 0)
		return oicr_idx;

	pf->num_avail_sw_msix -= 1;
	pf->oicr_idx = oicr_idx;

	err = devm_request_irq(&pf->pdev->dev,
			       pf->msix_entries[pf->oicr_idx].vector,
			       ice_misc_intr, 0, pf->int_name, pf);
	if (err) {
		dev_err(&pf->pdev->dev,
			"devm_request_irq for %s failed: %d\n",
			pf->int_name, err);
		ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
		pf->num_avail_sw_msix += 1;
		return err;
	}

skip_req_irq:
	ice_ena_misc_vector(pf);

	ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);

	ice_flush(hw);
	ice_irq_dynamic_ena(hw, NULL, NULL);

	return 0;
}

/**
 * ice_napi_add - register NAPI handler for the VSI
 * @vsi: VSI for which NAPI handler is to be registered
 *
 * This function is only called in the driver's load path. Registering the NAPI
 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
 * reset/rebuild, etc.)
 */
static void ice_napi_add(struct ice_vsi *vsi)
{
	int v_idx;

	if (!vsi->netdev)
		return;

	ice_for_each_q_vector(vsi, v_idx)
		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
			       ice_napi_poll, NAPI_POLL_WEIGHT);
}

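/**
 * ice_cfg_netdev - Allocate, configure and register a netdev
 * @vsi: the VSI associated with the new netdev
 *
 * Returns 0 on success, negative value on failure
 */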
static int ice_cfg_netdev(struct ice_vsi *vsi)
{
	netdev_features_t csumo_features;
	netdev_features_t vlano_features;
	netdev_features_t dflt_features;
	netdev_features_t tso_features;
	struct ice_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];
	int err;

	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
				    vsi->alloc_rxq);
	if (!netdev)
		return -ENOMEM;

	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	dflt_features = NETIF_F_SG	|
			NETIF_F_HIGHDMA	|
			NETIF_F_RXHASH;

	csumo_features = NETIF_F_RXCSUM	  |
			 NETIF_F_IP_CSUM  |
			 NETIF_F_SCTP_CRC |
			 NETIF_F_IPV6_CSUM;

	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
			 NETIF_F_HW_VLAN_CTAG_TX     |
			 NETIF_F_HW_VLAN_CTAG_RX;

	tso_features = NETIF_F_TSO;

	/* set features that user can change */
	netdev->hw_features = dflt_features | csumo_features |
			      vlano_features | tso_features;

	/* enable features */
	netdev->features |= netdev->hw_features;
	/* copy netdev features into list of user selectable features */
	netdev->hw_enc_features |= dflt_features | csumo_features |
				   tso_features;
	netdev->vlan_features |= dflt_features | csumo_features |
				 tso_features;

	if (vsi->type == ICE_VSI_PF) {
		SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);

		ether_addr_copy(netdev->dev_addr, mac_addr);
		ether_addr_copy(netdev->perm_addr, mac_addr);
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* assign netdev_ops */
	netdev->netdev_ops = &ice_netdev_ops;

	/* setup watchdog timeout value to be 5 second */
	netdev->watchdog_timeo = 5 * HZ;

	ice_set_ethtool_ops(netdev);

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = ICE_MAX_MTU;

	err = register_netdev(vsi->netdev);
	if (err)
		return err;

	netif_carrier_off(vsi->netdev);

	/* make sure transmit queues start off as stopped */
	netif_tx_stop_all_queues(vsi->netdev);

	return 0;
}

/**
 * ice_fill_rss_lut - Fill the RSS lookup table with default values
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 */
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}

/**
 * ice_pf_vsi_setup - Set up a PF VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
}

/**
 * ice_lb_vsi_setup - Set up a loopback VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */
struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
}

/**
 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol
 * @vid: VLAN ID to be added
 *
 * net_device_ops implementation for adding VLAN IDs
 */
static int
ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
		    u16 vid)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int ret;

	if (vid >= VLAN_N_VID) {
		netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
			   vid, VLAN_N_VID);
		return -EINVAL;
	}

	if (vsi->info.pvid)
		return -EINVAL;

	/* Enable VLAN pruning when VLAN 0 is added */
	if (unlikely(!vid)) {
		ret = ice_cfg_vlan_pruning(vsi, true, false);
		if (ret)
			return ret;
	}

	/* Add all VLAN IDs including 0 to the switch filter. VLAN ID 0 is
	 * needed to continue allowing all untagged packets since VLAN prune
	 * list is applied to all packets by the switch
	 */
	ret = ice_vsi_add_vlan(vsi, vid);
	if (!ret) {
		vsi->vlan_ena = true;
		set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
	}

	return ret;
}

/**
 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol
 * @vid: VLAN ID to be removed
 *
 * net_device_ops implementation for removing VLAN IDs
 */
static int
ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
		     u16 vid)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int ret;

	if (vsi->info.pvid)
		return -EINVAL;

	/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
	 * information
	 */
	ret = ice_vsi_kill_vlan(vsi, vid);
	if (ret)
		return ret;

	/* Disable VLAN pruning when VLAN 0 is removed */
	if (unlikely(!vid))
		ret = ice_cfg_vlan_pruning(vsi, false, false);

	vsi->vlan_ena = false;
	set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
	return ret;
}

/**
 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
 * @pf: board private structure
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_setup_pf_sw(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	int status = 0;

	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;

	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
	if (!vsi) {
		status = -ENOMEM;
		goto unroll_vsi_setup;
	}

	status = ice_cfg_netdev(vsi);
	if (status) {
		status = -ENODEV;
		goto unroll_vsi_setup;
	}

	/* registering the NAPI handler requires both the queues and
	 * netdev to be created, which are done in ice_pf_vsi_setup()
	 * and ice_cfg_netdev() respectively
	 */
	ice_napi_add(vsi);

	status = ice_init_mac_fltr(pf);
	if (status)
		goto unroll_napi_add;

	return status;

unroll_napi_add:
	if (vsi) {
		ice_napi_del(vsi);
		if (vsi->netdev) {
			if (vsi->netdev->reg_state == NETREG_REGISTERED)
				unregister_netdev(vsi->netdev);
			free_netdev(vsi->netdev);
			vsi->netdev = NULL;
		}
	}

unroll_vsi_setup:
	if (vsi) {
		ice_vsi_free_q_vectors(vsi);
		ice_vsi_delete(vsi);
		ice_vsi_put_qs(vsi);
		pf->q_left_tx += vsi->alloc_txq;
		pf->q_left_rx += vsi->alloc_rxq;
		ice_vsi_clear(vsi);
	}
	return status;
}

/**
 * ice_determine_q_usage - Calculate queue distribution
 * @pf: board private structure
 *
 * Calculate how many Tx/Rx queues the PF should use based on what the
 * hardware provides and how many CPUs are online.
 */
static void ice_determine_q_usage(struct ice_pf *pf)
{
	u16 q_left_tx, q_left_rx;

	q_left_tx = pf->hw.func_caps.common_cap.num_txq;
	q_left_rx = pf->hw.func_caps.common_cap.num_rxq;

	pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());

	/* only 1 Rx queue unless RSS is enabled */
	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		pf->num_lan_rx = 1;
	else
		pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus());

	pf->q_left_tx = q_left_tx - pf->num_lan_tx;
	pf->q_left_rx = q_left_rx - pf->num_lan_rx;
}

/**
 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
 * @pf: board private structure to de-initialize
 */
static void ice_deinit_pf(struct ice_pf *pf)
{
	ice_service_task_stop(pf);
	mutex_destroy(&pf->sw_mutex);
	mutex_destroy(&pf->avail_q_mutex);
}

/**
 * ice_init_pf - Initialize general software structures (struct ice_pf)
 * @pf: board private structure to initialize
 */
static void ice_init_pf(struct ice_pf *pf)
{
	bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
	set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
		struct ice_hw *hw = &pf->hw;

		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
		pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs,
					      ICE_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */

	mutex_init(&pf->sw_mutex);
	mutex_init(&pf->avail_q_mutex);

	/* clear the avail_txqs/avail_rxqs tracking bitmaps */
	mutex_lock(&pf->avail_q_mutex);
	bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
	bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
	mutex_unlock(&pf->avail_q_mutex);

	if (pf->hw.func_caps.common_cap.rss_table_size)
		set_bit(ICE_FLAG_RSS_ENA, pf->flags);

	/* setup service timer and periodic service task */
	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
	pf->serv_tmr_period = HZ;
	INIT_WORK(&pf->serv_task, ice_service_task);
	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

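/**
 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
 * @pf: board private structure
 *
 * compute the number of MSIX vectors required (v_budget) and request from
 * the OS. Return the number of vectors reserved or negative on failure
 */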
static int ice_ena_msix_range(struct ice_pf *pf)
{
	int v_left, v_actual, v_budget = 0;
	int needed, err, i;

	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;

	/* reserve one vector for miscellaneous handler */
	needed = 1;
	v_budget += needed;
	v_left -= needed;

	/* reserve vectors for LAN traffic */
	pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
	v_budget += pf->num_lan_msix;
	v_left -= pf->num_lan_msix;

	pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
					sizeof(*pf->msix_entries), GFP_KERNEL);

	if (!pf->msix_entries) {
		err = -ENOMEM;
		goto exit_err;
	}

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;

	/* actually reserve the vectors */
	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					 ICE_MIN_MSIX, v_budget);

	if (v_actual < 0) {
		dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
		err = v_actual;
		goto msix_err;
	}

	if (v_actual < v_budget) {
		dev_warn(&pf->pdev->dev,
			 "not enough vectors. requested = %d, obtained = %d\n",
			 v_budget, v_actual);
		if (v_actual >= (pf->num_lan_msix + 1)) {
			pf->num_avail_sw_msix = v_actual -
						(pf->num_lan_msix + 1);
		} else if (v_actual >= 2) {
			pf->num_lan_msix = 1;
			pf->num_avail_sw_msix = v_actual - 2;
		} else {
			pci_disable_msix(pf->pdev);
			err = -ERANGE;
			goto msix_err;
		}
	}

	return v_actual;

msix_err:
	devm_kfree(&pf->pdev->dev, pf->msix_entries);
	goto exit_err;

exit_err:
	pf->num_lan_msix = 0;
	clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
	return err;
}

/**
 * ice_dis_msix - Disable MSI-X interrupt setup in OS
 * @pf: board private structure
 */
static void ice_dis_msix(struct ice_pf *pf)
{
	pci_disable_msix(pf->pdev);
	devm_kfree(&pf->pdev->dev, pf->msix_entries);
	pf->msix_entries = NULL;
	clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
}

/**
 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
 * @pf: board private structure
 */
static void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		ice_dis_msix(pf);

	if (pf->irq_tracker) {
		devm_kfree(&pf->pdev->dev, pf->irq_tracker);
		pf->irq_tracker = NULL;
	}
}

/**
 * ice_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 */
static int ice_init_interrupt_scheme(struct ice_pf *pf)
{
	int vectors;

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		vectors = ice_ena_msix_range(pf);
	else
		return -ENODEV;

	if (vectors < 0)
		return vectors;

	/* set up vector assignment tracking */
	pf->irq_tracker =
		devm_kzalloc(&pf->pdev->dev, sizeof(*pf->irq_tracker) +
			     (sizeof(u16) * vectors), GFP_KERNEL);
	if (!pf->irq_tracker) {
		ice_dis_msix(pf);
		return -ENOMEM;
	}

	/* populate SW interrupts pool with number of OS granted IRQs. */
	pf->num_avail_sw_msix = vectors;
	pf->irq_tracker->num_entries = vectors;
	pf->irq_tracker->end = pf->irq_tracker->num_entries;

	return 0;
}

/**
 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
 * @pf: pointer to the PF structure
 *
 * There is no error returned here because the driver should be able to handle
 * 128 Byte cache lines, so we only print a warning in case issues are seen,
 * specifically with Tx.
 */
static void ice_verify_cacheline_size(struct ice_pf *pf)
{
	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
		dev_warn(&pf->pdev->dev,
			 "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
			 ICE_CACHE_LINE_BYTES);
}

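/**
 * ice_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in ice_pci_tbl
 *
 * Returns 0 on success, negative on failure
 */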
static int
ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
	struct device *dev = &pdev->dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int err;

	/* this driver uses devres for managed resource cleanup */
	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
	if (err) {
		dev_err(dev, "BAR0 I/O map error %d\n", err);
		return err;
	}

	pf = devm_kzalloc(dev, sizeof(*pf), GFP_KERNEL);
	if (!pf)
		return -ENOMEM;

	/* set up for high or low DMA */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
		return err;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	pf->pdev = pdev;
	pci_set_drvdata(pdev, pf);
	set_bit(__ICE_DOWN, pf->state);
	/* Disable service task until DOWN is cleared */
	set_bit(__ICE_SERVICE_DIS, pf->state);

	hw = &pf->hw;
	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
	hw->back = pf;
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	ice_set_ctrlq_len(hw);

	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);

#ifndef CONFIG_DYNAMIC_DEBUG
	if (debug < -1)
		hw->debug_mask = debug;
#endif

	err = ice_init_hw(hw);
	if (err) {
		dev_err(dev, "ice_init_hw failed: %d\n", err);
		err = -EIO;
		goto err_exit_unroll;
	}

	dev_info(dev, "firmware %d.%d.%05d api %d.%d\n",
		 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		 hw->api_maj_ver, hw->api_min_ver);

	ice_init_pf(pf);

	err = ice_init_pf_dcb(pf, false);
	if (err) {
		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);

		/* do not fail overall init if DCB init fails */
		err = 0;
	}

	ice_determine_q_usage(pf);

	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
	if (!pf->num_alloc_vsi) {
		err = -EIO;
		goto err_init_pf_unroll;
	}

	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
			       GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_init_pf_unroll;
	}

	err = ice_init_interrupt_scheme(pf);
	if (err) {
		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
		err = -EIO;
		goto err_init_interrupt_unroll;
	}

	/* Driver is mostly up */
	clear_bit(__ICE_DOWN, pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
		err = ice_req_irq_msix_misc(pf);
		if (err) {
			dev_err(dev, "setup of misc vector failed: %d\n", err);
			goto err_init_interrupt_unroll;
		}
	}

	/* create switch struct for the switch element created by FW on boot */
	pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
	if (!pf->first_sw) {
		err = -ENOMEM;
		goto err_msix_misc_unroll;
	}

	if (hw->evb_veb)
		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
	else
		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;

	pf->first_sw->pf = pf;

	/* record the sw_id available for later use */
	pf->first_sw->sw_id = hw->port_info->sw_id;

	err = ice_setup_pf_sw(pf);
	if (err) {
		dev_err(dev, "probe failed due to setup PF switch:%d\n", err);
		goto err_alloc_sw_unroll;
	}

	clear_bit(__ICE_SERVICE_DIS, pf->state);

	/* since everything is good, start the service timer */
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));

	err = ice_init_link_events(pf->hw.port_info);
	if (err) {
		dev_err(dev, "ice_init_link_events failed: %d\n", err);
		goto err_alloc_sw_unroll;
	}

	ice_verify_cacheline_size(pf);

	return 0;

err_alloc_sw_unroll:
	set_bit(__ICE_SERVICE_DIS, pf->state);
	set_bit(__ICE_DOWN, pf->state);
	devm_kfree(&pf->pdev->dev, pf->first_sw);
err_msix_misc_unroll:
	ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
	ice_clear_interrupt_scheme(pf);
	devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
	ice_deinit_pf(pf);
	ice_deinit_hw(hw);
err_exit_unroll:
	pci_disable_pcie_error_reporting(pdev);
	return err;
}

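/**
 * ice_remove - Device removal routine
 * @pdev: PCI device information struct
 */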
2460static void ice_remove(struct pci_dev *pdev)
2461{
2462 struct ice_pf *pf = pci_get_drvdata(pdev);
2463 int i;
2464
2465 if (!pf)
2466 return;
2467
2468 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
2469 if (!ice_is_reset_in_progress(pf->state))
2470 break;
2471 msleep(100);
2472 }
2473
2474 set_bit(__ICE_DOWN, pf->state);
2475 ice_service_task_stop(pf);
2476
2477 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
2478 ice_free_vfs(pf);
2479 ice_vsi_release_all(pf);
2480 ice_free_irq_msix_misc(pf);
2481 ice_for_each_vsi(pf, i) {
2482 if (!pf->vsi[i])
2483 continue;
2484 ice_vsi_free_q_vectors(pf->vsi[i]);
2485 }
2486 ice_clear_interrupt_scheme(pf);
2487 ice_deinit_pf(pf);
2488 ice_deinit_hw(&pf->hw);
2489 pci_disable_pcie_error_reporting(pdev);
2490}
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500static pci_ers_result_t
2501ice_pci_err_detected(struct pci_dev *pdev, enum pci_channel_state err)
2502{
2503 struct ice_pf *pf = pci_get_drvdata(pdev);
2504
2505 if (!pf) {
2506 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
2507 __func__, err);
2508 return PCI_ERS_RESULT_DISCONNECT;
2509 }
2510
2511 if (!test_bit(__ICE_SUSPENDED, pf->state)) {
2512 ice_service_task_stop(pf);
2513
2514 if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
2515 set_bit(__ICE_PFR_REQ, pf->state);
2516 ice_prepare_for_reset(pf);
2517 }
2518 }
2519
2520 return PCI_ERS_RESULT_NEED_RESET;
2521}
2522
2523
2524
2525
2526
2527
2528
2529
2530static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
2531{
2532 struct ice_pf *pf = pci_get_drvdata(pdev);
2533 pci_ers_result_t result;
2534 int err;
2535 u32 reg;
2536
2537 err = pci_enable_device_mem(pdev);
2538 if (err) {
2539 dev_err(&pdev->dev,
2540 "Cannot re-enable PCI device after reset, error %d\n",
2541 err);
2542 result = PCI_ERS_RESULT_DISCONNECT;
2543 } else {
2544 pci_set_master(pdev);
2545 pci_restore_state(pdev);
2546 pci_save_state(pdev);
2547 pci_wake_from_d3(pdev, false);
2548
2549
2550 reg = rd32(&pf->hw, GLGEN_RTRIG);
2551 if (!reg)
2552 result = PCI_ERS_RESULT_RECOVERED;
2553 else
2554 result = PCI_ERS_RESULT_DISCONNECT;
2555 }
2556
2557 err = pci_cleanup_aer_uncorrect_error_status(pdev);
2558 if (err)
2559 dev_dbg(&pdev->dev,
2560 "pci_cleanup_aer_uncorrect_error_status failed, error %d\n",
2561 err);
2562
2563
2564 return result;
2565}
2566
2567
2568
2569
2570
2571
2572
2573
2574static void ice_pci_err_resume(struct pci_dev *pdev)
2575{
2576 struct ice_pf *pf = pci_get_drvdata(pdev);
2577
2578 if (!pf) {
2579 dev_err(&pdev->dev,
2580 "%s failed, device is unrecoverable\n", __func__);
2581 return;
2582 }
2583
2584 if (test_bit(__ICE_SUSPENDED, pf->state)) {
2585 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
2586 __func__);
2587 return;
2588 }
2589
2590 ice_do_reset(pf, ICE_RESET_PFR);
2591 ice_service_task_restart(pf);
2592 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
2593}
2594
/**
 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
 * @pdev: a pointer to the pci_dev structure
 */
2599static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
2600{
2601 struct ice_pf *pf = pci_get_drvdata(pdev);
2602
2603 if (!test_bit(__ICE_SUSPENDED, pf->state)) {
2604 ice_service_task_stop(pf);
2605
2606 if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
2607 set_bit(__ICE_PFR_REQ, pf->state);
2608 ice_prepare_for_reset(pf);
2609 }
2610 }
2611}
2612
/**
 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
 * @pdev: a pointer to the pci_dev structure
 */
2617static void ice_pci_err_reset_done(struct pci_dev *pdev)
2618{
2619 ice_pci_err_resume(pdev);
2620}
2621
/* ice_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
2630static const struct pci_device_id ice_pci_tbl[] = {
2631 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
2632 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
2633 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
2634
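	/* required last entry */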
2635 { 0, }
2636};
2637MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
2638
2639static const struct pci_error_handlers ice_pci_err_handler = {
2640 .error_detected = ice_pci_err_detected,
2641 .slot_reset = ice_pci_err_slot_reset,
2642 .reset_prepare = ice_pci_err_reset_prepare,
2643 .reset_done = ice_pci_err_reset_done,
2644 .resume = ice_pci_err_resume
2645};
2646
2647static struct pci_driver ice_driver = {
2648 .name = KBUILD_MODNAME,
2649 .id_table = ice_pci_tbl,
2650 .probe = ice_probe,
2651 .remove = ice_remove,
2652 .sriov_configure = ice_sriov_configure,
2653 .err_handler = &ice_pci_err_handler
2654};
2655
/**
 * ice_module_init - Driver registration routine
 *
 * ice_module_init is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
2662static int __init ice_module_init(void)
2663{
2664 int status;
2665
2666 pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
2667 pr_info("%s\n", ice_copyright);
2668
2669 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
2670 if (!ice_wq) {
2671 pr_err("Failed to create workqueue\n");
2672 return -ENOMEM;
2673 }
2674
2675 status = pci_register_driver(&ice_driver);
2676 if (status) {
2677 pr_err("failed to register PCI driver, err %d\n", status);
2678 destroy_workqueue(ice_wq);
2679 }
2680
2681 return status;
2682}
2683module_init(ice_module_init);
2684
/**
 * ice_module_exit - Driver exit cleanup routine
 *
 * ice_module_exit is called just before the driver is removed
 * from memory.
 */
2691static void __exit ice_module_exit(void)
2692{
2693 pci_unregister_driver(&ice_driver);
2694 destroy_workqueue(ice_wq);
2695 pr_info("module unloaded\n");
2696}
2697module_exit(ice_module_exit);
2698
/**
 * ice_set_mac_address - NDO callback to set MAC address
 * @netdev: network interface device structure
 * @pi: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
2706static int ice_set_mac_address(struct net_device *netdev, void *pi)
2707{
2708 struct ice_netdev_priv *np = netdev_priv(netdev);
2709 struct ice_vsi *vsi = np->vsi;
2710 struct ice_pf *pf = vsi->back;
2711 struct ice_hw *hw = &pf->hw;
2712 struct sockaddr *addr = pi;
2713 enum ice_status status;
2714 LIST_HEAD(a_mac_list);
2715 LIST_HEAD(r_mac_list);
2716 u8 flags = 0;
2717 int err;
2718 u8 *mac;
2719
2720 mac = (u8 *)addr->sa_data;
2721
2722 if (!is_valid_ether_addr(mac))
2723 return -EADDRNOTAVAIL;
2724
2725 if (ether_addr_equal(netdev->dev_addr, mac)) {
2726 netdev_warn(netdev, "already using mac %pM\n", mac);
2727 return 0;
2728 }
2729
2730 if (test_bit(__ICE_DOWN, pf->state) ||
2731 ice_is_reset_in_progress(pf->state)) {
2732 netdev_err(netdev, "can't set mac %pM. device not ready\n",
2733 mac);
2734 return -EBUSY;
2735 }
2736
	/* When we change the MAC address we also have to change the MAC address
	 * based filter rules that were created previously for the old MAC
	 * address. So first, we remove the old filter rule using ice_remove_mac
	 * and then create a new filter rule using ice_add_mac. Both operations
	 * are run against temporary filter lists (r_mac_list for remove,
	 * a_mac_list for add), which are freed once the hardware has been
	 * updated, whether or not the update succeeded.
	 */
2747 err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr);
2748 if (err) {
2749 err = -EADDRNOTAVAIL;
2750 goto free_lists;
2751 }
2752
2753 status = ice_remove_mac(hw, &r_mac_list);
2754 if (status) {
2755 err = -EADDRNOTAVAIL;
2756 goto free_lists;
2757 }
2758
2759 err = ice_add_mac_to_list(vsi, &a_mac_list, mac);
2760 if (err) {
2761 err = -EADDRNOTAVAIL;
2762 goto free_lists;
2763 }
2764
2765 status = ice_add_mac(hw, &a_mac_list);
2766 if (status) {
2767 err = -EADDRNOTAVAIL;
2768 goto free_lists;
2769 }
2770
2771free_lists:
	/* free list entries */
2773 ice_free_fltr_list(&pf->pdev->dev, &r_mac_list);
2774 ice_free_fltr_list(&pf->pdev->dev, &a_mac_list);
2775
2776 if (err) {
2777 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
2778 mac);
2779 return err;
2780 }
2781
2782
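	/* change the netdev's MAC address */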
2783 memcpy(netdev->dev_addr, mac, netdev->addr_len);
2784 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
2785 netdev->dev_addr);
2786
2787
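	/* write new MAC address to the firmware */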
2788 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
2789 status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
2790 if (status) {
2791 netdev_err(netdev, "can't set MAC %pM. write to firmware failed.\n",
2792 mac);
2793 }
2794 return 0;
2795}
2796
/**
 * ice_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 */
2801static void ice_set_rx_mode(struct net_device *netdev)
2802{
2803 struct ice_netdev_priv *np = netdev_priv(netdev);
2804 struct ice_vsi *vsi = np->vsi;
2805
2806 if (!vsi)
2807 return;
2808
	/* Set the flags to synchronize filters
	 * ndo_set_rx_mode may be triggered even without actual change in netdev
	 * filters
	 */
2813 set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
2814 set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
2815 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
2816
	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
2820 ice_service_task_schedule(vsi->back);
2821}
2822
/**
 * ice_fdb_add - add an entry to the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @vid: VLAN ID
 * @flags: instructions from stack about fdb operation
 * @extack: netlink extended ack
 */
2833static int
2834ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
2835 struct net_device *dev, const unsigned char *addr, u16 vid,
2836 u16 flags, struct netlink_ext_ack __always_unused *extack)
2837{
2838 int err;
2839
2840 if (vid) {
2841 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
2842 return -EINVAL;
2843 }
2844 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
2845 netdev_err(dev, "FDB only supports static addresses\n");
2846 return -EINVAL;
2847 }
2848
2849 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
2850 err = dev_uc_add_excl(dev, addr);
2851 else if (is_multicast_ether_addr(addr))
2852 err = dev_mc_add_excl(dev, addr);
2853 else
2854 err = -EINVAL;
2855
2856
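	/* Only return duplicate errors if NLM_F_EXCL is set */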
2857 if (err == -EEXIST && !(flags & NLM_F_EXCL))
2858 err = 0;
2859
2860 return err;
2861}
2862
/**
 * ice_fdb_del - delete an entry from the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being removed
 * @vid: VLAN ID
 */
2871static int
2872ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
2873 struct net_device *dev, const unsigned char *addr,
2874 __always_unused u16 vid)
2875{
2876 int err;
2877
2878 if (ndm->ndm_state & NUD_PERMANENT) {
2879 netdev_err(dev, "FDB only supports static addresses\n");
2880 return -EINVAL;
2881 }
2882
2883 if (is_unicast_ether_addr(addr))
2884 err = dev_uc_del(dev, addr);
2885 else if (is_multicast_ether_addr(addr))
2886 err = dev_mc_del(dev, addr);
2887 else
2888 err = -EINVAL;
2889
2890 return err;
2891}
2892
/**
 * ice_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 */
2898static int
2899ice_set_features(struct net_device *netdev, netdev_features_t features)
2900{
2901 struct ice_netdev_priv *np = netdev_priv(netdev);
2902 struct ice_vsi *vsi = np->vsi;
2903 int ret = 0;
2904
	/* Multiple features can be changed in one call so keep features in
	 * separate if/else statements to guarantee each feature is checked
	 */
2908 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
2909 ret = ice_vsi_manage_rss_lut(vsi, true);
2910 else if (!(features & NETIF_F_RXHASH) &&
2911 netdev->features & NETIF_F_RXHASH)
2912 ret = ice_vsi_manage_rss_lut(vsi, false);
2913
2914 if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
2915 !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
2916 ret = ice_vsi_manage_vlan_stripping(vsi, true);
2917 else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
2918 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
2919 ret = ice_vsi_manage_vlan_stripping(vsi, false);
2920
2921 if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
2922 !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
2923 ret = ice_vsi_manage_vlan_insertion(vsi);
2924 else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
2925 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
2926 ret = ice_vsi_manage_vlan_insertion(vsi);
2927
2928 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2929 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2930 ret = ice_cfg_vlan_pruning(vsi, true, false);
2931 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2932 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2933 ret = ice_cfg_vlan_pruning(vsi, false, false);
2934
2935 return ret;
2936}
2937
/**
 * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
 * @vsi: VSI to setup VLAN properties for
 */
2942static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
2943{
2944 int ret = 0;
2945
2946 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2947 ret = ice_vsi_manage_vlan_stripping(vsi, true);
2948 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
2949 ret = ice_vsi_manage_vlan_insertion(vsi);
2950
2951 return ret;
2952}
2953
/**
 * ice_vsi_cfg - Setup the VSI
 * @vsi: the VSI being configured
 *
 * Return 0 on success and negative value on error
 */
2960int ice_vsi_cfg(struct ice_vsi *vsi)
2961{
2962 int err;
2963
2964 if (vsi->netdev) {
2965 ice_set_rx_mode(vsi->netdev);
2966
2967 err = ice_vsi_vlan_setup(vsi);
2968
2969 if (err)
2970 return err;
2971 }
2972 ice_vsi_cfg_dcb_rings(vsi);
2973
2974 err = ice_vsi_cfg_lan_txqs(vsi);
2975 if (!err)
2976 err = ice_vsi_cfg_rxqs(vsi);
2977
2978 return err;
2979}
2980
/**
 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 */
2985static void ice_napi_enable_all(struct ice_vsi *vsi)
2986{
2987 int q_idx;
2988
2989 if (!vsi->netdev)
2990 return;
2991
2992 ice_for_each_q_vector(vsi, q_idx) {
2993 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
2994
2995 if (q_vector->rx.ring || q_vector->tx.ring)
2996 napi_enable(&q_vector->napi);
2997 }
2998}
2999
/**
 * ice_up_complete - Finish the last steps of bringing up a connection
 * @vsi: The VSI being configured
 *
 * Return 0 on success and negative value on error
 */
3006static int ice_up_complete(struct ice_vsi *vsi)
3007{
3008 struct ice_pf *pf = vsi->back;
3009 int err;
3010
3011 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
3012 ice_vsi_cfg_msix(vsi);
3013 else
3014 return -ENOTSUPP;
3015
	/* Enable only Rx rings, Tx rings were enabled by the FW when the
	 * Tx queue group list was configured and the context bits were
	 * programmed using ice_vsi_cfg_txqs
	 */
3020 err = ice_vsi_start_rx_rings(vsi);
3021 if (err)
3022 return err;
3023
3024 clear_bit(__ICE_DOWN, vsi->state);
3025 ice_napi_enable_all(vsi);
3026 ice_vsi_ena_irq(vsi);
3027
3028 if (vsi->port_info &&
3029 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
3030 vsi->netdev) {
3031 ice_print_link_msg(vsi, true);
3032 netif_tx_start_all_queues(vsi->netdev);
3033 netif_carrier_on(vsi->netdev);
3034 }
3035
3036 ice_service_task_schedule(pf);
3037
3038 return 0;
3039}
3040
/**
 * ice_up - Bring the connection back up after being down
 * @vsi: VSI being configured
 */
3045int ice_up(struct ice_vsi *vsi)
3046{
3047 int err;
3048
3049 err = ice_vsi_cfg(vsi);
3050 if (!err)
3051 err = ice_up_complete(vsi);
3052
3053 return err;
3054}
3055
/**
 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
 * @ring: Tx or Rx ring to read stats from
 * @pkts: packets stats counter
 * @bytes: bytes stats counter
 *
 * This function fetches stats from the ring considering the atomic operations
 * that need to be performed to read u64 values on a 32 bit machine.
 */
3065static void
3066ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
3067{
3068 unsigned int start;
3069 *pkts = 0;
3070 *bytes = 0;
3071
3072 if (!ring)
3073 return;
3074 do {
3075 start = u64_stats_fetch_begin_irq(&ring->syncp);
3076 *pkts = ring->stats.pkts;
3077 *bytes = ring->stats.bytes;
3078 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3079}
3080
/**
 * ice_update_vsi_ring_stats - Update VSI stats counters
 * @vsi: the VSI to be updated
 */
3085static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
3086{
3087 struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
3088 struct ice_ring *ring;
3089 u64 pkts, bytes;
3090 int i;
3091
3092
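	/* reset netdev stats */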
3093 vsi_stats->tx_packets = 0;
3094 vsi_stats->tx_bytes = 0;
3095 vsi_stats->rx_packets = 0;
3096 vsi_stats->rx_bytes = 0;
3097
3098
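	/* reset non-netdev (extended) stats */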
3099 vsi->tx_restart = 0;
3100 vsi->tx_busy = 0;
3101 vsi->tx_linearize = 0;
3102 vsi->rx_buf_failed = 0;
3103 vsi->rx_page_failed = 0;
3104
3105 rcu_read_lock();
3106
3107
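	/* update Tx rings counters */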
3108 ice_for_each_txq(vsi, i) {
3109 ring = READ_ONCE(vsi->tx_rings[i]);
3110 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
3111 vsi_stats->tx_packets += pkts;
3112 vsi_stats->tx_bytes += bytes;
3113 vsi->tx_restart += ring->tx_stats.restart_q;
3114 vsi->tx_busy += ring->tx_stats.tx_busy;
3115 vsi->tx_linearize += ring->tx_stats.tx_linearize;
3116 }
3117
3118
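	/* update Rx rings counters */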
3119 ice_for_each_rxq(vsi, i) {
3120 ring = READ_ONCE(vsi->rx_rings[i]);
3121 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
3122 vsi_stats->rx_packets += pkts;
3123 vsi_stats->rx_bytes += bytes;
3124 vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
3125 vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
3126 }
3127
3128 rcu_read_unlock();
3129}
3130
/**
 * ice_update_vsi_stats - Update VSI stats counters
 * @vsi: the VSI to be updated
 */
3135static void ice_update_vsi_stats(struct ice_vsi *vsi)
3136{
3137 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
3138 struct ice_eth_stats *cur_es = &vsi->eth_stats;
3139 struct ice_pf *pf = vsi->back;
3140
3141 if (test_bit(__ICE_DOWN, vsi->state) ||
3142 test_bit(__ICE_CFG_BUSY, pf->state))
3143 return;
3144
3145
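	/* get stats as recorded by Tx/Rx rings */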
3146 ice_update_vsi_ring_stats(vsi);
3147
3148
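	/* get VSI stats as recorded by the hardware */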
3149 ice_update_eth_stats(vsi);
3150
3151 cur_ns->tx_errors = cur_es->tx_errors;
3152 cur_ns->rx_dropped = cur_es->rx_discards;
3153 cur_ns->tx_dropped = cur_es->tx_discards;
3154 cur_ns->multicast = cur_es->rx_multicast;
3155
3156
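	/* update some more netdev stats if this is main VSI */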
3157 if (vsi->type == ICE_VSI_PF) {
3158 cur_ns->rx_crc_errors = pf->stats.crc_errors;
3159 cur_ns->rx_errors = pf->stats.crc_errors +
3160 pf->stats.illegal_bytes;
3161 cur_ns->rx_length_errors = pf->stats.rx_len_errors;
3162 }
3163}
3164
/**
 * ice_update_pf_stats - Update PF port stats counters
 * @pf: PF whose stats need to be updated
 */
3169static void ice_update_pf_stats(struct ice_pf *pf)
3170{
3171 struct ice_hw_port_stats *prev_ps, *cur_ps;
3172 struct ice_hw *hw = &pf->hw;
3173 u8 pf_id;
3174
3175 prev_ps = &pf->stats_prev;
3176 cur_ps = &pf->stats;
3177 pf_id = hw->pf_id;
3178
3179 ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id),
3180 pf->stat_prev_loaded, &prev_ps->eth.rx_bytes,
3181 &cur_ps->eth.rx_bytes);
3182
3183 ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id),
3184 pf->stat_prev_loaded, &prev_ps->eth.rx_unicast,
3185 &cur_ps->eth.rx_unicast);
3186
3187 ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id),
3188 pf->stat_prev_loaded, &prev_ps->eth.rx_multicast,
3189 &cur_ps->eth.rx_multicast);
3190
3191 ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id),
3192 pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast,
3193 &cur_ps->eth.rx_broadcast);
3194
3195 ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id),
3196 pf->stat_prev_loaded, &prev_ps->eth.tx_bytes,
3197 &cur_ps->eth.tx_bytes);
3198
3199 ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id),
3200 pf->stat_prev_loaded, &prev_ps->eth.tx_unicast,
3201 &cur_ps->eth.tx_unicast);
3202
3203 ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id),
3204 pf->stat_prev_loaded, &prev_ps->eth.tx_multicast,
3205 &cur_ps->eth.tx_multicast);
3206
3207 ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id),
3208 pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast,
3209 &cur_ps->eth.tx_broadcast);
3210
3211 ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded,
3212 &prev_ps->tx_dropped_link_down,
3213 &cur_ps->tx_dropped_link_down);
3214
3215 ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id),
3216 pf->stat_prev_loaded, &prev_ps->rx_size_64,
3217 &cur_ps->rx_size_64);
3218
3219 ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id),
3220 pf->stat_prev_loaded, &prev_ps->rx_size_127,
3221 &cur_ps->rx_size_127);
3222
3223 ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id),
3224 pf->stat_prev_loaded, &prev_ps->rx_size_255,
3225 &cur_ps->rx_size_255);
3226
3227 ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id),
3228 pf->stat_prev_loaded, &prev_ps->rx_size_511,
3229 &cur_ps->rx_size_511);
3230
3231 ice_stat_update40(hw, GLPRT_PRC1023H(pf_id),
3232 GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded,
3233 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
3234
3235 ice_stat_update40(hw, GLPRT_PRC1522H(pf_id),
3236 GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded,
3237 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
3238
3239 ice_stat_update40(hw, GLPRT_PRC9522H(pf_id),
3240 GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded,
3241 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
3242
3243 ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id),
3244 pf->stat_prev_loaded, &prev_ps->tx_size_64,
3245 &cur_ps->tx_size_64);
3246
3247 ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id),
3248 pf->stat_prev_loaded, &prev_ps->tx_size_127,
3249 &cur_ps->tx_size_127);
3250
3251 ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id),
3252 pf->stat_prev_loaded, &prev_ps->tx_size_255,
3253 &cur_ps->tx_size_255);
3254
3255 ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id),
3256 pf->stat_prev_loaded, &prev_ps->tx_size_511,
3257 &cur_ps->tx_size_511);
3258
3259 ice_stat_update40(hw, GLPRT_PTC1023H(pf_id),
3260 GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded,
3261 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
3262
3263 ice_stat_update40(hw, GLPRT_PTC1522H(pf_id),
3264 GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded,
3265 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
3266
3267 ice_stat_update40(hw, GLPRT_PTC9522H(pf_id),
3268 GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded,
3269 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
3270
3271 ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded,
3272 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
3273
3274 ice_stat_update32(hw, GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded,
3275 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
3276
3277 ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded,
3278 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
3279
3280 ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded,
3281 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
3282
3283 ice_update_dcb_stats(pf);
3284
3285 ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded,
3286 &prev_ps->crc_errors, &cur_ps->crc_errors);
3287
3288 ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded,
3289 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
3290
3291 ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded,
3292 &prev_ps->mac_local_faults,
3293 &cur_ps->mac_local_faults);
3294
3295 ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded,
3296 &prev_ps->mac_remote_faults,
3297 &cur_ps->mac_remote_faults);
3298
3299 ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded,
3300 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
3301
3302 ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded,
3303 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
3304
3305 ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded,
3306 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
3307
3308 ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded,
3309 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
3310
3311 ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded,
3312 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
3313
3314 pf->stat_prev_loaded = true;
3315}
3316
/**
 * ice_get_stats64 - get statistics for network device structure
 * @netdev: network interface device structure
 * @stats: main device statistics structure
 */
3322static
3323void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3324{
3325 struct ice_netdev_priv *np = netdev_priv(netdev);
3326 struct rtnl_link_stats64 *vsi_stats;
3327 struct ice_vsi *vsi = np->vsi;
3328
3329 vsi_stats = &vsi->net_stats;
3330
3331 if (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq)
3332 return;
3333
	/* netdev packet/byte stats come from ring counters. These are obtained
	 * by summing up ring counters (done by ice_update_vsi_ring_stats).
	 */
3336 ice_update_vsi_ring_stats(vsi);
3337 stats->tx_packets = vsi_stats->tx_packets;
3338 stats->tx_bytes = vsi_stats->tx_bytes;
3339 stats->rx_packets = vsi_stats->rx_packets;
3340 stats->rx_bytes = vsi_stats->rx_bytes;
3341
	/* The rest of the stats can be read from the hardware but instead we
	 * just return values that the watchdog task has already obtained from
	 * the hardware.
	 */
3346 stats->multicast = vsi_stats->multicast;
3347 stats->tx_errors = vsi_stats->tx_errors;
3348 stats->tx_dropped = vsi_stats->tx_dropped;
3349 stats->rx_errors = vsi_stats->rx_errors;
3350 stats->rx_dropped = vsi_stats->rx_dropped;
3351 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
3352 stats->rx_length_errors = vsi_stats->rx_length_errors;
3353}
3354
/**
 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
 * @vsi: VSI having NAPI disabled
 */
3359static void ice_napi_disable_all(struct ice_vsi *vsi)
3360{
3361 int q_idx;
3362
3363 if (!vsi->netdev)
3364 return;
3365
3366 ice_for_each_q_vector(vsi, q_idx) {
3367 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
3368
3369 if (q_vector->rx.ring || q_vector->tx.ring)
3370 napi_disable(&q_vector->napi);
3371 }
3372}
3373
/**
 * ice_force_phys_link_state - Force the physical link state
 * @vsi: VSI to force the physical link state to up/down
 * @link_up: true/false indicates to set the physical link to up/down
 *
 * Force the physical link state by getting the current PHY capabilities from
 * hardware and setting the PHY config based on the determined capabilities. If
 * the requested link state already matches both the reported PHY capabilities
 * and the current link status, no configuration is performed.
 *
 * Returns 0 on success, negative on failure
 */
3386static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
3387{
3388 struct ice_aqc_get_phy_caps_data *pcaps;
3389 struct ice_aqc_set_phy_cfg_data *cfg;
3390 struct ice_port_info *pi;
3391 struct device *dev;
3392 int retcode;
3393
3394 if (!vsi || !vsi->port_info || !vsi->back)
3395 return -EINVAL;
3396 if (vsi->type != ICE_VSI_PF)
3397 return 0;
3398
3399 dev = &vsi->back->pdev->dev;
3400
3401 pi = vsi->port_info;
3402
3403 pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL);
3404 if (!pcaps)
3405 return -ENOMEM;
3406
3407 retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
3408 NULL);
3409 if (retcode) {
3410 dev_err(dev,
3411 "Failed to get phy capabilities, VSI %d error %d\n",
3412 vsi->vsi_num, retcode);
3413 retcode = -EIO;
3414 goto out;
3415 }
3416
3417
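	/* No change in link, nothing to do */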
3418 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
3419 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
3420 goto out;
3421
3422 cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
3423 if (!cfg) {
3424 retcode = -ENOMEM;
3425 goto out;
3426 }
3427
3428 cfg->phy_type_low = pcaps->phy_type_low;
3429 cfg->phy_type_high = pcaps->phy_type_high;
3430 cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3431 cfg->low_power_ctrl = pcaps->low_power_ctrl;
3432 cfg->eee_cap = pcaps->eee_cap;
3433 cfg->eeer_value = pcaps->eeer_value;
3434 cfg->link_fec_opt = pcaps->link_fec_options;
3435 if (link_up)
3436 cfg->caps |= ICE_AQ_PHY_ENA_LINK;
3437 else
3438 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
3439
3440 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL);
3441 if (retcode) {
3442 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
3443 vsi->vsi_num, retcode);
3444 retcode = -EIO;
3445 }
3446
3447 devm_kfree(dev, cfg);
3448out:
3449 devm_kfree(dev, pcaps);
3450 return retcode;
3451}
3452
/**
 * ice_down - Shutdown the connection
 * @vsi: The VSI being stopped
 */
3457int ice_down(struct ice_vsi *vsi)
3458{
3459 int i, tx_err, rx_err, link_err = 0;
3460
	/* Caller of this function is expected to set the
	 * vsi->state __ICE_DOWN bit
	 */
3464 if (vsi->netdev) {
3465 netif_carrier_off(vsi->netdev);
3466 netif_tx_disable(vsi->netdev);
3467 }
3468
3469 ice_vsi_dis_irq(vsi);
3470
3471 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
3472 if (tx_err)
3473 netdev_err(vsi->netdev,
3474 "Failed stop Tx rings, VSI %d error %d\n",
3475 vsi->vsi_num, tx_err);
3476
3477 rx_err = ice_vsi_stop_rx_rings(vsi);
3478 if (rx_err)
3479 netdev_err(vsi->netdev,
3480 "Failed stop Rx rings, VSI %d error %d\n",
3481 vsi->vsi_num, rx_err);
3482
3483 ice_napi_disable_all(vsi);
3484
3485 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
3486 link_err = ice_force_phys_link_state(vsi, false);
3487 if (link_err)
3488 netdev_err(vsi->netdev,
3489 "Failed to set physical link down, VSI %d error %d\n",
3490 vsi->vsi_num, link_err);
3491 }
3492
3493 ice_for_each_txq(vsi, i)
3494 ice_clean_tx_ring(vsi->tx_rings[i]);
3495
3496 ice_for_each_rxq(vsi, i)
3497 ice_clean_rx_ring(vsi->rx_rings[i]);
3498
3499 if (tx_err || rx_err || link_err) {
3500 netdev_err(vsi->netdev,
3501 "Failed to close VSI 0x%04X on switch 0x%04X\n",
3502 vsi->vsi_num, vsi->vsw->sw_id);
3503 return -EIO;
3504 }
3505
3506 return 0;
3507}
3508
/**
 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
 * @vsi: VSI having resources allocated
 *
 * Return 0 on success, negative on failure
 */
3515int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
3516{
3517 int i, err = 0;
3518
3519 if (!vsi->num_txq) {
3520 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
3521 vsi->vsi_num);
3522 return -EINVAL;
3523 }
3524
3525 ice_for_each_txq(vsi, i) {
3526 vsi->tx_rings[i]->netdev = vsi->netdev;
3527 err = ice_setup_tx_ring(vsi->tx_rings[i]);
3528 if (err)
3529 break;
3530 }
3531
3532 return err;
3533}
3534
/**
 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
 * @vsi: VSI having resources allocated
 *
 * Return 0 on success, negative on failure
 */
3541int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
3542{
3543 int i, err = 0;
3544
3545 if (!vsi->num_rxq) {
3546 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
3547 vsi->vsi_num);
3548 return -EINVAL;
3549 }
3550
3551 ice_for_each_rxq(vsi, i) {
3552 vsi->rx_rings[i]->netdev = vsi->netdev;
3553 err = ice_setup_rx_ring(vsi->rx_rings[i]);
3554 if (err)
3555 break;
3556 }
3557
3558 return err;
3559}
3560
/**
 * ice_vsi_req_irq - Request IRQ from the OS
 * @vsi: The VSI IRQ is being requested for
 * @basename: name for the vector
 *
 * Return 0 on success and a negative value on error
 */
3568static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename)
3569{
3570 struct ice_pf *pf = vsi->back;
3571 int err = -EINVAL;
3572
3573 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
3574 err = ice_vsi_req_irq_msix(vsi, basename);
3575
3576 return err;
3577}
3578
/**
 * ice_vsi_open - Called when a network interface is made active
 * @vsi: the VSI to open
 *
 * Initialization of the VSI
 *
 * Returns 0 on success, negative value on error
 */
3587static int ice_vsi_open(struct ice_vsi *vsi)
3588{
3589 char int_name[ICE_INT_NAME_STR_LEN];
3590 struct ice_pf *pf = vsi->back;
3591 int err;
3592
3593
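	/* allocate descriptors */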
3594 err = ice_vsi_setup_tx_rings(vsi);
3595 if (err)
3596 goto err_setup_tx;
3597
3598 err = ice_vsi_setup_rx_rings(vsi);
3599 if (err)
3600 goto err_setup_rx;
3601
3602 err = ice_vsi_cfg(vsi);
3603 if (err)
3604 goto err_setup_rx;
3605
3606 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
3607 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
3608 err = ice_vsi_req_irq(vsi, int_name);
3609 if (err)
3610 goto err_setup_rx;
3611
3612
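	/* Notify the stack of the actual queue counts. */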
3613 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
3614 if (err)
3615 goto err_set_qs;
3616
3617 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
3618 if (err)
3619 goto err_set_qs;
3620
3621 err = ice_up_complete(vsi);
3622 if (err)
3623 goto err_up_complete;
3624
3625 return 0;
3626
3627err_up_complete:
3628 ice_down(vsi);
3629err_set_qs:
3630 ice_vsi_free_irq(vsi);
3631err_setup_rx:
3632 ice_vsi_free_rx_rings(vsi);
3633err_setup_tx:
3634 ice_vsi_free_tx_rings(vsi);
3635
3636 return err;
3637}
3638
/**
 * ice_vsi_release_all - Delete all VSIs
 * @pf: PF from which all VSIs are being removed
 */
3643static void ice_vsi_release_all(struct ice_pf *pf)
3644{
3645 int err, i;
3646
3647 if (!pf->vsi)
3648 return;
3649
3650 ice_for_each_vsi(pf, i) {
3651 if (!pf->vsi[i])
3652 continue;
3653
3654 err = ice_vsi_release(pf->vsi[i]);
3655 if (err)
3656 dev_dbg(&pf->pdev->dev,
3657 "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
3658 i, err, pf->vsi[i]->vsi_num);
3659 }
3660}
3661
/**
 * ice_ena_vsi - resume a VSI
 * @vsi: the VSI being resumed
 * @locked: is the rtnl_lock already held
 */
3667static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
3668{
3669 int err = 0;
3670
3671 if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
3672 return err;
3673
3674 clear_bit(__ICE_NEEDS_RESTART, vsi->state);
3675
3676 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
3677 struct net_device *netd = vsi->netdev;
3678
3679 if (netif_running(vsi->netdev)) {
3680 if (locked) {
3681 err = netd->netdev_ops->ndo_open(netd);
3682 } else {
3683 rtnl_lock();
3684 err = netd->netdev_ops->ndo_open(netd);
3685 rtnl_unlock();
3686 }
3687 } else {
3688 err = ice_vsi_open(vsi);
3689 }
3690 }
3691
3692 return err;
3693}
3694
/**
 * ice_pf_ena_all_vsi - Resume all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
3700#ifdef CONFIG_DCB
3701int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
3702#else
3703static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
3704#endif
3705{
3706 int v;
3707
3708 ice_for_each_vsi(pf, v)
3709 if (pf->vsi[v])
3710 if (ice_ena_vsi(pf->vsi[v], locked))
3711 return -EIO;
3712
3713 return 0;
3714}
3715
/**
 * ice_vsi_rebuild_all - rebuild all VSIs in PF
 * @pf: the PF
 */
3720static int ice_vsi_rebuild_all(struct ice_pf *pf)
3721{
3722 int i;
3723
3724
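	/* loop through pf->vsi array and reinit the VSI if found */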
3725 ice_for_each_vsi(pf, i) {
3726 int err;
3727
3728 if (!pf->vsi[i])
3729 continue;
3730
3731 err = ice_vsi_rebuild(pf->vsi[i]);
3732 if (err) {
3733 dev_err(&pf->pdev->dev,
3734 "VSI at index %d rebuild failed\n",
3735 pf->vsi[i]->idx);
3736 return err;
3737 }
3738
3739 dev_info(&pf->pdev->dev,
3740 "VSI at index %d rebuilt. vsi_num = 0x%x\n",
3741 pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
3742 }
3743
3744 return 0;
3745}
3746
/**
 * ice_vsi_replay_all - replay all VSIs configuration in the PF
 * @pf: the PF
 */
3751static int ice_vsi_replay_all(struct ice_pf *pf)
3752{
3753 struct ice_hw *hw = &pf->hw;
3754 enum ice_status ret;
3755 int i;
3756
3757
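	/* loop through pf->vsi array and replay the VSI if found */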
3758 ice_for_each_vsi(pf, i) {
3759 if (!pf->vsi[i])
3760 continue;
3761
3762 ret = ice_replay_vsi(hw, pf->vsi[i]->idx);
3763 if (ret) {
3764 dev_err(&pf->pdev->dev,
3765 "VSI at index %d replay failed %d\n",
3766 pf->vsi[i]->idx, ret);
3767 return -EIO;
3768 }
3769
		/* Re-map HW VSI number, using VSI handle that has been
		 * previously validated in ice_replay_vsi() call above
		 */
3773 pf->vsi[i]->vsi_num = ice_get_hw_vsi_num(hw, pf->vsi[i]->idx);
3774
3775 dev_info(&pf->pdev->dev,
3776 "VSI at index %d filter replayed successfully - vsi_num %i\n",
3777 pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
3778 }
3779
3780
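	/* Clean up replay filter after successful re-configuration */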
3781 ice_replay_post(hw);
3782 return 0;
3783}
3784
/**
 * ice_rebuild - rebuild after reset
 * @pf: PF to rebuild
 */
3789static void ice_rebuild(struct ice_pf *pf)
3790{
3791 struct device *dev = &pf->pdev->dev;
3792 struct ice_hw *hw = &pf->hw;
3793 enum ice_status ret;
3794 int err, i;
3795
3796 if (test_bit(__ICE_DOWN, pf->state))
3797 goto clear_recovery;
3798
3799 dev_dbg(dev, "rebuilding PF\n");
3800
3801 ret = ice_init_all_ctrlq(hw);
3802 if (ret) {
3803 dev_err(dev, "control queues init failed %d\n", ret);
3804 goto err_init_ctrlq;
3805 }
3806
3807 ret = ice_clear_pf_cfg(hw);
3808 if (ret) {
3809 dev_err(dev, "clear PF configuration failed %d\n", ret);
3810 goto err_init_ctrlq;
3811 }
3812
3813 ice_clear_pxe_mode(hw);
3814
3815 ret = ice_get_caps(hw);
3816 if (ret) {
3817 dev_err(dev, "ice_get_caps failed %d\n", ret);
3818 goto err_init_ctrlq;
3819 }
3820
3821 err = ice_sched_init_port(hw->port_info);
3822 if (err)
3823 goto err_sched_init_port;
3824
3825 ice_dcb_rebuild(pf);
3826
3827 err = ice_vsi_rebuild_all(pf);
3828 if (err) {
3829 dev_err(dev, "ice_vsi_rebuild_all failed\n");
3830 goto err_vsi_rebuild;
3831 }
3832
3833 err = ice_update_link_info(hw->port_info);
3834 if (err)
3835 dev_err(&pf->pdev->dev, "Get link status error %d\n", err);
3836
3837
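	/* Replay all VSIs Configuration, including filters after reset */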
3838 if (ice_vsi_replay_all(pf)) {
3839 dev_err(&pf->pdev->dev,
3840 "error replaying VSI configurations with switch filter rules\n");
3841 goto err_vsi_rebuild;
3842 }
3843
3844
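	/* start misc vector */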
3845 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
3846 err = ice_req_irq_msix_misc(pf);
3847 if (err) {
3848 dev_err(dev, "misc vector setup failed: %d\n", err);
3849 goto err_vsi_rebuild;
3850 }
3851 }
3852
3853
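	/* restart the VSIs that were rebuilt and running before the reset */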
3854 err = ice_pf_ena_all_vsi(pf, false);
3855 if (err) {
3856 dev_err(&pf->pdev->dev, "error enabling VSIs\n");
		/* no need to disable VSIs in tear down path in ice_rebuild()
		 * since it is already taken care of in ice_vsi_open()
		 */
3860 goto err_vsi_rebuild;
3861 }
3862
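	/* bring each PF VSI's netdev carrier and queue state in line with the
	 * current link status
	 */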
3863 ice_for_each_vsi(pf, i) {
3864 bool link_up;
3865
3866 if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
3867 continue;
3868 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
3869 if (link_up) {
3870 netif_carrier_on(pf->vsi[i]->netdev);
3871 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
3872 } else {
3873 netif_carrier_off(pf->vsi[i]->netdev);
3874 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
3875 }
3876 }
3877
3878
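	/* if we get here, reset flow is successful */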
3879 clear_bit(__ICE_RESET_FAILED, pf->state);
3880 return;
3881
3882err_vsi_rebuild:
3883 ice_vsi_release_all(pf);
3884err_sched_init_port:
3885 ice_sched_cleanup_all(hw);
3886err_init_ctrlq:
3887 ice_shutdown_all_ctrlq(hw);
3888 set_bit(__ICE_RESET_FAILED, pf->state);
3889clear_recovery:
3890
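	/* set this bit in PF state to control service task scheduling */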
3891 set_bit(__ICE_NEEDS_RESTART, pf->state);
3892 dev_err(dev, "Rebuild failed, unload and reload driver\n");
3893}
3894
/**
 * ice_change_mtu - NDO callback to change the MTU
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
3902static int ice_change_mtu(struct net_device *netdev, int new_mtu)
3903{
3904 struct ice_netdev_priv *np = netdev_priv(netdev);
3905 struct ice_vsi *vsi = np->vsi;
3906 struct ice_pf *pf = vsi->back;
3907 u8 count = 0;
3908
3909 if (new_mtu == netdev->mtu) {
3910 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
3911 return 0;
3912 }
3913
3914 if (new_mtu < netdev->min_mtu) {
3915 netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
3916 netdev->min_mtu);
3917 return -EINVAL;
3918 } else if (new_mtu > netdev->max_mtu) {
3919 netdev_err(netdev, "new MTU invalid. max_mtu is %d\n",
			   netdev->max_mtu);
3921 return -EINVAL;
3922 }
3923
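	/* if a reset is in progress, wait for some time for it to complete */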
3924 do {
3925 if (ice_is_reset_in_progress(pf->state)) {
3926 count++;
3927 usleep_range(1000, 2000);
3928 } else {
3929 break;
3930 }
3931
3932 } while (count < 100);
3933
3934 if (count == 100) {
3935 netdev_err(netdev, "can't change MTU. Device is busy\n");
3936 return -EBUSY;
3937 }
3938
3939 netdev->mtu = new_mtu;
3940
3941
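	/* if VSI is up, bring it down and then back up */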
3942 if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
3943 int err;
3944
3945 err = ice_down(vsi);
3946 if (err) {
			netdev_err(netdev, "change MTU if_down err %d\n", err);
3948 return err;
3949 }
3950
3951 err = ice_up(vsi);
3952 if (err) {
3953 netdev_err(netdev, "change MTU if_up err %d\n", err);
3954 return err;
3955 }
3956 }
3957
3958 netdev_info(netdev, "changed MTU to %d\n", new_mtu);
3959 return 0;
3960}
3961
/**
 * ice_set_rss - Set RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 */
3971int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
3972{
3973 struct ice_pf *pf = vsi->back;
3974 struct ice_hw *hw = &pf->hw;
3975 enum ice_status status;
3976
3977 if (seed) {
3978 struct ice_aqc_get_set_rss_keys *buf =
3979 (struct ice_aqc_get_set_rss_keys *)seed;
3980
3981 status = ice_aq_set_rss_key(hw, vsi->idx, buf);
3982
3983 if (status) {
3984 dev_err(&pf->pdev->dev,
3985 "Cannot set RSS key, err %d aq_err %d\n",
3986 status, hw->adminq.rq_last_status);
3987 return -EIO;
3988 }
3989 }
3990
3991 if (lut) {
3992 status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
3993 lut, lut_size);
3994 if (status) {
3995 dev_err(&pf->pdev->dev,
3996 "Cannot set RSS lut, err %d aq_err %d\n",
3997 status, hw->adminq.rq_last_status);
3998 return -EIO;
3999 }
4000 }
4001
4002 return 0;
4003}
4004
/**
 * ice_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
4014int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
4015{
4016 struct ice_pf *pf = vsi->back;
4017 struct ice_hw *hw = &pf->hw;
4018 enum ice_status status;
4019
4020 if (seed) {
4021 struct ice_aqc_get_set_rss_keys *buf =
4022 (struct ice_aqc_get_set_rss_keys *)seed;
4023
4024 status = ice_aq_get_rss_key(hw, vsi->idx, buf);
4025 if (status) {
4026 dev_err(&pf->pdev->dev,
4027 "Cannot get RSS key, err %d aq_err %d\n",
4028 status, hw->adminq.rq_last_status);
4029 return -EIO;
4030 }
4031 }
4032
4033 if (lut) {
4034 status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
4035 lut, lut_size);
4036 if (status) {
4037 dev_err(&pf->pdev->dev,
4038 "Cannot get RSS lut, err %d aq_err %d\n",
4039 status, hw->adminq.rq_last_status);
4040 return -EIO;
4041 }
4042 }
4043
4044 return 0;
4045}
4046
/**
 * ice_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process ID
 * @seq: netlink sequence number
 * @dev: the netdev being configured
 * @filter_mask: filter mask passed in
 * @nlflags: netlink flags passed in
 *
 * Return the bridge mode (VEB/VEPA)
 */
4058static int
4059ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4060 struct net_device *dev, u32 filter_mask, int nlflags)
4061{
4062 struct ice_netdev_priv *np = netdev_priv(dev);
4063 struct ice_vsi *vsi = np->vsi;
4064 struct ice_pf *pf = vsi->back;
4065 u16 bmode;
4066
4067 bmode = pf->first_sw->bridge_mode;
4068
4069 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
4070 filter_mask, NULL);
4071}
4072
/**
 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
 * @vsi: Pointer to VSI structure
 * @bmode: Hardware bridge mode (VEB/VEPA)
 *
 * Returns 0 on success, negative on failure
 */
4080static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
4081{
4082 struct device *dev = &vsi->back->pdev->dev;
4083 struct ice_aqc_vsi_props *vsi_props;
4084 struct ice_hw *hw = &vsi->back->hw;
4085 struct ice_vsi_ctx *ctxt;
4086 enum ice_status status;
4087 int ret = 0;
4088
4089 vsi_props = &vsi->info;
4090
4091 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
4092 if (!ctxt)
4093 return -ENOMEM;
4094
4095 ctxt->info = vsi->info;
4096
4097 if (bmode == BRIDGE_MODE_VEB)
		/* Allow loopback on this VSI */
4099 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
4100 else
		/* Disallow loopback on this VSI */
4102 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
4103 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
4104
4105 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4106 if (status) {
4107 dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
4108 bmode, status, hw->adminq.sq_last_status);
4109 ret = -EIO;
4110 goto out;
4111 }
4112
4113 vsi_props->sw_flags = ctxt->info.sw_flags;
4114
4115out:
4116 devm_kfree(dev, ctxt);
4117 return ret;
4118}
4119
/**
 * ice_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge setlink flags
 * @extack: netlink extended ack
 *
 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
 * hooked up to. Iterates through the PF VSI list and sets the loopback mode
 * (if not already set) for all VSIs connected to this switch. Also updates
 * the unicast switch filter rules for the corresponding switch of the netdev.
 */
4132static int
4133ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4134 u16 __always_unused flags,
4135 struct netlink_ext_ack __always_unused *extack)
4136{
4137 struct ice_netdev_priv *np = netdev_priv(dev);
4138 struct ice_pf *pf = np->vsi->back;
4139 struct nlattr *attr, *br_spec;
4140 struct ice_hw *hw = &pf->hw;
4141 enum ice_status status;
4142 struct ice_sw *pf_sw;
4143 int rem, v, err = 0;
4144
4145 pf_sw = pf->first_sw;
4146
4147 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	if (!br_spec)
		return -EINVAL;

4149 nla_for_each_nested(attr, br_spec, rem) {
4150 __u16 mode;
4151
4152 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4153 continue;
4154 mode = nla_get_u16(attr);
4155 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4156 return -EINVAL;
4157
4158 if (mode == pf_sw->bridge_mode)
4159 continue;
4160
		/* Iterate through the PF VSI list and update the loopback
		 * mode of the VSI
		 */
4163 ice_for_each_vsi(pf, v) {
4164 if (!pf->vsi[v])
4165 continue;
4166 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
4167 if (err)
4168 return err;
4169 }
4170
4171 hw->evb_veb = (mode == BRIDGE_MODE_VEB);
4172
		/* Update the unicast switch filter rules for the corresponding
		 * switch of the netdev
		 */
4175 status = ice_update_sw_rule_bridge_mode(hw);
4176 if (status) {
4177 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n",
4178 mode, status, hw->adminq.sq_last_status);
4179
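			/* revert hw->evb_veb */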
4180 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
4181 return -EIO;
4182 }
4183
4184 pf_sw->bridge_mode = mode;
4185 }
4186
4187 return 0;
4188}
4189
/**
 * ice_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 */
4194static void ice_tx_timeout(struct net_device *netdev)
4195{
4196 struct ice_netdev_priv *np = netdev_priv(netdev);
4197 struct ice_ring *tx_ring = NULL;
4198 struct ice_vsi *vsi = np->vsi;
4199 struct ice_pf *pf = vsi->back;
4200 int hung_queue = -1;
4201 u32 i;
4202
4203 pf->tx_timeout_count++;
4204
4205
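	/* find the stopped queue the same way dev_watchdog() does */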
4206 for (i = 0; i < netdev->num_tx_queues; i++) {
4207 unsigned long trans_start;
4208 struct netdev_queue *q;
4209
4210 q = netdev_get_tx_queue(netdev, i);
4211 trans_start = q->trans_start;
4212 if (netif_xmit_stopped(q) &&
4213 time_after(jiffies,
4214 trans_start + netdev->watchdog_timeo)) {
4215 hung_queue = i;
4216 break;
4217 }
4218 }
4219
4220 if (i == netdev->num_tx_queues)
4221 netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
4222 else
		/* now that we have an index, find the tx_ring struct */
4224 for (i = 0; i < vsi->num_txq; i++)
4225 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
4226 if (hung_queue == vsi->tx_rings[i]->q_index) {
4227 tx_ring = vsi->tx_rings[i];
4228 break;
4229 }
4230
	/* Reset recovery level if enough time has elapsed after last timeout.
	 * Also ensure no new reset action happens before next timeout period.
	 */
4234 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
4235 pf->tx_timeout_recovery_level = 1;
4236 else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
4237 netdev->watchdog_timeo)))
4238 return;
4239
4240 if (tx_ring) {
4241 struct ice_hw *hw = &pf->hw;
4242 u32 head, val = 0;
4243
4244 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) &
4245 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
4246
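		/* Read interrupt register */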
4247 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
4248 val = rd32(hw,
4249 GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
4250
4251 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
4252 vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
4253 head, tx_ring->next_to_use, val);
4254 }
4255
4256 pf->tx_timeout_last_recovery = jiffies;
4257 netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
4258 pf->tx_timeout_recovery_level, hung_queue);
4259
4260 switch (pf->tx_timeout_recovery_level) {
4261 case 1:
4262 set_bit(__ICE_PFR_REQ, pf->state);
4263 break;
4264 case 2:
4265 set_bit(__ICE_CORER_REQ, pf->state);
4266 break;
4267 case 3:
4268 set_bit(__ICE_GLOBR_REQ, pf->state);
4269 break;
4270 default:
4271 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
4272 set_bit(__ICE_DOWN, pf->state);
4273 set_bit(__ICE_NEEDS_RESTART, vsi->state);
4274 set_bit(__ICE_SERVICE_DIS, pf->state);
4275 break;
4276 }
4277
4278 ice_service_task_schedule(pf);
4279 pf->tx_timeout_recovery_level++;
4280}
4281
/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
4294int ice_open(struct net_device *netdev)
4295{
4296 struct ice_netdev_priv *np = netdev_priv(netdev);
4297 struct ice_vsi *vsi = np->vsi;
4298 int err;
4299
4300 if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) {
4301 netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
4302 return -EIO;
4303 }
4304
4305 netif_carrier_off(netdev);
4306
4307 err = ice_force_phys_link_state(vsi, true);
4308 if (err) {
4309 netdev_err(netdev,
4310 "Failed to set physical link up, error %d\n", err);
4311 return err;
4312 }
4313
4314 err = ice_vsi_open(vsi);
4315 if (err)
4316 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
4317 vsi->vsi_num, vsi->vsw->sw_id);
4318 return err;
4319}
4320
/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns success only - not allowed to fail
 */
4331int ice_stop(struct net_device *netdev)
4332{
4333 struct ice_netdev_priv *np = netdev_priv(netdev);
4334 struct ice_vsi *vsi = np->vsi;
4335
4336 ice_vsi_close(vsi);
4337
4338 return 0;
4339}
4340
/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
4347static netdev_features_t
4348ice_features_check(struct sk_buff *skb,
4349 struct net_device __always_unused *netdev,
4350 netdev_features_t features)
4351{
4352 size_t len;
4353
	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
4358 if (skb->ip_summed != CHECKSUM_PARTIAL)
4359 return features;
4360
	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
4364 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
4365 features &= ~NETIF_F_GSO_MASK;
4366
4367 len = skb_network_header(skb) - skb->data;
4368 if (len & ~(ICE_TXD_MACLEN_MAX))
4369 goto out_rm_features;
4370
4371 len = skb_transport_header(skb) - skb_network_header(skb);
4372 if (len & ~(ICE_TXD_IPLEN_MAX))
4373 goto out_rm_features;
4374
4375 if (skb->encapsulation) {
4376 len = skb_inner_network_header(skb) - skb_transport_header(skb);
4377 if (len & ~(ICE_TXD_L4LEN_MAX))
4378 goto out_rm_features;
4379
4380 len = skb_inner_transport_header(skb) -
4381 skb_inner_network_header(skb);
4382 if (len & ~(ICE_TXD_IPLEN_MAX))
4383 goto out_rm_features;
4384 }
4385
4386 return features;
4387out_rm_features:
4388 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4389}
4390
4391static const struct net_device_ops ice_netdev_ops = {
4392 .ndo_open = ice_open,
4393 .ndo_stop = ice_stop,
4394 .ndo_start_xmit = ice_start_xmit,
4395 .ndo_features_check = ice_features_check,
4396 .ndo_set_rx_mode = ice_set_rx_mode,
4397 .ndo_set_mac_address = ice_set_mac_address,
4398 .ndo_validate_addr = eth_validate_addr,
4399 .ndo_change_mtu = ice_change_mtu,
4400 .ndo_get_stats64 = ice_get_stats64,
4401 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
4402 .ndo_set_vf_mac = ice_set_vf_mac,
4403 .ndo_get_vf_config = ice_get_vf_cfg,
4404 .ndo_set_vf_trust = ice_set_vf_trust,
4405 .ndo_set_vf_vlan = ice_set_vf_port_vlan,
4406 .ndo_set_vf_link_state = ice_set_vf_link_state,
4407 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
4408 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
4409 .ndo_set_features = ice_set_features,
4410 .ndo_bridge_getlink = ice_bridge_getlink,
4411 .ndo_bridge_setlink = ice_bridge_setlink,
4412 .ndo_fdb_add = ice_fdb_add,
4413 .ndo_fdb_del = ice_fdb_del,
4414 .ndo_tx_timeout = ice_tx_timeout,
4415};
4416