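// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */
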
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

#define DRV_VERSION	"0.7.4-k"
#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf);

static void ice_vsi_release_all(struct ice_pf *pf);
static void ice_update_vsi_stats(struct ice_vsi *vsi);
static void ice_update_pf_stats(struct ice_pf *pf);
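
/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */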
static u32 ice_get_tx_pending(struct ice_ring *ring)
{
	u32 head, tail;

	head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
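
/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */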
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	unsigned int i;
	u32 v, v_idx;
	int packets;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	for (i = 0; i < vsi->num_txq; i++) {
		struct ice_ring *tx_ring = vsi->tx_rings[i];

		if (tx_ring && tx_ring->desc) {
			int itr = ICE_ITR_NONE;

			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.pkts & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				v_idx = tx_ring->q_vector->v_idx;
				wr32(&vsi->back->hw,
				     GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
				     (itr << GLINT_DYN_CTL_ITR_INDX_S) |
				     GLINT_DYN_CTL_SWINT_TRIG_M |
				     GLINT_DYN_CTL_INTENA_MSK_M);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}
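
/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is passed to the driver's unicast and
 * multicast address syncing routines.
 */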
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr))
		return -EINVAL;

	return 0;
}
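
/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is passed to the driver's unicast and
 * multicast address un-syncing routines.
 */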
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr))
		return -EINVAL;

	return 0;
}
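
/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */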
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
}
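
/**
 * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @set_promisc: enable or disable promisc flag request
 */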
static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
{
	struct ice_hw *hw = &vsi->back->hw;
	enum ice_status status = 0;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (vsi->vlan_ena) {
		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
						  set_promisc);
	} else {
		if (set_promisc)
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     0);
		else
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       0);
	}

	if (status)
		return -EIO;

	return 0;
}
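
/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */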
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status = 0;
	u32 changed_flags = 0;
	u8 promisc_m;
	int err = 0;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
	ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
	if (status) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (status == ICE_ERR_NO_MEMORY) {
			err = -ENOMEM;
			goto out;
		}
	}

	/* Add MAC addresses in the sync list */
	status = ice_add_mac(hw, &vsi->tmp_sync_list);
	ice_free_fltr_list(dev, &vsi->tmp_sync_list);
	/* If filter is added successfully or already exists, do not go into
	 * promiscuous mode
	 */
	if (status && status != ICE_ERR_ALREADY_EXISTS) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev,
				    "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			err = -EIO;
			goto out;
		}
	}
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			if (vsi->vlan_ena)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_cfg_promisc(vsi, promisc_m, true);
			if (err) {
				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
			if (vsi->vlan_ena)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_cfg_promisc(vsi, promisc_m, false);
			if (err) {
				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
		clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
						  ICE_FLTR_RX);
			if (status) {
				netdev_err(netdev, "Error setting default VSI %i Rx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
						  ICE_FLTR_RX);
			if (status) {
				netdev_err(netdev, "Error clearing default VSI %i Rx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
	set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
exit:
	clear_bit(__ICE_CFG_BUSY, vsi->state);
	return err;
}
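
/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */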
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}
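
/**
 * ice_dis_vsi - pause a VSI
 * @vsi: the VSI being paused
 * @locked: is the rtnl_lock already held
 */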
static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
	if (test_bit(__ICE_DOWN, vsi->state))
		return;

	set_bit(__ICE_NEEDS_RESTART, vsi->state);

	if (vsi->type == ICE_VSI_PF && vsi->netdev) {
		if (netif_running(vsi->netdev)) {
			if (!locked) {
				rtnl_lock();
				vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
				rtnl_unlock();
			} else {
				vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
			}
		} else {
			ice_vsi_close(vsi);
		}
	}
}
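
/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */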
#ifdef CONFIG_DCB
void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
#else
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
#endif /* CONFIG_DCB */
{
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);
}
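
/**
 * ice_prepare_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for reset.
 */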
static void
ice_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* already prepared for reset */
	if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
		return;

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
}
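
/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested
 */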
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = &pf->pdev->dev;
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
	WARN_ON(in_interrupt());

	ice_prepare_for_reset(pf);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(__ICE_RESET_FAILED, pf->state);
		clear_bit(__ICE_RESET_OICR_RECV, pf->state);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(__ICE_PFR_REQ, pf->state);
		clear_bit(__ICE_CORER_REQ, pf->state);
		clear_bit(__ICE_GLOBR_REQ, pf->state);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(__ICE_PFR_REQ, pf->state);
		ice_reset_all_vfs(pf, true);
	}
}
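
/**
 * ice_reset_subtask - check for reset requests and service them
 * @pf: board private structure
 */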
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for an upcoming reset before proceeding.
	 */
	if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(__ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(__ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(__ICE_RESET_OICR_RECV, pf->state);
			clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(__ICE_PFR_REQ, pf->state);
			clear_bit(__ICE_CORER_REQ, pf->state);
			clear_bit(__ICE_GLOBR_REQ, pf->state);
			ice_reset_all_vfs(pf, true);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(__ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_bit(__ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(__ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(__ICE_DOWN, pf->state) &&
	    !test_bit(__ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}
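
/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */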
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	const char *speed;
	const char *fc;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "RX/TX";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "TX";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "RX";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	netdev_info(vsi->netdev, "NIC Link is up %sbps, Flow Control: %s\n",
		    speed, fc);
}
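
/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */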
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}
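
/**
 * ice_link_event - process the link event
 * @pf: pf that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */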
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int result;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events,
	 * don't bail on failure due to other book keeping needed
	 */
	result = ice_update_link_info(pi);
	if (result)
		dev_dbg(&pf->pdev->dev,
			"Failed to update link status and re-enable link events for port %d\n",
			pi->lport);

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return result;

	vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	if (pf->num_alloc_vfs)
		ice_vc_notify_link_state(pf);

	return result;
}
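
/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */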
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__ICE_DOWN, pf->state) ||
	    test_bit(__ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}
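
/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */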
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw),
			"Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw),
			"Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}
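
/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: pf that the link event is associated with
 * @event: event structure containing link status info
 */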
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(&pf->pdev->dev,
			"Could not process link event, error %d\n", status);

	return status;
}
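
/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */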
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(__ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
			 q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue VF Error detected\n", qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue VF Error detected\n", qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len,
				     GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		enum ice_status ret;
		u16 opcode;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == ICE_ERR_AQ_NO_WORK)
			break;
		if (ret) {
			dev_err(&pf->pdev->dev,
				"%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(&pf->pdev->dev,
					"Could not handle link event\n");
			break;
		case ice_mbx_opc_send_msg_to_pf:
			ice_vc_process_vf_msg(pf, &event);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	devm_kfree(&pf->pdev->dev, event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}
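
/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to the HW struct
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */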
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}
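
/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */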
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}
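
/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */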
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}
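
/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */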
static void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(__ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}
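
/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */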
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}
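
/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 */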
static void ice_service_task_stop(struct ice_pf *pf)
{
	set_bit(__ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}
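
/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 */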
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(__ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}
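
/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */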
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}
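
/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicated MDD event.
 */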
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool mdd_detected = false;
	u32 reg;
	int i;

	if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
		mdd_detected = true;
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
		mdd_detected = true;
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		bool pf_mdd_detected = false;

		reg = rd32(hw, PF_MDET_TX_PQM);
		if (reg & PF_MDET_TX_PQM_VALID_M) {
			wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}

		reg = rd32(hw, PF_MDET_TX_TCLAN);
		if (reg & PF_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}

		reg = rd32(hw, PF_MDET_RX);
		if (reg & PF_MDET_RX_VALID_M) {
			wr32(hw, PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, initiate a reset */
		if (pf_mdd_detected) {
			set_bit(__ICE_NEEDS_RESTART, pf->state);
			ice_service_task_schedule(pf);
		}
	}

	/* see if one of the VFs needs to be reset */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		struct ice_vf *vf = &pf->vf[i];

		mdd_detected = false;

		reg = rd32(hw, VP_MDET_TX_PQM(i));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
			mdd_detected = true;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
			mdd_detected = true;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(i));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
			mdd_detected = true;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, VP_MDET_RX(i));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(i), 0xFFFF);
			mdd_detected = true;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
		}

		if (mdd_detected) {
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
		}
	}
}
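
/**
 * ice_service_task - manage and run subtasks
 * @work: pointer to work_struct contained by the PF struct
 */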
static void ice_service_task(struct work_struct *work)
{
	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
	unsigned long start_time = jiffies;

	/* subtasks */

	/* process reset requests first */
	ice_reset_subtask(pf);

	/* bail if a reset/recovery cycle is pending or rebuild failed */
	if (ice_is_reset_in_progress(pf->state) ||
	    test_bit(__ICE_SUSPENDED, pf->state) ||
	    test_bit(__ICE_NEEDS_RESTART, pf->state)) {
		ice_service_task_complete(pf);
		return;
	}

	ice_check_for_hang_subtask(pf);
	ice_sync_fltr_subtask(pf);
	ice_handle_mdd_event(pf);
	ice_process_vflr_event(pf);
	ice_watchdog_subtask(pf);
	ice_clean_adminq_subtask(pf);
	ice_clean_mailboxq_subtask(pf);

	/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
	ice_service_task_complete(pf);

	/* If the tasks have taken longer than one service timer period
	 * or there is more work to be done, reset the service timer to
	 * schedule the service task now.
	 */
	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
	    test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
		mod_timer(&pf->serv_tmr, jiffies);
}
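
/**
 * ice_set_ctrlq_len - helper function to set controlq length
 * @hw: pointer to the HW instance
 */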
static void ice_set_ctrlq_len(struct ice_hw *hw)
{
	hw->adminq.num_rq_entries = ICE_AQ_LEN;
	hw->adminq.num_sq_entries = ICE_AQ_LEN;
	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
}
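
/**
 * ice_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 */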
static void
ice_irq_affinity_notify(struct irq_affinity_notify *notify,
			const cpumask_t *mask)
{
	struct ice_q_vector *q_vector =
		container_of(notify, struct ice_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}
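
/**
 * ice_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 */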
static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
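
/**
 * ice_vsi_ena_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 */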
static int ice_vsi_ena_irq(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
		int i;

		ice_for_each_q_vector(vsi, i)
			ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
	}

	ice_flush(hw);
	return 0;
}
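
/**
 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
 * @vsi: the VSI being configured
 * @basename: name for the vector
 */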
static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct ice_pf *pf = vsi->back;
	int base = vsi->sw_base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;
	int irq_num;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[vector];

		irq_num = pf->msix_entries[base + vector].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = devm_request_irq(&pf->pdev->dev, irq_num,
				       vsi->irq_handler, 0,
				       q_vector->name, q_vector);
		if (err) {
			netdev_err(vsi->netdev,
				   "MSIX request_irq failed, error: %d\n", err);
			goto free_q_irqs;
		}

		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = ice_irq_affinity_notify;
		q_vector->affinity_notify.release = ice_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);

		/* assign the mask for this irq */
		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
	}

	vsi->irqs_ready = true;
	return 0;

free_q_irqs:
	while (vector) {
		vector--;
		irq_num = pf->msix_entries[base + vector].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		devm_free_irq(&pf->pdev->dev, irq_num, vsi->q_vectors[vector]);
	}
	return err;
}
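
/**
 * ice_ena_misc_vector - enable the non-queue interrupts
 * @pf: board private structure
 */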
static void ice_ena_misc_vector(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
	rd32(hw, PFINT_OICR);		/* read to clear */

	val = (PFINT_OICR_ECC_ERR_M |
	       PFINT_OICR_MAL_DETECT_M |
	       PFINT_OICR_GRST_M |
	       PFINT_OICR_PCI_EXCEPTION_M |
	       PFINT_OICR_VFLR_M |
	       PFINT_OICR_HMC_ERR_M |
	       PFINT_OICR_PE_CRITERR_M);

	wr32(hw, PFINT_OICR_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}
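
/**
 * ice_misc_intr - misc interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure
 */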
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
	struct ice_pf *pf = (struct ice_pf *)data;
	struct ice_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 oicr, ena_mask;

	set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
	set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	oicr = rd32(hw, PFINT_OICR);
	ena_mask = rd32(hw, PFINT_OICR_ENA);

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
		set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
	}
	if (oicr & PFINT_OICR_VFLR_M) {
		ena_mask &= ~PFINT_OICR_VFLR_M;
		set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
	}

	if (oicr & PFINT_OICR_GRST_M) {
		u32 reset;

		/* we have a reset warning */
		ena_mask &= ~PFINT_OICR_GRST_M;
		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
			GLGEN_RSTAT_RESET_TYPE_S;

		if (reset == ICE_RESET_CORER)
			pf->corer_count++;
		else if (reset == ICE_RESET_GLOBR)
			pf->globr_count++;
		else if (reset == ICE_RESET_EMPR)
			pf->empr_count++;
		else
			dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n",
				reset);

		/* If a reset cycle isn't already in progress, we set a bit in
		 * pf->state so that the service task can start a reset/rebuild.
		 * We also make note of which reset happened so that peer
		 * devices/drivers can be informed.
		 */
		if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) {
			if (reset == ICE_RESET_CORER)
				set_bit(__ICE_CORER_RECV, pf->state);
			else if (reset == ICE_RESET_GLOBR)
				set_bit(__ICE_GLOBR_RECV, pf->state);
			else
				set_bit(__ICE_EMPR_RECV, pf->state);

			/* There are a couple of different bits at play here.
			 * hw->reset_ongoing indicates whether the hardware is
			 * in reset. This is set to true when a reset interrupt
			 * is detected and set back to false after the driver
			 * has determined that the hardware is out of reset.
			 *
			 * __ICE_RESET_OICR_RECV in pf->state indicates
			 * that a post reset rebuild is required before the
			 * driver is operational again. This is set above.
			 *
			 * As this is the start of the reset/rebuild cycle, set
			 * both to indicate that.
			 */
			hw->reset_ongoing = true;
		}
	}

	if (oicr & PFINT_OICR_HMC_ERR_M) {
		ena_mask &= ~PFINT_OICR_HMC_ERR_M;
		dev_dbg(&pf->pdev->dev,
			"HMC Error interrupt - info 0x%x, data 0x%x\n",
			rd32(hw, PFHMC_ERRORINFO),
			rd32(hw, PFHMC_ERRORDATA));
	}

	/* Report any remaining unexpected interrupts */
	oicr &= ena_mask;
	if (oicr) {
		dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
			oicr);
		/* If a critical error is pending there is no choice but to
		 * reset the device.
		 */
		if (oicr & (PFINT_OICR_PE_CRITERR_M |
			    PFINT_OICR_PCI_EXCEPTION_M |
			    PFINT_OICR_ECC_ERR_M)) {
			set_bit(__ICE_PFR_REQ, pf->state);
			ice_service_task_schedule(pf);
		}
	}
	ret = IRQ_HANDLED;

	if (!test_bit(__ICE_DOWN, pf->state)) {
		ice_service_task_schedule(pf);
		ice_irq_dynamic_ena(hw, NULL, NULL);
	}

	return ret;
}
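
/**
 * ice_dis_ctrlq_interrupts - disable control queue interrupts
 * @hw: pointer to HW structure
 */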
static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
{
	/* disable Admin queue Interrupt causes */
	wr32(hw, PFINT_FW_CTL,
	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);

	/* disable Mailbox queue Interrupt causes */
	wr32(hw, PFINT_MBX_CTL,
	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);

	/* disable Control queue Interrupt causes */
	wr32(hw, PFINT_OICR_CTL,
	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);

	ice_flush(hw);
}
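
/**
 * ice_free_irq_msix_misc - Unroll misc vector setup
 * @pf: board private structure
 */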
static void ice_free_irq_msix_misc(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	ice_dis_ctrlq_interrupts(hw);

	/* disable OICR interrupt */
	wr32(hw, PFINT_OICR_ENA, 0);
	ice_flush(hw);

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
		synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector);
		devm_free_irq(&pf->pdev->dev,
			      pf->msix_entries[pf->sw_oicr_idx].vector, pf);
	}

	pf->num_avail_sw_msix += 1;
	ice_free_res(pf->sw_irq_tracker, pf->sw_oicr_idx, ICE_RES_MISC_VEC_ID);
	pf->num_avail_hw_msix += 1;
	ice_free_res(pf->hw_irq_tracker, pf->hw_oicr_idx, ICE_RES_MISC_VEC_ID);
}
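
/**
 * ice_ena_ctrlq_interrupts - enable control queue interrupts
 * @hw: pointer to HW structure
 * @reg_idx: HW vector index to associate the control queue interrupts with
 */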
static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
{
	u32 val;

	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
	       PFINT_OICR_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_OICR_CTL, val);

	/* enable Admin queue Interrupt causes */
	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
	       PFINT_FW_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_FW_CTL, val);

	/* enable Mailbox queue Interrupt causes */
	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
	       PFINT_MBX_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_MBX_CTL, val);

	ice_flush(hw);
}
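
/**
 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors. This is not used
 * when in MSI or Legacy interrupt mode.
 */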
static int ice_req_irq_msix_misc(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	int oicr_idx, err = 0;

	if (!pf->int_name[0])
		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
			 dev_driver_string(&pf->pdev->dev),
			 dev_name(&pf->pdev->dev));

	/* Do not request IRQ but do enable OICR interrupt since settings are
	 * lost during reset. Note that this function is called only during
	 * rebuild path and not while reset is in progress.
	 */
	if (ice_is_reset_in_progress(pf->state))
		goto skip_req_irq;

	/* reserve one vector in sw_irq_tracker for misc interrupts */
	oicr_idx = ice_get_res(pf, pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
	if (oicr_idx < 0)
		return oicr_idx;

	pf->num_avail_sw_msix -= 1;
	pf->sw_oicr_idx = oicr_idx;

	/* reserve one vector in hw_irq_tracker for misc interrupts */
	oicr_idx = ice_get_res(pf, pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
	if (oicr_idx < 0) {
		ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
		pf->num_avail_sw_msix += 1;
		return oicr_idx;
	}
	pf->num_avail_hw_msix -= 1;
	pf->hw_oicr_idx = oicr_idx;

	err = devm_request_irq(&pf->pdev->dev,
			       pf->msix_entries[pf->sw_oicr_idx].vector,
			       ice_misc_intr, 0, pf->int_name, pf);
	if (err) {
		dev_err(&pf->pdev->dev,
			"devm_request_irq for %s failed: %d\n",
			pf->int_name, err);
		ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
		pf->num_avail_sw_msix += 1;
		ice_free_res(pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
		pf->num_avail_hw_msix += 1;
		return err;
	}

skip_req_irq:
	ice_ena_misc_vector(pf);

	ice_ena_ctrlq_interrupts(hw, pf->hw_oicr_idx);
	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx),
	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);

	ice_flush(hw);
	ice_irq_dynamic_ena(hw, NULL, NULL);

	return 0;
}
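
/**
 * ice_napi_del - Remove NAPI handler for the VSI
 * @vsi: VSI for which NAPI handler is to be removed
 */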
void ice_napi_del(struct ice_vsi *vsi)
{
	int v_idx;

	if (!vsi->netdev)
		return;

	ice_for_each_q_vector(vsi, v_idx)
		netif_napi_del(&vsi->q_vectors[v_idx]->napi);
}
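
/**
 * ice_napi_add - register NAPI handler for the VSI
 * @vsi: VSI for which NAPI handler is to be registered
 *
 * This function is only called in the driver's load path; for other paths
 * (e.g. resume, reset/rebuild) the NAPI handler is expected to already be
 * registered.
 */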
static void ice_napi_add(struct ice_vsi *vsi)
{
	int v_idx;

	if (!vsi->netdev)
		return;

	ice_for_each_q_vector(vsi, v_idx)
		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
			       ice_napi_poll, NAPI_POLL_WEIGHT);
}
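
/**
 * ice_cfg_netdev - Allocate, configure and register a netdev
 * @vsi: the VSI associated with the new netdev
 *
 * Returns 0 on success, negative value on failure
 */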
static int ice_cfg_netdev(struct ice_vsi *vsi)
{
	netdev_features_t csumo_features;
	netdev_features_t vlano_features;
	netdev_features_t dflt_features;
	netdev_features_t tso_features;
	struct ice_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];
	int err;

	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
				    vsi->alloc_rxq);
	if (!netdev)
		return -ENOMEM;

	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	dflt_features = NETIF_F_SG |
			NETIF_F_HIGHDMA |
			NETIF_F_RXHASH;

	csumo_features = NETIF_F_RXCSUM |
			 NETIF_F_IP_CSUM |
			 NETIF_F_SCTP_CRC |
			 NETIF_F_IPV6_CSUM;

	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
			 NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;

	tso_features = NETIF_F_TSO;

	/* set features that user can change */
	netdev->hw_features = dflt_features | csumo_features |
			      vlano_features | tso_features;

	/* enable features */
	netdev->features |= netdev->hw_features;
	/* encap and VLAN devices inherit default, csumo and tso features */
	netdev->hw_enc_features |= dflt_features | csumo_features |
				   tso_features;
	netdev->vlan_features |= dflt_features | csumo_features |
				 tso_features;

	if (vsi->type == ICE_VSI_PF) {
		SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);

		ether_addr_copy(netdev->dev_addr, mac_addr);
		ether_addr_copy(netdev->perm_addr, mac_addr);
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* assign netdev_ops */
	netdev->netdev_ops = &ice_netdev_ops;

	/* setup watchdog timeout value to be 5 second */
	netdev->watchdog_timeo = 5 * HZ;

	ice_set_ethtool_ops(netdev);

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = ICE_MAX_MTU;

	err = register_netdev(vsi->netdev);
	if (err)
		return err;

	netif_carrier_off(vsi->netdev);

	/* make sure transmit queues start off as stopped */
	netif_tx_stop_all_queues(vsi->netdev);

	return 0;
}
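
/**
 * ice_fill_rss_lut - Fill the RSS lookup table with default values
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 */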
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}
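
/**
 * ice_pf_vsi_setup - Set up a PF VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */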
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
}
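
/**
 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol
 * @vid: VLAN ID to be added
 *
 * net_device_ops implementation for adding VLAN IDs
 */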
static int
ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
		    u16 vid)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int ret;

	if (vid >= VLAN_N_VID) {
		netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
			   vid, VLAN_N_VID);
		return -EINVAL;
	}

	if (vsi->info.pvid)
		return -EINVAL;

	/* Enable VLAN pruning when VLAN 0 is added */
	if (unlikely(!vid)) {
		ret = ice_cfg_vlan_pruning(vsi, true, false);
		if (ret)
			return ret;
	}

	/* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is
	 * needed to continue allowing all untagged packets since VLAN prune
	 * list is applied to all packets by the switch
	 */
	ret = ice_vsi_add_vlan(vsi, vid);
	if (!ret) {
		vsi->vlan_ena = true;
		set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
	}

	return ret;
}
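
/**
 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol
 * @vid: VLAN ID to be removed
 *
 * net_device_ops implementation for removing VLAN IDs
 */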
static int
ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
		     u16 vid)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int ret;

	if (vsi->info.pvid)
		return -EINVAL;

	/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
	 * information
	 */
	ret = ice_vsi_kill_vlan(vsi, vid);
	if (ret)
		return ret;

	/* Disable VLAN pruning when VLAN 0 is removed */
	if (unlikely(!vid))
		ret = ice_cfg_vlan_pruning(vsi, false, false);

	vsi->vlan_ena = false;
	set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
	return ret;
}
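
/**
 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
 * @pf: board private structure
 *
 * Returns 0 on success, negative value on failure
 */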
static int ice_setup_pf_sw(struct ice_pf *pf)
{
	LIST_HEAD(tmp_add_list);
	u8 broadcast[ETH_ALEN];
	struct ice_vsi *vsi;
	int status = 0;

	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;

	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
	if (!vsi) {
		status = -ENOMEM;
		goto unroll_vsi_setup;
	}

	status = ice_cfg_netdev(vsi);
	if (status) {
		status = -ENODEV;
		goto unroll_vsi_setup;
	}

	/* registering the NAPI handler requires both the queues and
	 * netdev to be created, which are done in ice_pf_vsi_setup()
	 * and ice_cfg_netdev() respectively
	 */
	ice_napi_add(vsi);

	/* To add a MAC filter, first add the MAC to a list and then
	 * pass the list to ice_add_mac. Start with the VSI's unicast
	 * MAC from the port's permanent address.
	 */
	status = ice_add_mac_to_list(vsi, &tmp_add_list,
				     vsi->port_info->mac.perm_addr);
	if (status)
		goto unroll_napi_add;

	/* VSI needs to receive broadcast traffic, so add the broadcast
	 * MAC address to the list as well
	 */
	eth_broadcast_addr(broadcast);
	status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
	if (status)
		goto free_mac_list;

	/* program MAC filters for entries in tmp_add_list */
	status = ice_add_mac(&pf->hw, &tmp_add_list);
	if (status) {
		dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
		status = -ENOMEM;
		goto free_mac_list;
	}

	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
	return status;

free_mac_list:
	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);

unroll_napi_add:
	if (vsi) {
		ice_napi_del(vsi);
		if (vsi->netdev) {
			if (vsi->netdev->reg_state == NETREG_REGISTERED)
				unregister_netdev(vsi->netdev);
			free_netdev(vsi->netdev);
			vsi->netdev = NULL;
		}
	}

unroll_vsi_setup:
	if (vsi) {
		ice_vsi_free_q_vectors(vsi);
		ice_vsi_delete(vsi);
		ice_vsi_put_qs(vsi);
		pf->q_left_tx += vsi->alloc_txq;
		pf->q_left_rx += vsi->alloc_rxq;
		ice_vsi_clear(vsi);
	}
	return status;
}
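
/**
 * ice_determine_q_usage - Calculate queue distribution
 * @pf: board private structure
 *
 * Distribute the available queues between the PF's LAN Tx/Rx queues.
 */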
static void ice_determine_q_usage(struct ice_pf *pf)
{
	u16 q_left_tx, q_left_rx;

	q_left_tx = pf->hw.func_caps.common_cap.num_txq;
	q_left_rx = pf->hw.func_caps.common_cap.num_rxq;

	pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());

	/* only 1 Rx queue unless RSS is enabled */
	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		pf->num_lan_rx = 1;
	else
		pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus());

	pf->q_left_tx = q_left_tx - pf->num_lan_tx;
	pf->q_left_rx = q_left_rx - pf->num_lan_rx;
}
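
/**
 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
 * @pf: board private structure to de-initialize
 */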
static void ice_deinit_pf(struct ice_pf *pf)
{
	ice_service_task_stop(pf);
	mutex_destroy(&pf->sw_mutex);
	mutex_destroy(&pf->avail_q_mutex);
}
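
/**
 * ice_init_pf - Initialize general software structures (struct ice_pf)
 * @pf: board private structure to initialize
 */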
static void ice_init_pf(struct ice_pf *pf)
{
	bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
	set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
		struct ice_hw *hw = &pf->hw;

		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
		pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs,
					      ICE_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */

	mutex_init(&pf->sw_mutex);
	mutex_init(&pf->avail_q_mutex);

	/* Clear avail_[t|r]xqs bitmaps (set all queues available) */
	mutex_lock(&pf->avail_q_mutex);
	bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
	bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
	mutex_unlock(&pf->avail_q_mutex);

	if (pf->hw.func_caps.common_cap.rss_table_size)
		set_bit(ICE_FLAG_RSS_ENA, pf->flags);

	/* setup service timer and periodic service task */
	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
	pf->serv_tmr_period = HZ;
	INIT_WORK(&pf->serv_task, ice_service_task);
	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}
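
/**
 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
 * @pf: board private structure
 *
 * compute the number of MSIX vectors required (v_budget) and request from
 * the OS. Return the number of vectors reserved or negative on failure
 */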
static int ice_ena_msix_range(struct ice_pf *pf)
{
	int v_left, v_actual, v_budget = 0;
	int needed, err, i;

	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;

	/* reserve one vector for miscellaneous handler */
	needed = 1;
	v_budget += needed;
	v_left -= needed;

	/* reserve vectors for LAN traffic */
	pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
	v_budget += pf->num_lan_msix;
	v_left -= pf->num_lan_msix;

	pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
					sizeof(*pf->msix_entries), GFP_KERNEL);

	if (!pf->msix_entries) {
		err = -ENOMEM;
		goto exit_err;
	}

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;

	/* actually reserve the vectors */
	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					 ICE_MIN_MSIX, v_budget);

	if (v_actual < 0) {
		dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
		err = v_actual;
		goto msix_err;
	}

	if (v_actual < v_budget) {
		dev_warn(&pf->pdev->dev,
			 "not enough vectors. requested = %d, obtained = %d\n",
			 v_budget, v_actual);
		if (v_actual >= (pf->num_lan_msix + 1)) {
			pf->num_avail_sw_msix = v_actual -
						(pf->num_lan_msix + 1);
		} else if (v_actual >= 2) {
			pf->num_lan_msix = 1;
			pf->num_avail_sw_msix = v_actual - 2;
		} else {
			pci_disable_msix(pf->pdev);
			err = -ERANGE;
			goto msix_err;
		}
	}

	return v_actual;

msix_err:
	devm_kfree(&pf->pdev->dev, pf->msix_entries);
	goto exit_err;

exit_err:
	pf->num_lan_msix = 0;
	clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
	return err;
}
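
/**
 * ice_dis_msix - Disable MSI-X interrupt setup in OS
 * @pf: board private structure
 */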
static void ice_dis_msix(struct ice_pf *pf)
{
	pci_disable_msix(pf->pdev);
	devm_kfree(&pf->pdev->dev, pf->msix_entries);
	pf->msix_entries = NULL;
	clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
}
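
/**
 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
 * @pf: board private structure
 */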
static void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		ice_dis_msix(pf);

	if (pf->sw_irq_tracker) {
		devm_kfree(&pf->pdev->dev, pf->sw_irq_tracker);
		pf->sw_irq_tracker = NULL;
	}

	if (pf->hw_irq_tracker) {
		devm_kfree(&pf->pdev->dev, pf->hw_irq_tracker);
		pf->hw_irq_tracker = NULL;
	}
}
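
/**
 * ice_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 */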
static int ice_init_interrupt_scheme(struct ice_pf *pf)
{
	int vectors = 0, hw_vectors = 0;

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		vectors = ice_ena_msix_range(pf);
	else
		return -ENODEV;

	if (vectors < 0)
		return vectors;

	/* set up SW vector assignment tracking */
	pf->sw_irq_tracker =
		devm_kzalloc(&pf->pdev->dev, sizeof(*pf->sw_irq_tracker) +
			     (sizeof(u16) * vectors), GFP_KERNEL);
	if (!pf->sw_irq_tracker) {
		ice_dis_msix(pf);
		return -ENOMEM;
	}

	/* populate SW interrupts pool with number of OS granted IRQs */
	pf->num_avail_sw_msix = vectors;
	pf->sw_irq_tracker->num_entries = vectors;

	/* set up HW vector assignment tracking */
	hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	pf->hw_irq_tracker =
		devm_kzalloc(&pf->pdev->dev, sizeof(*pf->hw_irq_tracker) +
			     (sizeof(u16) * hw_vectors), GFP_KERNEL);
	if (!pf->hw_irq_tracker) {
		ice_clear_interrupt_scheme(pf);
		return -ENOMEM;
	}

	/* populate HW interrupts pool with number of HW supported irqs */
	pf->num_avail_hw_msix = hw_vectors;
	pf->hw_irq_tracker->num_entries = hw_vectors;

	return 0;
}
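
/**
 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
 * @pf: pointer to the PF structure
 *
 * There is no error returned here because the driver will be able to handle a
 * different cache line size, but only a warning is printed since issues may be
 * seen, specifically with Tx.
 */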
static void ice_verify_cacheline_size(struct ice_pf *pf)
{
	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
		dev_warn(&pf->pdev->dev,
			 "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
			 ICE_CACHE_LINE_BYTES);
}
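
/**
 * ice_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in ice_pci_tbl
 *
 * Returns 0 on success, negative on failure
 */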
static int
ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
	struct device *dev = &pdev->dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int err;

	/* this driver uses devres for resource management */
	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
	if (err) {
		dev_err(dev, "BAR0 I/O map error %d\n", err);
		return err;
	}

	pf = devm_kzalloc(dev, sizeof(*pf), GFP_KERNEL);
	if (!pf)
		return -ENOMEM;

	/* set up for high or low DMA */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
		return err;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	pf->pdev = pdev;
	pci_set_drvdata(pdev, pf);
	set_bit(__ICE_DOWN, pf->state);
	/* Disable service task until DOWN bit is cleared */
	set_bit(__ICE_SERVICE_DIS, pf->state);

	hw = &pf->hw;
	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
	hw->back = pf;
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	ice_set_ctrlq_len(hw);

	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);

#ifndef CONFIG_DYNAMIC_DEBUG
	if (debug < -1)
		hw->debug_mask = debug;
#endif

	err = ice_init_hw(hw);
	if (err) {
		dev_err(dev, "ice_init_hw failed: %d\n", err);
		err = -EIO;
		goto err_exit_unroll;
	}

	dev_info(dev, "firmware %d.%d.%05d api %d.%d\n",
		 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		 hw->api_maj_ver, hw->api_min_ver);

	ice_init_pf(pf);

	err = ice_init_pf_dcb(pf);
	if (err) {
		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);

		/* do not fail overall init if DCB init fails */
		err = 0;
	}

	ice_determine_q_usage(pf);

	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
	if (!pf->num_alloc_vsi) {
		err = -EIO;
		goto err_init_pf_unroll;
	}

	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
			       GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_init_pf_unroll;
	}

	err = ice_init_interrupt_scheme(pf);
	if (err) {
		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
		err = -EIO;
		goto err_init_interrupt_unroll;
	}

	/* Driver is mostly up */
	clear_bit(__ICE_DOWN, pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
		err = ice_req_irq_msix_misc(pf);
		if (err) {
			dev_err(dev, "setup of misc vector failed: %d\n", err);
			goto err_init_interrupt_unroll;
		}
	}

	/* create switch struct for the switch element created by FW on boot */
	pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
	if (!pf->first_sw) {
		err = -ENOMEM;
		goto err_msix_misc_unroll;
	}

	if (hw->evb_veb)
		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
	else
		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;

	pf->first_sw->pf = pf;

	/* record the sw_id available for later use */
	pf->first_sw->sw_id = hw->port_info->sw_id;

	err = ice_setup_pf_sw(pf);
	if (err) {
		dev_err(dev, "probe failed due to setup pf switch:%d\n", err);
		goto err_alloc_sw_unroll;
	}

	clear_bit(__ICE_SERVICE_DIS, pf->state);

	/* since everything is good, start the service timer */
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));

	err = ice_init_link_events(pf->hw.port_info);
	if (err) {
		dev_err(dev, "ice_init_link_events failed: %d\n", err);
		goto err_alloc_sw_unroll;
	}

	ice_verify_cacheline_size(pf);

	return 0;

err_alloc_sw_unroll:
	set_bit(__ICE_SERVICE_DIS, pf->state);
	set_bit(__ICE_DOWN, pf->state);
	devm_kfree(&pf->pdev->dev, pf->first_sw);
err_msix_misc_unroll:
	ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
	ice_clear_interrupt_scheme(pf);
	devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
	ice_deinit_pf(pf);
	ice_deinit_hw(hw);
err_exit_unroll:
	pci_disable_pcie_error_reporting(pdev);
	return err;
}
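
/**
 * ice_remove - Device removal routine
 * @pdev: PCI device information struct
 */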
static void ice_remove(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	int i;

	if (!pf)
		return;

	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
		if (!ice_is_reset_in_progress(pf->state))
			break;
		msleep(100);
	}

	set_bit(__ICE_DOWN, pf->state);
	ice_service_task_stop(pf);

	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
		ice_free_vfs(pf);
	ice_vsi_release_all(pf);
	ice_free_irq_msix_misc(pf);
	ice_for_each_vsi(pf, i) {
		if (!pf->vsi[i])
			continue;
		ice_vsi_free_q_vectors(pf->vsi[i]);
	}
	ice_clear_interrupt_scheme(pf);
	ice_deinit_pf(pf);
	ice_deinit_hw(&pf->hw);
	pci_disable_pcie_error_reporting(pdev);
}
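
/**
 * ice_pci_err_detected - warning that PCI error has been detected
 * @pdev: PCI device information struct
 * @err: the type of PCI error
 *
 * Called to warn that something happened on the PCI bus and the error handling
 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
 */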
static pci_ers_result_t
ice_pci_err_detected(struct pci_dev *pdev, enum pci_channel_state err)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	if (!pf) {
		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
			__func__, err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (!test_bit(__ICE_SUSPENDED, pf->state)) {
		ice_service_task_stop(pf);

		if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
			set_bit(__ICE_PFR_REQ, pf->state);
			ice_prepare_for_reset(pf);
		}
	}

	return PCI_ERS_RESULT_NEED_RESET;
}
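
/**
 * ice_pci_err_slot_reset - a PCI slot reset has just happened
 * @pdev: PCI device information struct
 *
 * Called to determine if the driver can recover from the PCI slot reset by
 * using a register read to determine if the device is recoverable.
 */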
static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;
	u32 reg;

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset, error %d\n",
			err);
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		/* Check for life */
		reg = rd32(&pf->hw, GLGEN_RTRIG);
		if (!reg)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err)
		dev_dbg(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed, error %d\n",
			err);

	return result;
}
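
/**
 * ice_pci_err_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error and/or
 * reset recovery have finished
 */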
static void ice_pci_err_resume(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	if (!pf) {
		dev_err(&pdev->dev,
			"%s failed, device is unrecoverable\n", __func__);
		return;
	}

	if (test_bit(__ICE_SUSPENDED, pf->state)) {
		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
			__func__);
		return;
	}

	ice_do_reset(pf, ICE_RESET_PFR);
	ice_service_task_restart(pf);
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
}
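
/**
 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
 * @pdev: PCI device information struct
 */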
2550static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
2551{
2552 struct ice_pf *pf = pci_get_drvdata(pdev);
2553
2554 if (!test_bit(__ICE_SUSPENDED, pf->state)) {
2555 ice_service_task_stop(pf);
2556
2557 if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
2558 set_bit(__ICE_PFR_REQ, pf->state);
2559 ice_prepare_for_reset(pf);
2560 }
2561 }
2562}

/**
 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
 * @pdev: PCI device information struct
 */
static void ice_pci_err_reset_done(struct pci_dev *pdev)
{
	ice_pci_err_resume(pdev);
}

/* ice_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ice_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
	/* required last entry */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);

static const struct pci_error_handlers ice_pci_err_handler = {
	.error_detected = ice_pci_err_detected,
	.slot_reset = ice_pci_err_slot_reset,
	.reset_prepare = ice_pci_err_reset_prepare,
	.reset_done = ice_pci_err_reset_done,
	.resume = ice_pci_err_resume
};

static struct pci_driver ice_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ice_pci_tbl,
	.probe = ice_probe,
	.remove = ice_remove,
	.sriov_configure = ice_sriov_configure,
	.err_handler = &ice_pci_err_handler
};

/**
 * ice_module_init - Driver registration routine
 *
 * ice_module_init is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init ice_module_init(void)
{
	int status;

	pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
	pr_info("%s\n", ice_copyright);

	ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
	if (!ice_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}

	status = pci_register_driver(&ice_driver);
	if (status) {
		pr_err("failed to register pci driver, err %d\n", status);
		destroy_workqueue(ice_wq);
	}

	return status;
}
module_init(ice_module_init);

/**
 * ice_module_exit - Driver exit cleanup routine
 *
 * ice_module_exit is called just before the driver is removed
 * from memory.
 */
static void __exit ice_module_exit(void)
{
	pci_unregister_driver(&ice_driver);
	destroy_workqueue(ice_wq);
	pr_info("module unloaded\n");
}
module_exit(ice_module_exit);

/**
 * ice_set_mac_address - NDO callback to set MAC address
 * @netdev: network interface device structure
 * @pi: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int ice_set_mac_address(struct net_device *netdev, void *pi)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct sockaddr *addr = pi;
	enum ice_status status;
	LIST_HEAD(a_mac_list);
	LIST_HEAD(r_mac_list);
	u8 flags = 0;
	int err;
	u8 *mac;

	mac = (u8 *)addr->sa_data;

	if (!is_valid_ether_addr(mac))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, mac)) {
		netdev_warn(netdev, "already using mac %pM\n", mac);
		return 0;
	}

	if (test_bit(__ICE_DOWN, pf->state) ||
	    ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't set mac %pM. device not ready\n",
			   mac);
		return -EBUSY;
	}

	/* When we change the MAC address we also have to change the MAC
	 * address based filter rules that were created previously for the old
	 * MAC address. So first we remove the old filter rule using
	 * ice_remove_mac and then create a new filter rule using ice_add_mac.
	 * Note that for both these operations we first need to form a "list"
	 * of MAC addresses (even though in this case we have only one MAC
	 * address to be added/removed) and this is done using
	 * ice_add_mac_to_list. Depending on the ensuing operation this "list"
	 * of MAC addresses is either to be added or removed from the filter.
	 */
	err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr);
	if (err) {
		err = -EADDRNOTAVAIL;
		goto free_lists;
	}

	status = ice_remove_mac(hw, &r_mac_list);
	if (status) {
		err = -EADDRNOTAVAIL;
		goto free_lists;
	}

	err = ice_add_mac_to_list(vsi, &a_mac_list, mac);
	if (err) {
		err = -EADDRNOTAVAIL;
		goto free_lists;
	}

	status = ice_add_mac(hw, &a_mac_list);
	if (status) {
		err = -EADDRNOTAVAIL;
		goto free_lists;
	}

free_lists:
	/* free the MAC lists regardless of whether add/remove succeeded */
	ice_free_fltr_list(&pf->pdev->dev, &r_mac_list);
	ice_free_fltr_list(&pf->pdev->dev, &a_mac_list);

	if (err) {
		netdev_err(netdev, "can't set mac %pM. filter update failed\n",
			   mac);
		return err;
	}

	/* change the netdev's MAC address */
	memcpy(netdev->dev_addr, mac, netdev->addr_len);
	netdev_dbg(vsi->netdev, "updated mac address to %pM\n",
		   netdev->dev_addr);

	/* write new MAC address to the firmware */
	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
	status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
	if (status) {
		netdev_err(netdev, "can't set mac %pM. write to firmware failed.\n",
			   mac);
	}
	return 0;
}
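
/* Usage sketch (illustrative only, not part of the driver): an in-kernel
 * caller reaches the NDO above through the net core helper rather than
 * calling it directly. "example_set_mac" is a hypothetical name, and the
 * dev_set_mac_address() signature varies slightly across kernel versions:
 *
 *	static int example_set_mac(struct net_device *netdev, const u8 *mac)
 *	{
 *		struct sockaddr addr = { .sa_family = netdev->type };
 *		int err;
 *
 *		memcpy(addr.sa_data, mac, ETH_ALEN);
 *		rtnl_lock();
 *		err = dev_set_mac_address(netdev, &addr, NULL);
 *		rtnl_unlock();
 *		return err;    // dispatches to ice_set_mac_address()
 *	}
 */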

/**
 * ice_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 */
static void ice_set_rx_mode(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (!vsi)
		return;

	/* Set the flags to synchronize filters
	 * ndo_set_rx_mode is triggered even without a change in netdev
	 * flags
	 */
	set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
	set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	ice_service_task_schedule(vsi->back);
}
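
/* Note: ndo_set_rx_mode runs in atomic context (with the netdev address
 * lock held), so the handler above only records what changed and defers
 * the hardware update. A minimal sketch of the consumer side of this
 * pattern in the service task (illustrative, not the driver's exact
 * code; sync_vsi_filters is a hypothetical helper name):
 *
 *	if (test_and_clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags))
 *		// walk VSIs whose *_FLTR_CHANGED bits are set and push
 *		// the accumulated MAC filter changes to hardware
 *		sync_vsi_filters(pf);
 */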

/**
 * ice_fdb_add - add an entry to the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @vid: VLAN ID
 * @flags: instructions from stack about fdb operation
 * @extack: netlink extended ack
 */
static int
ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
	    struct net_device *dev, const unsigned char *addr, u16 vid,
	    u16 flags, struct netlink_ext_ack __always_unused *extack)
{
	int err;

	if (vid) {
		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
		return -EINVAL;
	}
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		netdev_err(dev, "FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);
	else
		err = -EINVAL;

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}
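
/* Exercised from userspace via, e.g., "bridge fdb add <mac> dev <pf>".
 * The NLM_F_EXCL handling above follows the stack's convention:
 * re-adding an existing static entry is only an error when the caller
 * explicitly asked for exclusive creation.
 */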

/**
 * ice_fdb_del - delete an entry from the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being removed
 * @vid: VLAN ID
 */
static int
ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
	    struct net_device *dev, const unsigned char *addr,
	    __always_unused u16 vid)
{
	int err;

	if (ndm->ndm_state & NUD_PERMANENT) {
		netdev_err(dev, "FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr))
		err = dev_uc_del(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_del(dev, addr);
	else
		err = -EINVAL;

	return err;
}

/**
 * ice_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 */
static int
ice_set_features(struct net_device *netdev, netdev_features_t features)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int ret = 0;

	/* Multiple features can be changed in one call so keep features in
	 * separate if/else statements to guarantee each feature is checked
	 */
	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
		ret = ice_vsi_manage_rss_lut(vsi, true);
	else if (!(features & NETIF_F_RXHASH) &&
		 netdev->features & NETIF_F_RXHASH)
		ret = ice_vsi_manage_rss_lut(vsi, false);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
		ret = ice_vsi_manage_vlan_stripping(vsi, true);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
		 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
		ret = ice_vsi_manage_vlan_stripping(vsi, false);

	if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
		ret = ice_vsi_manage_vlan_insertion(vsi);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
		 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
		ret = ice_vsi_manage_vlan_insertion(vsi);

	return ret;
}

/**
 * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
 * @vsi: VSI to setup VLAN properties for
 */
static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
{
	int ret = 0;

	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		ret = ice_vsi_manage_vlan_stripping(vsi, true);
	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
		ret = ice_vsi_manage_vlan_insertion(vsi);

	return ret;
}

/**
 * ice_vsi_cfg - Setup the VSI
 * @vsi: the VSI being configured
 *
 * Return 0 on success and negative value on error
 */
static int ice_vsi_cfg(struct ice_vsi *vsi)
{
	int err;

	if (vsi->netdev) {
		ice_set_rx_mode(vsi->netdev);

		err = ice_vsi_vlan_setup(vsi);
		if (err)
			return err;
	}
	ice_vsi_cfg_dcb_rings(vsi);

	err = ice_vsi_cfg_lan_txqs(vsi);
	if (!err)
		err = ice_vsi_cfg_rxqs(vsi);

	return err;
}

/**
 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 */
static void ice_napi_enable_all(struct ice_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	ice_for_each_q_vector(vsi, q_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring || q_vector->tx.ring)
			napi_enable(&q_vector->napi);
	}
}

/**
 * ice_up_complete - Finish the last steps of bringing up a connection
 * @vsi: The VSI being configured
 *
 * Return 0 on success and negative value on error
 */
static int ice_up_complete(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int err;

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		ice_vsi_cfg_msix(vsi);
	else
		return -ENOTSUPP;

	/* Enable only Rx rings, Tx rings were enabled by the FW when the
	 * Tx queue group list was configured and the context bits were
	 * programmed using ice_vsi_cfg_txqs
	 */
	err = ice_vsi_start_rx_rings(vsi);
	if (err)
		return err;

	clear_bit(__ICE_DOWN, vsi->state);
	ice_napi_enable_all(vsi);
	ice_vsi_ena_irq(vsi);

	if (vsi->port_info &&
	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
	    vsi->netdev) {
		ice_print_link_msg(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	}

	ice_service_task_schedule(pf);

	return 0;
}

/**
 * ice_up - Bring the connection back up after being down
 * @vsi: VSI being configured
 */
int ice_up(struct ice_vsi *vsi)
{
	int err;

	err = ice_vsi_cfg(vsi);
	if (!err)
		err = ice_up_complete(vsi);

	return err;
}

/**
 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
 * @ring: Tx or Rx ring to read stats from
 * @pkts: packets stats counter
 * @bytes: bytes stats counter
 *
 * This function fetches stats from the ring considering the atomic operations
 * that need to be performed to read u64 values in a 32-bit machine.
 */
static void
ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	*pkts = 0;
	*bytes = 0;

	if (!ring)
		return;
	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		*pkts = ring->stats.pkts;
		*bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}
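
/* For context: the writer side of this seqcount protocol lives in the
 * Tx/Rx clean-up paths elsewhere in the driver. A minimal sketch of the
 * update pattern (illustrative):
 *
 *	u64_stats_update_begin(&ring->syncp);
 *	ring->stats.pkts += pkts;
 *	ring->stats.bytes += bytes;
 *	u64_stats_update_end(&ring->syncp);
 *
 * On 64-bit kernels begin/end compile away; on 32-bit they bump a
 * seqcount so the reader above can detect and retry a torn 64-bit read.
 */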

/**
 * ice_update_vsi_ring_stats - Update VSI stats counters
 * @vsi: the VSI to be updated
 */
static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
	struct ice_ring *ring;
	u64 pkts, bytes;
	int i;

	/* reset netdev stats */
	vsi_stats->tx_packets = 0;
	vsi_stats->tx_bytes = 0;
	vsi_stats->rx_packets = 0;
	vsi_stats->rx_bytes = 0;

	/* reset non-netdev (extended) stats */
	vsi->tx_restart = 0;
	vsi->tx_busy = 0;
	vsi->tx_linearize = 0;
	vsi->rx_buf_failed = 0;
	vsi->rx_page_failed = 0;

	rcu_read_lock();

	/* update Tx rings counters */
	ice_for_each_txq(vsi, i) {
		ring = READ_ONCE(vsi->tx_rings[i]);
		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
		vsi_stats->tx_packets += pkts;
		vsi_stats->tx_bytes += bytes;
		vsi->tx_restart += ring->tx_stats.restart_q;
		vsi->tx_busy += ring->tx_stats.tx_busy;
		vsi->tx_linearize += ring->tx_stats.tx_linearize;
	}

	/* update Rx rings counters */
	ice_for_each_rxq(vsi, i) {
		ring = READ_ONCE(vsi->rx_rings[i]);
		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
		vsi_stats->rx_packets += pkts;
		vsi_stats->rx_bytes += bytes;
		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
	}

	rcu_read_unlock();
}

/**
 * ice_update_vsi_stats - Update VSI stats counters
 * @vsi: the VSI to be updated
 */
static void ice_update_vsi_stats(struct ice_vsi *vsi)
{
	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
	struct ice_eth_stats *cur_es = &vsi->eth_stats;
	struct ice_pf *pf = vsi->back;

	if (test_bit(__ICE_DOWN, vsi->state) ||
	    test_bit(__ICE_CFG_BUSY, pf->state))
		return;

	/* get stats as recorded by Tx/Rx rings */
	ice_update_vsi_ring_stats(vsi);

	/* get VSI stats as recorded by the hardware */
	ice_update_eth_stats(vsi);

	cur_ns->tx_errors = cur_es->tx_errors;
	cur_ns->rx_dropped = cur_es->rx_discards;
	cur_ns->tx_dropped = cur_es->tx_discards;
	cur_ns->multicast = cur_es->rx_multicast;

	/* update some more netdev stats if this is main VSI */
	if (vsi->type == ICE_VSI_PF) {
		cur_ns->rx_crc_errors = pf->stats.crc_errors;
		cur_ns->rx_errors = pf->stats.crc_errors +
				    pf->stats.illegal_bytes;
		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
	}
}

/**
 * ice_update_pf_stats - Update PF port stats counters
 * @pf: PF whose stats need to be updated
 */
static void ice_update_pf_stats(struct ice_pf *pf)
{
	struct ice_hw_port_stats *prev_ps, *cur_ps;
	struct ice_hw *hw = &pf->hw;
	u8 pf_id;

	prev_ps = &pf->stats_prev;
	cur_ps = &pf->stats;
	pf_id = hw->pf_id;

	ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id),
			  pf->stat_prev_loaded, &prev_ps->eth.rx_bytes,
			  &cur_ps->eth.rx_bytes);

	ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id),
			  pf->stat_prev_loaded, &prev_ps->eth.rx_unicast,
			  &cur_ps->eth.rx_unicast);

	ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id),
			  pf->stat_prev_loaded, &prev_ps->eth.rx_multicast,
			  &cur_ps->eth.rx_multicast);

	ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id),
			  pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast,
			  &cur_ps->eth.rx_broadcast);

	ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id),
			  pf->stat_prev_loaded, &prev_ps->eth.tx_bytes,
			  &cur_ps->eth.tx_bytes);

	ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id),
			  pf->stat_prev_loaded, &prev_ps->eth.tx_unicast,
			  &cur_ps->eth.tx_unicast);

	ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id),
			  pf->stat_prev_loaded, &prev_ps->eth.tx_multicast,
			  &cur_ps->eth.tx_multicast);

	ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id),
			  pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast,
			  &cur_ps->eth.tx_broadcast);

	ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded,
			  &prev_ps->tx_dropped_link_down,
			  &cur_ps->tx_dropped_link_down);

	ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id),
			  pf->stat_prev_loaded, &prev_ps->rx_size_64,
			  &cur_ps->rx_size_64);

	ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id),
			  pf->stat_prev_loaded, &prev_ps->rx_size_127,
			  &cur_ps->rx_size_127);

	ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id),
			  pf->stat_prev_loaded, &prev_ps->rx_size_255,
			  &cur_ps->rx_size_255);

	ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id),
			  pf->stat_prev_loaded, &prev_ps->rx_size_511,
			  &cur_ps->rx_size_511);

	ice_stat_update40(hw, GLPRT_PRC1023H(pf_id),
			  GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded,
			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);

	ice_stat_update40(hw, GLPRT_PRC1522H(pf_id),
			  GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded,
			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);

	ice_stat_update40(hw, GLPRT_PRC9522H(pf_id),
			  GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded,
			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);

	ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id),
			  pf->stat_prev_loaded, &prev_ps->tx_size_64,
			  &cur_ps->tx_size_64);

	ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id),
			  pf->stat_prev_loaded, &prev_ps->tx_size_127,
			  &cur_ps->tx_size_127);

	ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id),
			  pf->stat_prev_loaded, &prev_ps->tx_size_255,
			  &cur_ps->tx_size_255);

	ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id),
			  pf->stat_prev_loaded, &prev_ps->tx_size_511,
			  &cur_ps->tx_size_511);

	ice_stat_update40(hw, GLPRT_PTC1023H(pf_id),
			  GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded,
			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);

	ice_stat_update40(hw, GLPRT_PTC1522H(pf_id),
			  GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded,
			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);

	ice_stat_update40(hw, GLPRT_PTC9522H(pf_id),
			  GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded,
			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);

	ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);

	ice_stat_update32(hw, GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);

	ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);

	ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);

	ice_update_dcb_stats(pf);

	ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded,
			  &prev_ps->crc_errors, &cur_ps->crc_errors);

	ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);

	ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->mac_local_faults,
			  &cur_ps->mac_local_faults);

	ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->mac_remote_faults,
			  &cur_ps->mac_remote_faults);

	ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);

	ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);

	ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);

	ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);

	ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded,
			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);

	pf->stat_prev_loaded = true;
}
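
/* The ice_stat_update40/32 helpers above accumulate deltas of
 * free-running hardware counters into the prev/cur pairs. A minimal
 * sketch of the idea for a 40-bit counter, assuming internals roughly
 * like the following (illustrative, not the helper's exact code):
 *
 *	new = read 40-bit value from the hi/lo register pair;
 *	if (!prev_loaded)
 *		*prev = new;            // first read: establish baseline
 *	if (new >= *prev)
 *		*cur += new - *prev;    // normal case
 *	else
 *		*cur += (new + BIT_ULL(40)) - *prev;  // counter wrapped
 *	*prev = new;
 */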

/**
 * ice_get_stats64 - get statistics for network device structure
 * @netdev: network interface device structure
 * @stats: main device statistics structure
 */
static
void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct rtnl_link_stats64 *vsi_stats;
	struct ice_vsi *vsi = np->vsi;

	vsi_stats = &vsi->net_stats;

	if (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq)
		return;

	/* netdev packet/byte stats come from ring counters. These are
	 * obtained by summing up ring counters (done by
	 * ice_update_vsi_ring_stats).
	 */
	ice_update_vsi_ring_stats(vsi);
	stats->tx_packets = vsi_stats->tx_packets;
	stats->tx_bytes = vsi_stats->tx_bytes;
	stats->rx_packets = vsi_stats->rx_packets;
	stats->rx_bytes = vsi_stats->rx_bytes;

	/* The rest of the stats can be read from the hardware but instead we
	 * just return values that the watchdog task has already obtained from
	 * the hardware.
	 */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_dropped = vsi_stats->rx_dropped;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;
}

/**
 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
 * @vsi: VSI having NAPI disabled
 */
static void ice_napi_disable_all(struct ice_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	ice_for_each_q_vector(vsi, q_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring || q_vector->tx.ring)
			napi_disable(&q_vector->napi);
	}
}

/**
 * ice_force_phys_link_state - Force the physical link state
 * @vsi: VSI to force the physical link state to up/down
 * @link_up: true/false indicates to set the physical link to up/down
 *
 * Force the physical link state by getting the current PHY capabilities from
 * hardware and setting the PHY config based on the determined capabilities. If
 * link changes, a link event will be triggered because both the Enable
 * Automatic Link Update and LESM Enable bits are set when setting the PHY
 * capabilities.
 *
 * Returns 0 on success, negative on failure
 */
static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_port_info *pi;
	struct device *dev;
	int retcode;

	if (!vsi || !vsi->port_info || !vsi->back)
		return -EINVAL;
	if (vsi->type != ICE_VSI_PF)
		return 0;

	dev = &vsi->back->pdev->dev;

	pi = vsi->port_info;

	pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				      NULL);
	if (retcode) {
		dev_err(dev,
			"Failed to get phy capabilities, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
		goto out;
	}

	/* No change in link */
	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
		goto out;

	cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
	if (!cfg) {
		retcode = -ENOMEM;
		goto out;
	}

	cfg->phy_type_low = pcaps->phy_type_low;
	cfg->phy_type_high = pcaps->phy_type_high;
	cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
	cfg->low_power_ctrl = pcaps->low_power_ctrl;
	cfg->eee_cap = pcaps->eee_cap;
	cfg->eeer_value = pcaps->eeer_value;
	cfg->link_fec_opt = pcaps->link_fec_options;
	if (link_up)
		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
	else
		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;

	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL);
	if (retcode) {
		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
	}

	devm_kfree(dev, cfg);
out:
	devm_kfree(dev, pcaps);
	return retcode;
}

/**
 * ice_down - Shutdown the connection
 * @vsi: The VSI being stopped
 */
int ice_down(struct ice_vsi *vsi)
{
	int i, tx_err, rx_err, link_err = 0;

	/* Caller of this function is expected to set the
	 * vsi->state __ICE_DOWN bit
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}

	ice_vsi_dis_irq(vsi);

	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
	if (tx_err)
		netdev_err(vsi->netdev,
			   "Failed stop Tx rings, VSI %d error %d\n",
			   vsi->vsi_num, tx_err);

	rx_err = ice_vsi_stop_rx_rings(vsi);
	if (rx_err)
		netdev_err(vsi->netdev,
			   "Failed stop Rx rings, VSI %d error %d\n",
			   vsi->vsi_num, rx_err);

	ice_napi_disable_all(vsi);

	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
		link_err = ice_force_phys_link_state(vsi, false);
		if (link_err)
			netdev_err(vsi->netdev,
				   "Failed to set physical link down, VSI %d error %d\n",
				   vsi->vsi_num, link_err);
	}

	ice_for_each_txq(vsi, i)
		ice_clean_tx_ring(vsi->tx_rings[i]);

	ice_for_each_rxq(vsi, i)
		ice_clean_rx_ring(vsi->rx_rings[i]);

	if (tx_err || rx_err || link_err) {
		netdev_err(vsi->netdev,
			   "Failed to close VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);
		return -EIO;
	}

	return 0;
}

/**
 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
 * @vsi: VSI having resources allocated
 *
 * Return 0 on success, negative on failure
 */
static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
{
	int i, err = 0;

	if (!vsi->num_txq) {
		dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
			vsi->vsi_num);
		return -EINVAL;
	}

	ice_for_each_txq(vsi, i) {
		vsi->tx_rings[i]->netdev = vsi->netdev;
		err = ice_setup_tx_ring(vsi->tx_rings[i]);
		if (err)
			break;
	}

	return err;
}

/**
 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
 * @vsi: VSI having resources allocated
 *
 * Return 0 on success, negative on failure
 */
static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
{
	int i, err = 0;

	if (!vsi->num_rxq) {
		dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
			vsi->vsi_num);
		return -EINVAL;
	}

	ice_for_each_rxq(vsi, i) {
		vsi->rx_rings[i]->netdev = vsi->netdev;
		err = ice_setup_rx_ring(vsi->rx_rings[i]);
		if (err)
			break;
	}

	return err;
}

/**
 * ice_vsi_req_irq - Request IRQ from the OS
 * @vsi: The VSI IRQ is being requested for
 * @basename: name for the vector
 *
 * Return 0 on success and a negative value on error
 */
static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename)
{
	struct ice_pf *pf = vsi->back;
	int err = -EINVAL;

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		err = ice_vsi_req_irq_msix(vsi, basename);

	return err;
}

/**
 * ice_vsi_open - Called when a network interface is made active
 * @vsi: the VSI to open
 *
 * Initialization of the VSI
 *
 * Returns 0 on success, negative value on error
 */
static int ice_vsi_open(struct ice_vsi *vsi)
{
	char int_name[ICE_INT_NAME_STR_LEN];
	struct ice_pf *pf = vsi->back;
	int err;

	/* allocate descriptors */
	err = ice_vsi_setup_tx_rings(vsi);
	if (err)
		goto err_setup_tx;

	err = ice_vsi_setup_rx_rings(vsi);
	if (err)
		goto err_setup_rx;

	err = ice_vsi_cfg(vsi);
	if (err)
		goto err_setup_rx;

	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
		 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
	err = ice_vsi_req_irq(vsi, int_name);
	if (err)
		goto err_setup_rx;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
	if (err)
		goto err_set_qs;

	err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
	if (err)
		goto err_set_qs;

	err = ice_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	ice_down(vsi);
err_set_qs:
	ice_vsi_free_irq(vsi);
err_setup_rx:
	ice_vsi_free_rx_rings(vsi);
err_setup_tx:
	ice_vsi_free_tx_rings(vsi);

	return err;
}

/**
 * ice_vsi_release_all - Delete all VSIs
 * @pf: PF from which all VSIs are being removed
 */
static void ice_vsi_release_all(struct ice_pf *pf)
{
	int err, i;

	if (!pf->vsi)
		return;

	ice_for_each_vsi(pf, i) {
		if (!pf->vsi[i])
			continue;

		err = ice_vsi_release(pf->vsi[i]);
		if (err)
			dev_dbg(&pf->pdev->dev,
				"Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
				i, err, pf->vsi[i]->vsi_num);
	}
}

/**
 * ice_ena_vsi - resume a VSI
 * @vsi: the VSI being resumed
 * @locked: is the rtnl_lock already held
 */
static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
{
	int err = 0;

	if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
		return err;

	clear_bit(__ICE_NEEDS_RESTART, vsi->state);

	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
		struct net_device *netd = vsi->netdev;

		if (netif_running(vsi->netdev)) {
			if (locked) {
				err = netd->netdev_ops->ndo_open(netd);
			} else {
				rtnl_lock();
				err = netd->netdev_ops->ndo_open(netd);
				rtnl_unlock();
			}
		} else {
			err = ice_vsi_open(vsi);
		}
	}

	return err;
}

/**
 * ice_pf_ena_all_vsi - Resume all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
#ifdef CONFIG_DCB
int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
#else
static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
#endif /* CONFIG_DCB */
{
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			if (ice_ena_vsi(pf->vsi[v], locked))
				return -EIO;

	return 0;
}

/**
 * ice_vsi_rebuild_all - rebuild all VSIs in PF
 * @pf: the PF
 */
static int ice_vsi_rebuild_all(struct ice_pf *pf)
{
	int i;

	/* loop through pf->vsi array and reinit the VSI if found */
	ice_for_each_vsi(pf, i) {
		int err;

		if (!pf->vsi[i])
			continue;

		err = ice_vsi_rebuild(pf->vsi[i]);
		if (err) {
			dev_err(&pf->pdev->dev,
				"VSI at index %d rebuild failed\n",
				pf->vsi[i]->idx);
			return err;
		}

		dev_info(&pf->pdev->dev,
			 "VSI at index %d rebuilt. vsi_num = 0x%x\n",
			 pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
	}

	return 0;
}

/**
 * ice_vsi_replay_all - replay all VSIs configuration in the PF
 * @pf: the PF
 */
static int ice_vsi_replay_all(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	enum ice_status ret;
	int i;

	/* loop through pf->vsi array and replay the VSI if found */
	ice_for_each_vsi(pf, i) {
		if (!pf->vsi[i])
			continue;

		ret = ice_replay_vsi(hw, pf->vsi[i]->idx);
		if (ret) {
			dev_err(&pf->pdev->dev,
				"VSI at index %d replay failed %d\n",
				pf->vsi[i]->idx, ret);
			return -EIO;
		}

		/* Re-map HW VSI number, using VSI handle that has been
		 * previously validated in ice_replay_vsi() call above
		 */
		pf->vsi[i]->vsi_num = ice_get_hw_vsi_num(hw, pf->vsi[i]->idx);

		dev_info(&pf->pdev->dev,
			 "VSI at index %d filter replayed successfully - vsi_num %i\n",
			 pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
	}

	/* Clean up replay filter after successful re-configuration */
	ice_replay_post(hw);
	return 0;
}

/**
 * ice_rebuild - rebuild after reset
 * @pf: PF to rebuild
 */
static void ice_rebuild(struct ice_pf *pf)
{
	struct device *dev = &pf->pdev->dev;
	struct ice_hw *hw = &pf->hw;
	enum ice_status ret;
	int err, i;

	if (test_bit(__ICE_DOWN, pf->state))
		goto clear_recovery;

	dev_dbg(dev, "rebuilding pf\n");

	ret = ice_init_all_ctrlq(hw);
	if (ret) {
		dev_err(dev, "control queues init failed %d\n", ret);
		goto err_init_ctrlq;
	}

	ret = ice_clear_pf_cfg(hw);
	if (ret) {
		dev_err(dev, "clear PF configuration failed %d\n", ret);
		goto err_init_ctrlq;
	}

	ice_clear_pxe_mode(hw);

	ret = ice_get_caps(hw);
	if (ret) {
		dev_err(dev, "ice_get_caps failed %d\n", ret);
		goto err_init_ctrlq;
	}

	err = ice_sched_init_port(hw->port_info);
	if (err)
		goto err_sched_init_port;

	ice_dcb_rebuild(pf);

	/* reset search_hint of irq_trackers to 0 since interrupts are
	 * reclaimed and could be allocated from beginning during VSI rebuild
	 */
	pf->sw_irq_tracker->search_hint = 0;
	pf->hw_irq_tracker->search_hint = 0;

	err = ice_vsi_rebuild_all(pf);
	if (err) {
		dev_err(dev, "ice_vsi_rebuild_all failed\n");
		goto err_vsi_rebuild;
	}

	err = ice_update_link_info(hw->port_info);
	if (err)
		dev_err(&pf->pdev->dev, "Get link status error %d\n", err);

	/* Replay all VSIs Configuration, including filters after reset */
	if (ice_vsi_replay_all(pf)) {
		dev_err(&pf->pdev->dev,
			"error replaying VSI configurations with switch filter rules\n");
		goto err_vsi_rebuild;
	}

	/* start misc vector */
	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
		err = ice_req_irq_msix_misc(pf);
		if (err) {
			dev_err(dev, "misc vector setup failed: %d\n", err);
			goto err_vsi_rebuild;
		}
	}

	/* restart the VSIs that were rebuilt and running before the reset */
	err = ice_pf_ena_all_vsi(pf, false);
	if (err) {
		dev_err(&pf->pdev->dev, "error enabling VSIs\n");
		/* no need to disable VSIs and VFs as they would be
		 * disabled in ice_service_task_complete anyway
		 */
		goto err_vsi_rebuild;
	}

	ice_for_each_vsi(pf, i) {
		bool link_up;

		if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
			continue;
		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
		if (link_up) {
			netif_carrier_on(pf->vsi[i]->netdev);
			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
		} else {
			netif_carrier_off(pf->vsi[i]->netdev);
			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
		}
	}

	/* if we get here, reset flow is successful */
	clear_bit(__ICE_RESET_FAILED, pf->state);
	return;

err_vsi_rebuild:
	ice_vsi_release_all(pf);
err_sched_init_port:
	ice_sched_cleanup_all(hw);
err_init_ctrlq:
	ice_shutdown_all_ctrlq(hw);
	set_bit(__ICE_RESET_FAILED, pf->state);
clear_recovery:
	/* set this bit in PF state to control service task scheduling */
	set_bit(__ICE_NEEDS_RESTART, pf->state);
	dev_err(dev, "Rebuild failed, unload and reload driver\n");
}

/**
 * ice_change_mtu - NDO callback to change the MTU
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int ice_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u8 count = 0;

	if (new_mtu == netdev->mtu) {
		netdev_warn(netdev, "mtu is already %u\n", netdev->mtu);
		return 0;
	}

	if (new_mtu < netdev->min_mtu) {
		netdev_err(netdev, "new mtu invalid. min_mtu is %d\n",
			   netdev->min_mtu);
		return -EINVAL;
	} else if (new_mtu > netdev->max_mtu) {
		netdev_err(netdev, "new mtu invalid. max_mtu is %d\n",
			   netdev->max_mtu);
		return -EINVAL;
	}

	/* if a reset is in progress, wait for some time for it to complete */
	do {
		if (ice_is_reset_in_progress(pf->state)) {
			count++;
			usleep_range(1000, 2000);
		} else {
			break;
		}
	} while (count < 100);

	if (count == 100) {
		netdev_err(netdev, "can't change mtu. Device is busy\n");
		return -EBUSY;
	}

	netdev->mtu = new_mtu;

	/* if VSI is up, bring it down and then back up */
	if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
		int err;

		err = ice_down(vsi);
		if (err) {
			netdev_err(netdev, "change mtu if_down err %d\n", err);
			return err;
		}

		err = ice_up(vsi);
		if (err) {
			netdev_err(netdev, "change mtu if_up err %d\n", err);
			return err;
		}
	}

	netdev_dbg(netdev, "changed mtu to %d\n", new_mtu);
	return 0;
}
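
/* Usage sketch (illustrative): in-kernel callers change the MTU through
 * the net core rather than calling the NDO directly:
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(netdev, 9000);   // ends up in ice_change_mtu()
 *	rtnl_unlock();
 *
 * dev_set_mtu() already range-checks new_mtu against netdev->min_mtu and
 * netdev->max_mtu, so the explicit checks above are largely defensive.
 */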

/**
 * ice_set_rss - Set RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 */
int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;

	if (seed) {
		struct ice_aqc_get_set_rss_keys *buf =
			(struct ice_aqc_get_set_rss_keys *)seed;

		status = ice_aq_set_rss_key(hw, vsi->idx, buf);
		if (status) {
			dev_err(&pf->pdev->dev,
				"Cannot set RSS key, err %d aq_err %d\n",
				status, hw->adminq.rq_last_status);
			return -EIO;
		}
	}

	if (lut) {
		status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
					    lut, lut_size);
		if (status) {
			dev_err(&pf->pdev->dev,
				"Cannot set RSS lut, err %d aq_err %d\n",
				status, hw->adminq.rq_last_status);
			return -EIO;
		}
	}

	return 0;
}

/**
 * ice_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;

	if (seed) {
		struct ice_aqc_get_set_rss_keys *buf =
			(struct ice_aqc_get_set_rss_keys *)seed;

		status = ice_aq_get_rss_key(hw, vsi->idx, buf);
		if (status) {
			dev_err(&pf->pdev->dev,
				"Cannot get RSS key, err %d aq_err %d\n",
				status, hw->adminq.rq_last_status);
			return -EIO;
		}
	}

	if (lut) {
		status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
					    lut, lut_size);
		if (status) {
			dev_err(&pf->pdev->dev,
				"Cannot get RSS lut, err %d aq_err %d\n",
				status, hw->adminq.rq_last_status);
			return -EIO;
		}
	}

	return 0;
}
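
/* Caller sketch (illustrative): an ethtool .get_rxfh style path would
 * use this pair roughly as follows. The lookup-table size comes from the
 * VSI; the 64-byte seed buffer size here is an assumption made for the
 * sake of the example:
 *
 *	u8 seed[64];
 *	u8 *lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
 *
 *	if (lut && !ice_get_rss(vsi, seed, lut, vsi->rss_table_size))
 *		// copy seed/lut into the caller's response buffers
 *		;
 *	kfree(lut);
 */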

/**
 * ice_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process ID
 * @seq: RTNL message seq
 * @dev: the netdev being configured
 * @filter_mask: filter mask passed in
 * @nlflags: netlink flags passed in
 *
 * Return the bridge mode (VEB/VEPA)
 */
static int
ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
		   struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u16 bmode;

	bmode = pf->first_sw->bridge_mode;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
				       filter_mask, NULL);
}

/**
 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
 * @vsi: Pointer to VSI structure
 * @bmode: Hardware bridge mode (VEB/VEPA)
 *
 * Returns 0 on success, negative on failure
 */
static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct ice_aqc_vsi_props *vsi_props;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	vsi_props = &vsi->info;

	ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;

	if (bmode == BRIDGE_MODE_VEB)
		/* change from VEPA to VEB mode */
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
	else
		/* change from VEB to VEPA mode */
		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
			bmode, status, hw->adminq.sq_last_status);
		ret = -EIO;
		goto out;
	}

	vsi_props->sw_flags = ctxt->info.sw_flags;

out:
	devm_kfree(dev, ctxt);
	return ret;
}

/**
 * ice_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge setlink flags
 * @extack: netlink extended ack
 *
 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
 * not already set) for all VSIs connected to this switch, and also updates the
 * unicast switch filter rules for the corresponding switch of the netdev.
 */
static int
ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
		   u16 __always_unused flags,
		   struct netlink_ext_ack __always_unused *extack)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_pf *pf = np->vsi->back;
	struct nlattr *attr, *br_spec;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	struct ice_sw *pf_sw;
	int rem, v, err = 0;

	pf_sw = pf->first_sw;
	/* find the attribute in the netlink message */
	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;
		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;
		/* Continue if bridge mode is not being flipped */
		if (mode == pf_sw->bridge_mode)
			continue;
		/* Iterate through the PF VSI list and update the loopback
		 * mode of each VSI
		 */
		ice_for_each_vsi(pf, v) {
			if (!pf->vsi[v])
				continue;
			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
			if (err)
				return err;
		}

		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
		/* Update the unicast switch filter rules for the corresponding
		 * switch of the netdev
		 */
		status = ice_update_sw_rule_bridge_mode(hw);
		if (status) {
			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n",
				   mode, status, hw->adminq.sq_last_status);
			/* revert hw->evb_veb */
			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
			return -EIO;
		}

		pf_sw->bridge_mode = mode;
	}

	return 0;
}
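
/* For reference: this is the handler behind flipping the embedded switch
 * mode from userspace, e.g. "bridge link set dev <pf-netdev> hwmode veb"
 * with iproute2. In VEB mode, traffic between VSIs sharing an uplink is
 * looped back locally in the adapter (ICE_AQ_VSI_SW_FLAG_ALLOW_LB set
 * above); in VEPA mode it is always forwarded to the adjacent switch.
 */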

/**
 * ice_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 */
static void ice_tx_timeout(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_ring *tx_ring = NULL;
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	int hung_queue = -1;
	u32 i;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		unsigned long trans_start;
		struct netdev_queue *q;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       trans_start + netdev->watchdog_timeo)) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues)
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	else
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_txq; i++)
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
				if (hung_queue == vsi->tx_rings[i]->q_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}

	/* Reset recovery level if enough time has elapsed after last timeout.
	 * Also ensure no new reset action happens before next timeout period.
	 */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
		pf->tx_timeout_recovery_level = 1;
	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
				       netdev->watchdog_timeo)))
		return;

	if (tx_ring) {
		struct ice_hw *hw = &pf->hw;
		u32 head, val = 0;

		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) &
			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
		/* Read interrupt register */
		if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
			val = rd32(hw,
				   GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));

		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
			    vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use, val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__ICE_PFR_REQ, pf->state);
		break;
	case 2:
		set_bit(__ICE_CORER_REQ, pf->state);
		break;
	case 3:
		set_bit(__ICE_GLOBR_REQ, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
		set_bit(__ICE_DOWN, pf->state);
		set_bit(__ICE_NEEDS_RESTART, vsi->state);
		set_bit(__ICE_SERVICE_DIS, pf->state);
		break;
	}

	ice_service_task_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_open(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int err;

	if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) {
		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
		return -EIO;
	}

	netif_carrier_off(netdev);

	err = ice_force_phys_link_state(vsi, true);
	if (err) {
		netdev_err(netdev,
			   "Failed to set physical link up, error %d\n", err);
		return err;
	}

	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);
	return err;
}

/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns success only - not allowed to fail
 */
static int ice_stop(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	ice_vsi_close(vsi);

	return 0;
}

/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
static netdev_features_t
ice_features_check(struct sk_buff *skb,
		   struct net_device __always_unused *netdev,
		   netdev_features_t features)
{
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;

	len = skb_network_header(skb) - skb->data;
	if (len & ~(ICE_TXD_MACLEN_MAX))
		goto out_rm_features;

	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len & ~(ICE_TXD_IPLEN_MAX))
		goto out_rm_features;

	if (skb->encapsulation) {
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len & ~(ICE_TXD_L4LEN_MAX))
			goto out_rm_features;

		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len & ~(ICE_TXD_IPLEN_MAX))
			goto out_rm_features;
	}

	return features;
out_rm_features:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
4340
4341static const struct net_device_ops ice_netdev_ops = {
4342 .ndo_open = ice_open,
4343 .ndo_stop = ice_stop,
4344 .ndo_start_xmit = ice_start_xmit,
4345 .ndo_features_check = ice_features_check,
4346 .ndo_set_rx_mode = ice_set_rx_mode,
4347 .ndo_set_mac_address = ice_set_mac_address,
4348 .ndo_validate_addr = eth_validate_addr,
4349 .ndo_change_mtu = ice_change_mtu,
4350 .ndo_get_stats64 = ice_get_stats64,
4351 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
4352 .ndo_set_vf_mac = ice_set_vf_mac,
4353 .ndo_get_vf_config = ice_get_vf_cfg,
4354 .ndo_set_vf_trust = ice_set_vf_trust,
4355 .ndo_set_vf_vlan = ice_set_vf_port_vlan,
4356 .ndo_set_vf_link_state = ice_set_vf_link_state,
4357 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
4358 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
4359 .ndo_set_features = ice_set_features,
4360 .ndo_bridge_getlink = ice_bridge_getlink,
4361 .ndo_bridge_setlink = ice_bridge_setlink,
4362 .ndo_fdb_add = ice_fdb_add,
4363 .ndo_fdb_del = ice_fdb_del,
4364 .ndo_tx_timeout = ice_tx_timeout,
4365};
4366
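/* Wiring note: the netdev is bound to this ops table during interface
 * creation elsewhere in the driver, in the usual way:
 *
 *	netdev->netdev_ops = &ice_netdev_ops;
 */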