1
2
3
4#include "iavf.h"
5#include "iavf_prototype.h"
6#include "iavf_client.h"
7
8
9
10
11#define CREATE_TRACE_POINTS
12#include "iavf_trace.h"
13
/* Forward declarations for routines defined later in this file. */
static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);

/* Driver identification strings (exported via module info / ethtool). */
char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

/* "-k" suffix marks an in-kernel (as opposed to out-of-tree) build. */
#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 3
#define DRV_VERSION_MINOR 2
#define DRV_VERSION_BUILD 3
/* Assembled as "<major>.<minor>.<build>-k". */
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." \
	__stringify(DRV_VERSION_BUILD) \
	DRV_KERN
const char iavf_driver_version[] = DRV_VERSION;
static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";
34
35
36
37
38
39
40
41
42
/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

/* Keep module loading by the historical i40evf name working. */
MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

/* Workqueue shared by this driver's deferred-work handlers. */
static struct workqueue_struct *iavf_wq;
61
62
63
64
65
66
67
68
69iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
70 struct iavf_dma_mem *mem,
71 u64 size, u32 alignment)
72{
73 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
74
75 if (!mem)
76 return I40E_ERR_PARAM;
77
78 mem->size = ALIGN(size, alignment);
79 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
80 (dma_addr_t *)&mem->pa, GFP_KERNEL);
81 if (mem->va)
82 return 0;
83 else
84 return I40E_ERR_NO_MEMORY;
85}
86
87
88
89
90
91
92iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct iavf_dma_mem *mem)
93{
94 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
95
96 if (!mem || !mem->va)
97 return I40E_ERR_PARAM;
98 dma_free_coherent(&adapter->pdev->dev, mem->size,
99 mem->va, (dma_addr_t)mem->pa);
100 return 0;
101}
102
103
104
105
106
107
108
109iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
110 struct iavf_virt_mem *mem, u32 size)
111{
112 if (!mem)
113 return I40E_ERR_PARAM;
114
115 mem->size = size;
116 mem->va = kzalloc(size, GFP_KERNEL);
117
118 if (mem->va)
119 return 0;
120 else
121 return I40E_ERR_NO_MEMORY;
122}
123
124
125
126
127
128
129iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, struct iavf_virt_mem *mem)
130{
131 if (!mem)
132 return I40E_ERR_PARAM;
133
134
135 kfree(mem->va);
136
137 return 0;
138}
139
140
141
142
143
144
145
146void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
147{
148 char buf[512];
149 va_list argptr;
150
151 if (!(mask & ((struct iavf_hw *)hw)->debug_mask))
152 return;
153
154 va_start(argptr, fmt_str);
155 vsnprintf(buf, sizeof(buf), fmt_str, argptr);
156 va_end(argptr);
157
158
159 pr_info("%s", buf);
160}
161
162
163
164
165
166void iavf_schedule_reset(struct iavf_adapter *adapter)
167{
168 if (!(adapter->flags &
169 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
170 adapter->flags |= IAVF_FLAG_RESET_NEEDED;
171 schedule_work(&adapter->reset_task);
172 }
173}
174
175
176
177
178
/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * Called by the stack when a Tx queue appears stuck; counts the event and
 * requests a full VF reset to recover.
 **/
static void iavf_tx_timeout(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	iavf_schedule_reset(adapter);
}
186
187
188
189
190
/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 *
 * Masks the misc (admin queue) vector and waits for any in-flight handler
 * to complete.
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	/* MSI-X not set up (or already torn down); nothing to mask. */
	if (!adapter->msix_entries)
		return;

	/* Clearing the dynamic control register masks the misc vector. */
	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	/* read flush() so the write reaches hardware before we synchronize */
	iavf_flush(hw);

	/* Vector 0 is the misc/admin-queue vector. */
	synchronize_irq(adapter->msix_entries[0].vector);
}
204
205
206
207
208
/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 *
 * Re-enables the misc vector and the admin-queue interrupt cause.
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	/* read flush() so the writes reach hardware immediately */
	iavf_flush(hw);
}
219
220
221
222
223
/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 *
 * Masks every queue vector (vectors 1..n; vector 0 is the misc vector and
 * is handled by iavf_misc_irq_disable()) and waits for in-flight handlers.
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		/* DYN_CTLN1 registers are indexed from 0, vectors from 1 */
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	/* read flush() */
	iavf_flush(hw);
}
238
239
240
241
242
243
244void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
245{
246 struct iavf_hw *hw = &adapter->hw;
247 int i;
248
249 for (i = 1; i < adapter->num_msix_vectors; i++) {
250 if (mask & BIT(i - 1)) {
251 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
252 IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
253 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
254 }
255 }
256}
257
258
259
260
261
262
/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to force the register writes to hardware
 *	   with a read flush
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
	struct iavf_hw *hw = &adapter->hw;

	/* Enable the misc/admin-queue vector, then every queue vector. */
	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter, ~0);

	if (flush)
		iavf_flush(hw);
}
273
274
275
276
277
278
/**
 * iavf_msix_aq - Interrupt handler for vector 0 (misc/admin queue)
 * @irq: interrupt number
 * @data: pointer to netdev (registered as dev_id in iavf_request_misc_irq)
 *
 * Acknowledges the interrupt causes and defers the real admin-queue
 * processing to the adminq task, which can sleep.
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts; these reads acknowledge the cause
	 * registers (presumably read-to-clear — the values are discarded)
	 */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	/* schedule work on the private workqueue */
	schedule_work(&adapter->adminq_task);

	return IRQ_HANDLED;
}
294
295
296
297
298
299
300static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
301{
302 struct iavf_q_vector *q_vector = data;
303
304 if (!q_vector->tx.ring && !q_vector->rx.ring)
305 return IRQ_HANDLED;
306
307 napi_schedule_irqoff(&q_vector->napi);
308
309 return IRQ_HANDLED;
310}
311
312
313
314
315
316
317
318static void
319iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
320{
321 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
322 struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
323 struct iavf_hw *hw = &adapter->hw;
324
325 rx_ring->q_vector = q_vector;
326 rx_ring->next = q_vector->rx.ring;
327 rx_ring->vsi = &adapter->vsi;
328 q_vector->rx.ring = rx_ring;
329 q_vector->rx.count++;
330 q_vector->rx.next_update = jiffies + 1;
331 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
332 q_vector->ring_mask |= BIT(r_idx);
333 wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
334 q_vector->rx.current_itr);
335 q_vector->rx.current_itr = q_vector->rx.target_itr;
336}
337
338
339
340
341
342
343
/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 *
 * Links the tx ring into the vector's ring list, counts the ring pair,
 * and programs the initial Tx ITR into the vector's ITR register.
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	/* Program the new ITR and remember it as the current value. */
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}
363
364
365
366
367
368
369
370
371
372
373
374static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
375{
376 int rings_remaining = adapter->num_active_queues;
377 int ridx = 0, vidx = 0;
378 int q_vectors;
379
380 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
381
382 for (; ridx < rings_remaining; ridx++) {
383 iavf_map_vector_to_rxq(adapter, vidx, ridx);
384 iavf_map_vector_to_txq(adapter, vidx, ridx);
385
386
387
388
389 if (++vidx >= q_vectors)
390 vidx = 0;
391 }
392
393 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
394}
395
396
397
398
399
400
401
402
403
/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * Called when the CPU affinity of an IRQ changes; records the new mask
 * in the owning q_vector.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct iavf_q_vector *q_vector =
		container_of(notify, struct iavf_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}
412
413
414
415
416
417
418
419
420
/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * Intentionally empty: there is nothing to tear down when the notifier
 * is released, but the callback must exist for
 * irq_set_affinity_notifier() to be usable.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}
422
423
424
425
426
427
428
429
430
/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device name, used to build per-vector IRQ names
 *
 * Allocates MSI-X vectors for the traffic (queue) vectors, registers
 * affinity notifiers and spreads affinity hints across CPUs. On failure,
 * unwinds every vector requested so far.
 *
 * Returns 0 on success, the request_irq() error otherwise.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	iavf_irq_disable(adapter);
	/* vector 0 is reserved for the misc/admin-queue interrupt */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		/* Name the IRQ after the rings the vector services. */
		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-TxRx-%d", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-rx-%d", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-tx-%d", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  iavf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						   iavf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note
		 * that get_cpu_mask returns a mask with a permanent lifetime
		 * so it is safe to pass to irq_set_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	/* Unwind in reverse: vectors [0, vector) were fully set up. */
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}
497
498
499
500
501
502
503
504
505
506static int iavf_request_misc_irq(struct iavf_adapter *adapter)
507{
508 struct net_device *netdev = adapter->netdev;
509 int err;
510
511 snprintf(adapter->misc_vector_name,
512 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
513 dev_name(&adapter->pdev->dev));
514 err = request_irq(adapter->msix_entries[0].vector,
515 &iavf_msix_aq, 0,
516 adapter->misc_vector_name, netdev);
517 if (err) {
518 dev_err(&adapter->pdev->dev,
519 "request_irq for %s failed: %d\n",
520 adapter->misc_vector_name, err);
521 free_irq(adapter->msix_entries[0].vector, netdev);
522 }
523 return err;
524}
525
526
527
528
529
530
531
532static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
533{
534 int vector, irq_num, q_vectors;
535
536 if (!adapter->msix_entries)
537 return;
538
539 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
540
541 for (vector = 0; vector < q_vectors; vector++) {
542 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
543 irq_set_affinity_notifier(irq_num, NULL);
544 irq_set_affinity_hint(irq_num, NULL);
545 free_irq(irq_num, &adapter->q_vectors[vector]);
546 }
547}
548
549
550
551
552
553
554
/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0 (admin queue). The netdev was the dev_id passed
 * to request_irq() in iavf_request_misc_irq().
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}
564
565
566
567
568
569
570
571static void iavf_configure_tx(struct iavf_adapter *adapter)
572{
573 struct iavf_hw *hw = &adapter->hw;
574 int i;
575
576 for (i = 0; i < adapter->num_active_queues; i++)
577 adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
578}
579
580
581
582
583
584
585
/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Picks an Rx buffer size, points each Rx ring at its tail doorbell
 * register, and selects the build_skb vs legacy receive path per ring.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
	unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
	struct iavf_hw *hw = &adapter->hw;
	int i;

	/* On systems with pages smaller than 8K, non-legacy Rx may use a
	 * larger buffer; legacy Rx always keeps the 2048 default.
	 */
#if (PAGE_SIZE < 8192)
	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
		struct net_device *netdev = adapter->netdev;

		/* Default to the 3072 buffer here; presumably needed for
		 * frames that don't fit in 2K once build_skb padding is
		 * accounted for — confirm against the Rx buffer macros.
		 */
		rx_buf_len = IAVF_RXBUFFER_3072;

		/* For a standard Ethernet MTU a 1536 buffer suffices,
		 * unless 2K is already too small with padding on this
		 * architecture (IAVF_2K_TOO_SMALL_WITH_PADDING).
		 */
		if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
	}
#endif

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = rx_buf_len;

		/* Legacy Rx disables the build_skb fast path. */
		if (adapter->flags & IAVF_FLAG_LEGACY_RX)
			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
		else
			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
	}
}
623
624
625
626
627
628
629
630
631
632static struct
633iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
634{
635 struct iavf_vlan_filter *f;
636
637 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
638 if (vlan == f->vlan)
639 return f;
640 }
641 return NULL;
642}
643
644
645
646
647
648
649
650
651static struct
652iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
653{
654 struct iavf_vlan_filter *f = NULL;
655
656 spin_lock_bh(&adapter->mac_vlan_list_lock);
657
658 f = iavf_find_vlan(adapter, vlan);
659 if (!f) {
660 f = kzalloc(sizeof(*f), GFP_KERNEL);
661 if (!f)
662 goto clearout;
663
664 f->vlan = vlan;
665
666 INIT_LIST_HEAD(&f->list);
667 list_add(&f->list, &adapter->vlan_filter_list);
668 f->add = true;
669 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
670 }
671
672clearout:
673 spin_unlock_bh(&adapter->mac_vlan_list_lock);
674 return f;
675}
676
677
678
679
680
681
682static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
683{
684 struct iavf_vlan_filter *f;
685
686 spin_lock_bh(&adapter->mac_vlan_list_lock);
687
688 f = iavf_find_vlan(adapter, vlan);
689 if (f) {
690 f->remove = true;
691 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
692 }
693
694 spin_unlock_bh(&adapter->mac_vlan_list_lock);
695}
696
697
698
699
700
701
702
703static int iavf_vlan_rx_add_vid(struct net_device *netdev,
704 __always_unused __be16 proto, u16 vid)
705{
706 struct iavf_adapter *adapter = netdev_priv(netdev);
707
708 if (!VLAN_ALLOWED(adapter))
709 return -EIO;
710 if (iavf_add_vlan(adapter, vid) == NULL)
711 return -ENOMEM;
712 return 0;
713}
714
715
716
717
718
719
720
721static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
722 __always_unused __be16 proto, u16 vid)
723{
724 struct iavf_adapter *adapter = netdev_priv(netdev);
725
726 if (VLAN_ALLOWED(adapter)) {
727 iavf_del_vlan(adapter, vid);
728 return 0;
729 }
730 return -EIO;
731}
732
733
734
735
736
737
738
739
740
741static struct
742iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
743 const u8 *macaddr)
744{
745 struct iavf_mac_filter *f;
746
747 if (!macaddr)
748 return NULL;
749
750 list_for_each_entry(f, &adapter->mac_filter_list, list) {
751 if (ether_addr_equal(macaddr, f->macaddr))
752 return f;
753 }
754 return NULL;
755}
756
757
758
759
760
761
762
763
764static struct
765iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
766 const u8 *macaddr)
767{
768 struct iavf_mac_filter *f;
769
770 if (!macaddr)
771 return NULL;
772
773 f = iavf_find_filter(adapter, macaddr);
774 if (!f) {
775 f = kzalloc(sizeof(*f), GFP_ATOMIC);
776 if (!f)
777 return f;
778
779 ether_addr_copy(f->macaddr, macaddr);
780
781 list_add_tail(&f->list, &adapter->mac_filter_list);
782 f->add = true;
783 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
784 } else {
785 f->remove = false;
786 }
787
788 return f;
789}
790
791
792
793
794
795
796
797
/**
 * iavf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Marks the current hardware address for removal, adds a filter for the
 * new one, and updates both hw->mac.addr and netdev->dev_addr on success.
 *
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, -EPERM
 * when the PF pinned the address, -ENOMEM on filter allocation failure.
 **/
static int iavf_set_mac(struct net_device *netdev, void *p)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_mac_filter *f;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* No-op when the address is unchanged. */
	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	/* The PF administratively set this VF's address; refuse to change. */
	if (adapter->flags & IAVF_FLAG_ADDR_SET_BY_PF)
		return -EPERM;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Schedule removal of the filter for the old hardware address. */
	f = iavf_find_filter(adapter, hw->mac.addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	f = iavf_add_filter(adapter, addr->sa_data);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* Only commit the new address once the filter exists. */
	if (f) {
		ether_addr_copy(hw->mac.addr, addr->sa_data);
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
	}

	return (f == NULL) ? -ENOMEM : 0;
}
833
834
835
836
837
838
839
840
841
842static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
843{
844 struct iavf_adapter *adapter = netdev_priv(netdev);
845
846 if (iavf_add_filter(adapter, addr))
847 return 0;
848 else
849 return -ENOMEM;
850}
851
852
853
854
855
856
857
858
859
860static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
861{
862 struct iavf_adapter *adapter = netdev_priv(netdev);
863 struct iavf_mac_filter *f;
864
865
866
867
868
869
870 if (ether_addr_equal(addr, netdev->dev_addr))
871 return 0;
872
873 f = iavf_find_filter(adapter, addr);
874 if (f) {
875 f->remove = true;
876 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
877 }
878 return 0;
879}
880
881
882
883
884
/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 *
 * Syncs the netdev's unicast/multicast lists into our filter list and
 * requests promiscuous/allmulti mode changes from the PF when the netdev
 * flags disagree with our current state.
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* Request or release promiscuous mode only on a state change. */
	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & IAVF_FLAG_PROMISC_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & IAVF_FLAG_PROMISC_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;

	/* Likewise for all-multicast. */
	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
}
908
909
910
911
912
913static void iavf_napi_enable_all(struct iavf_adapter *adapter)
914{
915 int q_idx;
916 struct iavf_q_vector *q_vector;
917 int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
918
919 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
920 struct napi_struct *napi;
921
922 q_vector = &adapter->q_vectors[q_idx];
923 napi = &q_vector->napi;
924 napi_enable(napi);
925 }
926}
927
928
929
930
931
932static void iavf_napi_disable_all(struct iavf_adapter *adapter)
933{
934 int q_idx;
935 struct iavf_q_vector *q_vector;
936 int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
937
938 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
939 q_vector = &adapter->q_vectors[q_idx];
940 napi_disable(&q_vector->napi);
941 }
942}
943
944
945
946
947
948static void iavf_configure(struct iavf_adapter *adapter)
949{
950 struct net_device *netdev = adapter->netdev;
951 int i;
952
953 iavf_set_rx_mode(netdev);
954
955 iavf_configure_tx(adapter);
956 iavf_configure_rx(adapter);
957 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;
958
959 for (i = 0; i < adapter->num_active_queues; i++) {
960 struct iavf_ring *ring = &adapter->rx_rings[i];
961
962 iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
963 }
964}
965
966
967
968
969
970
971
/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Marks the VSI running, enables NAPI, asks the PF to enable the queues,
 * and kicks the watchdog.
 **/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
	adapter->state = __IAVF_RUNNING;
	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_napi_enable_all(adapter);

	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
	/* Let the client (RDMA) module know it can open once queues are up. */
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}
984
985
986
987
988
989
990
/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Stops traffic, disables interrupts/NAPI, marks every MAC, VLAN, and
 * cloud filter for removal, and (unless comms with the PF are down or a
 * reset is in progress) queues the removal and queue-disable requests.
 **/
void iavf_down(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct iavf_vlan_filter *vlf;
	struct iavf_mac_filter *f;
	struct iavf_cloud_filter *cf;

	/* Already down (or on the way down); nothing to do. */
	if (adapter->state <= __IAVF_DOWN_PENDING)
		return;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	iavf_napi_disable_all(adapter);
	iavf_irq_disable(adapter);

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* clear the sync flag on all filters */
	__dev_uc_unsync(adapter->netdev, NULL);
	__dev_mc_unsync(adapter->netdev, NULL);

	/* remove all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->remove = true;
	}

	/* remove all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->remove = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* remove all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		cf->del = true;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
	    adapter->state != __IAVF_RESETTING) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this. Note the '=' (not '|=')
		 * on the first assignment: any previously queued requests
		 * are deliberately dropped.
		 */
		adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
	}

	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Works with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure (with msix_entries freed).
 **/
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is the minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated (pci_enable_msix_range returns the
	 * granted count on success).
	 */
	adapter->num_msix_vectors = err;
	return 0;
}
1091
1092
1093
1094
1095
1096
1097
1098static void iavf_free_queues(struct iavf_adapter *adapter)
1099{
1100 if (!adapter->vsi_res)
1101 return;
1102 adapter->num_active_queues = 0;
1103 kfree(adapter->tx_rings);
1104 adapter->tx_rings = NULL;
1105 kfree(adapter->rx_rings);
1106 adapter->rx_rings = NULL;
1107}
1108
1109
1110
1111
1112
1113
1114
1115
1116
/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (with any partial
 * allocation freed).
 **/
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
	int i, num_active_queues;

	/* If we're in reset reallocating queues we don't actually know yet
	 * for certain the PF gave us the number of queues we asked for but
	 * we'll assume it did. Once basic reset is finished we'll confirm
	 * once we start negotiating config with PF.
	 */
	if (adapter->num_req_queues)
		num_active_queues = adapter->num_req_queues;
	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
		 adapter->num_tc)
		num_active_queues = adapter->ch_config.total_qps;
	else
		/* Default: one queue pair per online CPU, capped by what
		 * the PF granted us.
		 */
		num_active_queues = min_t(int,
					  adapter->vsi_res->num_queue_pairs,
					  (int)(num_online_cpus()));

	adapter->tx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < num_active_queues; i++) {
		struct iavf_ring *tx_ring;
		struct iavf_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->itr_setting = IAVF_ITR_TX_DEF;
		if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->dev = &adapter->pdev->dev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->itr_setting = IAVF_ITR_RX_DEF;
	}

	adapter->num_active_queues = num_active_queues;

	return 0;

err_out:
	iavf_free_queues(adapter);
	return -ENOMEM;
}
1176
1177
1178
1179
1180
1181
1182
1183
/**
 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 *
 * Returns 0 on success, negative on failure. Always updates the netdev's
 * real Rx/Tx queue counts, even on failure.
 **/
static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really doesn't
	 * do us much good if we have more vectors than CPUs. However, we
	 * already take into account the number of online CPUs when
	 * picking num_active_queues, so cap at one vector per queue pair
	 * plus the non-queue vectors, bounded by what the PF allows.
	 */
	v_budget = min_t(int, pairs + NONQ_VECS,
			 (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = iavf_acquire_msix_vectors(adapter, v_budget);

out:
	netif_set_real_num_rx_queues(adapter->netdev, pairs);
	netif_set_real_num_tx_queues(adapter->netdev, pairs);
	return err;
}
1221
1222
1223
1224
1225
1226
1227
/**
 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Programs the RSS key and lookup table through admin-queue commands.
 * Refuses to run while another virtchnl operation is pending.
 *
 * Returns 0 on success, -EBUSY or an AQ status code on failure.
 **/
static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
	struct i40e_aqc_get_set_rss_key_data *rss_key =
		(struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct iavf_hw *hw = &adapter->hw;
	int ret = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
		return ret;

	}

	ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				  adapter->rss_lut, adapter->rss_lut_size);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
	}

	return ret;

}
1262
1263
1264
1265
1266
1267
1268
/**
 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Programs the RSS key and lookup table directly into the VF registers.
 *
 * NOTE(review): both loops use "<=", which walks rss_*_size / 4 + 1
 * dwords; if rss_key/rss_lut are allocated at exactly rss_*_size bytes
 * this reads one u32 past each buffer and writes one register past the
 * intended range. Verify against the HKEY/HLUT register counts and the
 * rss_key/rss_lut allocation sizes before changing — the inclusive bound
 * may be intentional if rss_*_size encodes a max index rather than a
 * byte count.
 *
 * Always returns 0.
 **/
static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

	dw = (u32 *)adapter->rss_key;
	for (i = 0; i <= adapter->rss_key_size / 4; i++)
		wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
		wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);

	iavf_flush(hw);

	return 0;
}
1287
1288
1289
1290
1291
1292
1293
1294int iavf_config_rss(struct iavf_adapter *adapter)
1295{
1296
1297 if (RSS_PF(adapter)) {
1298 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
1299 IAVF_FLAG_AQ_SET_RSS_KEY;
1300 return 0;
1301 } else if (RSS_AQ(adapter)) {
1302 return iavf_config_rss_aq(adapter);
1303 } else {
1304 return iavf_config_rss_reg(adapter);
1305 }
1306}
1307
1308
1309
1310
1311
1312static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
1313{
1314 u16 i;
1315
1316 for (i = 0; i < adapter->rss_lut_size; i++)
1317 adapter->rss_lut[i] = i % adapter->num_active_queues;
1318}
1319
1320
1321
1322
1323
1324
1325
/**
 * iavf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Sets the default hash-enable fields (unless the PF owns RSS), fills
 * the LUT, generates a random key, and applies the configuration.
 *
 * Returns 0 on success, negative on failure.
 **/
static int iavf_init_rss(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int ret;

	if (!RSS_PF(adapter)) {
		/* Enable all supported RSS hash types; the expanded set is
		 * used when the PF advertises PCTYPE_V2 support.
		 */
		if (adapter->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
		else
			adapter->hena = IAVF_DEFAULT_RSS_HENA;

		/* hena is 64 bits wide, split across two 32-bit registers */
		wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
		wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
	}

	iavf_fill_rss_lut(adapter);
	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
	ret = iavf_config_rss(adapter);

	return ret;
}
1349
1350
1351
1352
1353
1354
1355
1356
1357static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
1358{
1359 int q_idx = 0, num_q_vectors;
1360 struct iavf_q_vector *q_vector;
1361
1362 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1363 adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
1364 GFP_KERNEL);
1365 if (!adapter->q_vectors)
1366 return -ENOMEM;
1367
1368 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1369 q_vector = &adapter->q_vectors[q_idx];
1370 q_vector->adapter = adapter;
1371 q_vector->vsi = &adapter->vsi;
1372 q_vector->v_idx = q_idx;
1373 q_vector->reg_idx = q_idx;
1374 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
1375 netif_napi_add(adapter->netdev, &q_vector->napi,
1376 iavf_napi_poll, NAPI_POLL_WEIGHT);
1377 }
1378
1379 return 0;
1380}
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390static void iavf_free_q_vectors(struct iavf_adapter *adapter)
1391{
1392 int q_idx, num_q_vectors;
1393 int napi_vectors;
1394
1395 if (!adapter->q_vectors)
1396 return;
1397
1398 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1399 napi_vectors = adapter->num_active_queues;
1400
1401 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1402 struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
1403
1404 if (q_idx < napi_vectors)
1405 netif_napi_del(&q_vector->napi);
1406 }
1407 kfree(adapter->q_vectors);
1408 adapter->q_vectors = NULL;
1409}
1410
1411
1412
1413
1414
1415
/**
 * iavf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 * Disables MSI-X and frees the msix_entries array. Safe to call when
 * MSI-X was never enabled.
 **/
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}
1425
1426
1427
1428
1429
1430
1431int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
1432{
1433 int err;
1434
1435 err = iavf_alloc_queues(adapter);
1436 if (err) {
1437 dev_err(&adapter->pdev->dev,
1438 "Unable to allocate memory for queues\n");
1439 goto err_alloc_queues;
1440 }
1441
1442 rtnl_lock();
1443 err = iavf_set_interrupt_capability(adapter);
1444 rtnl_unlock();
1445 if (err) {
1446 dev_err(&adapter->pdev->dev,
1447 "Unable to setup interrupt capabilities\n");
1448 goto err_set_interrupt;
1449 }
1450
1451 err = iavf_alloc_q_vectors(adapter);
1452 if (err) {
1453 dev_err(&adapter->pdev->dev,
1454 "Unable to allocate memory for queue vectors\n");
1455 goto err_alloc_q_vectors;
1456 }
1457
1458
1459
1460
1461
1462
1463 if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1464 adapter->num_tc)
1465 dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
1466 adapter->num_tc);
1467
1468 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
1469 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
1470 adapter->num_active_queues);
1471
1472 return 0;
1473err_alloc_q_vectors:
1474 iavf_reset_interrupt_capability(adapter);
1475err_set_interrupt:
1476 iavf_free_queues(adapter);
1477err_alloc_queues:
1478 return err;
1479}
1480
1481
1482
1483
1484
1485static void iavf_free_rss(struct iavf_adapter *adapter)
1486{
1487 kfree(adapter->rss_key);
1488 adapter->rss_key = NULL;
1489
1490 kfree(adapter->rss_lut);
1491 adapter->rss_lut = NULL;
1492}
1493
1494
1495
1496
1497
1498
1499
/**
 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 *
 * Tears down the entire interrupt scheme (IRQs, MSI-X, q_vectors, rings)
 * and rebuilds it, then remaps rings to vectors and reconfigures RSS.
 *
 * Returns 0 on success, negative on failure.
 **/
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	/* Traffic IRQs only exist while the interface is running. */
	if (netif_running(netdev))
		iavf_free_traffic_irqs(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);
	iavf_free_queues(adapter);

	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err;

	netif_tx_stop_all_queues(netdev);

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err;

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_map_rings_to_vectors(adapter);

	/* Prefer reconfiguring RSS via the admin queue when supported. */
	if (RSS_AQ(adapter))
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
	else
		err = iavf_init_rss(adapter);
err:
	return err;
}
1533
1534
1535
1536
1537
/**
 * iavf_watchdog_timer - Periodic call-back timer
 * @t: pointer to the expired timer
 *
 * Runs in atomic (timer) context, so defer the real work to the
 * watchdog task.  The timer is re-armed by the watchdog task itself.
 **/
static void iavf_watchdog_timer(struct timer_list *t)
{
	struct iavf_adapter *adapter = from_timer(adapter, t,
						  watchdog_timer);

	schedule_work(&adapter->watchdog_task);

}
1546
1547
1548
1549
1550
/**
 * iavf_watchdog_task - Periodic call-back task
 * @work: pointer to work_struct
 *
 * Central state machine of the driver: detects PF resets, and services
 * exactly one pending aq_required request per invocation (only one
 * virtchnl operation may be outstanding at a time).
 **/
static void iavf_watchdog_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						    struct iavf_adapter,
						    watchdog_task);
	struct iavf_hw *hw = &adapter->hw;
	u32 reg_val;

	/* Don't run concurrently with reset/init; try again next tick. */
	if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
		goto restart_watchdog;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if ((reg_val == VIRTCHNL_VFR_VFACTIVE) ||
		    (reg_val == VIRTCHNL_VFR_COMPLETED)) {
			/* A reset has finished while comms were down;
			 * restart driver initialization from scratch.
			 */
			dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
			adapter->state = __IAVF_STARTUP;
			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
			schedule_delayed_work(&adapter->init_task, 10);
			clear_bit(__IAVF_IN_CRITICAL_TASK,
				  &adapter->crit_section);
			/* Don't reschedule the watchdog here: when init
			 * completes it will restart the timer itself, and
			 * rescheduling now could race with init.
			 */
			return;
		}
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		goto watchdog_done;
	}

	if ((adapter->state < __IAVF_DOWN) ||
	    (adapter->flags & IAVF_FLAG_RESET_PENDING))
		goto watchdog_done;

	/* check for reset: the ARQ enable bit is cleared by hardware on VFR */
	reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
	if (!(adapter->flags & IAVF_FLAG_RESET_PENDING) && !reg_val) {
		adapter->state = __IAVF_RESETTING;
		adapter->flags |= IAVF_FLAG_RESET_PENDING;
		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
		schedule_work(&adapter->reset_task);
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		goto watchdog_done;
	}

	/* Process admin queue tasks. After init, everything gets done
	 * here so we don't race on the admin queue.
	 */
	if (adapter->current_op) {
		if (!iavf_asq_done(hw)) {
			dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
			iavf_send_api_ver(adapter);
		}
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) {
		iavf_send_vf_config_msg(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
		iavf_disable_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
		iavf_map_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
		iavf_add_ether_addrs(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
		iavf_add_vlans(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
		iavf_del_ether_addrs(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
		iavf_del_vlans(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
		iavf_configure_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
		iavf_enable_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
		/* This message goes straight to the firmware, not the
		 * PF, so we don't have to set current_op as we will
		 * not get a response through the ARQ.
		 */
		iavf_init_rss(adapter);
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
		iavf_get_hena(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
		iavf_set_hena(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
		iavf_set_rss_key(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
		iavf_set_rss_lut(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
				       FLAG_VF_MULTICAST_PROMISC);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
		goto watchdog_done;
	}

	/* Only leave promiscuous mode once both release flags are set. */
	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		iavf_set_promiscuous(adapter, 0);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
		iavf_enable_channels(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
		iavf_disable_channels(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		iavf_add_cloud_filter(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		iavf_del_cloud_filter(adapter);
		goto watchdog_done;
	}

	/* Nothing pending: give the client code a chance to run. */
	schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));

	if (adapter->state == __IAVF_RUNNING)
		iavf_request_stats(adapter);
watchdog_done:
	if (adapter->state == __IAVF_RUNNING)
		iavf_detect_recover_hung(&adapter->vsi);
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
	if (adapter->state == __IAVF_REMOVE)
		return;
	/* Poll faster while virtchnl requests are still outstanding. */
	if (adapter->aq_required)
		mod_timer(&adapter->watchdog_timer,
			  jiffies + msecs_to_jiffies(20));
	else
		mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
	schedule_work(&adapter->adminq_task);
}
1748
1749static void iavf_disable_vf(struct iavf_adapter *adapter)
1750{
1751 struct iavf_mac_filter *f, *ftmp;
1752 struct iavf_vlan_filter *fv, *fvtmp;
1753 struct iavf_cloud_filter *cf, *cftmp;
1754
1755 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
1756
1757
1758
1759
1760
1761 if (adapter->state == __IAVF_RUNNING) {
1762 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1763 netif_carrier_off(adapter->netdev);
1764 netif_tx_disable(adapter->netdev);
1765 adapter->link_up = false;
1766 iavf_napi_disable_all(adapter);
1767 iavf_irq_disable(adapter);
1768 iavf_free_traffic_irqs(adapter);
1769 iavf_free_all_tx_resources(adapter);
1770 iavf_free_all_rx_resources(adapter);
1771 }
1772
1773 spin_lock_bh(&adapter->mac_vlan_list_lock);
1774
1775
1776 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
1777 list_del(&f->list);
1778 kfree(f);
1779 }
1780
1781 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
1782 list_del(&fv->list);
1783 kfree(fv);
1784 }
1785
1786 spin_unlock_bh(&adapter->mac_vlan_list_lock);
1787
1788 spin_lock_bh(&adapter->cloud_filter_list_lock);
1789 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
1790 list_del(&cf->list);
1791 kfree(cf);
1792 adapter->num_cloud_filters--;
1793 }
1794 spin_unlock_bh(&adapter->cloud_filter_list_lock);
1795
1796 iavf_free_misc_irq(adapter);
1797 iavf_reset_interrupt_capability(adapter);
1798 iavf_free_queues(adapter);
1799 iavf_free_q_vectors(adapter);
1800 kfree(adapter->vf_res);
1801 iavf_shutdown_adminq(&adapter->hw);
1802 adapter->netdev->flags &= ~IFF_UP;
1803 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
1804 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
1805 adapter->state = __IAVF_DOWN;
1806 wake_up(&adapter->down_waitqueue);
1807 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
1808}
1809
1810#define IAVF_RESET_WAIT_MS 10
1811#define IAVF_RESET_WAIT_COUNT 500
1812
1813
1814
1815
1816
1817
1818
1819
/**
 * iavf_reset_task - Call-back task to handle hardware reset
 * @work: pointer to work_struct
 *
 * During reset we need to shut down and reinitialize the admin queue
 * before acquiring a bunch of locks. When a reset is not allowed, e.g.
 * because the VF is being removed, just bail out.
 **/
static void iavf_reset_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						    struct iavf_adapter,
						    reset_task);
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	struct net_device *netdev = adapter->netdev;
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_vlan_filter *vlf;
	struct iavf_cloud_filter *cf;
	struct iavf_mac_filter *f;
	u32 reg_val;
	int i = 0, err;
	bool running;

	/* When the device is being removed it doesn't make sense to run
	 * the reset task; just return in such a case.
	 */
	if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
		return;

	/* Take ownership of the client critical section for the whole reset. */
	while (test_and_set_bit(__IAVF_IN_CLIENT_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);
	if (CLIENT_ENABLED(adapter)) {
		adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
				    IAVF_FLAG_CLIENT_NEEDS_CLOSE |
				    IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
				    IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
		cancel_delayed_work_sync(&adapter->client_task);
		iavf_notify_client_close(&adapter->vsi, true);
	}
	iavf_misc_irq_disable(adapter);
	if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
		adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
		/* Restart the AQ here. If we have been reset but didn't
		 * detect it, or if the PF had to reinit, our AQ will be hosed.
		 */
		iavf_shutdown_adminq(hw);
		iavf_init_adminq(hw);
		iavf_request_reset(adapter);
	}
	adapter->flags |= IAVF_FLAG_RESET_PENDING;

	/* poll until we see the reset actually happen (ARQ enable drops) */
	for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) {
		reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
			  IAVF_VF_ARQLEN1_ARQENABLE_MASK;
		if (!reg_val)
			break;
		usleep_range(5000, 10000);
	}
	if (i == IAVF_RESET_WAIT_COUNT) {
		dev_info(&adapter->pdev->dev, "Never saw reset\n");
		goto continue_reset; /* act like the reset happened */
	}

	/* wait until the reset is complete and the PF is responding to us */
	for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) {
		/* sleep first to make sure VF state is set correctly */
		msleep(IAVF_RESET_WAIT_MS);

		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg_val == VIRTCHNL_VFR_VFACTIVE)
			break;
	}

	/* the reset may have cleared bus mastering; restore it */
	pci_set_master(adapter->pdev);

	if (i == IAVF_RESET_WAIT_COUNT) {
		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
			reg_val);
		iavf_disable_vf(adapter);
		clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
		return; /* Do not attempt to reinit. It's dead, Jim. */
	}

continue_reset:
	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume the traffic path is
	 * fully set up; check our own state instead.
	 */
	running = ((adapter->state == __IAVF_RUNNING) ||
		   (adapter->state == __IAVF_RESETTING));

	if (running) {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		adapter->link_up = false;
		iavf_napi_disable_all(adapter);
	}
	iavf_irq_disable(adapter);

	adapter->state = __IAVF_RESETTING;
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;

	/* free the Tx/Rx rings and descriptors, might be better to just
	 * re-use them sometime in the future
	 */
	iavf_free_all_rx_resources(adapter);
	iavf_free_all_tx_resources(adapter);

	adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
	/* kill and reinit the admin queue */
	iavf_shutdown_adminq(hw);
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
	err = iavf_init_adminq(hw);
	if (err)
		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
			 err);
	adapter->aq_required = 0;

	if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
		err = iavf_reinit_interrupt_scheme(adapter);
		if (err)
			goto reset_err;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* mark every MAC and VLAN filter for re-add after the reset */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->add = true;
	}

	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->add = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* check if TCs are running and re-add all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc) {
		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
			cf->add = true;
		}
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
	iavf_misc_irq_enable(adapter);

	mod_timer(&adapter->watchdog_timer, jiffies + 2);

	/* We were running when the reset started, so we need to restore
	 * some state here.
	 */
	if (running) {
		/* allocate transmit descriptors */
		err = iavf_setup_all_tx_resources(adapter);
		if (err)
			goto reset_err;

		/* allocate receive descriptors */
		err = iavf_setup_all_rx_resources(adapter);
		if (err)
			goto reset_err;

		if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
			err = iavf_request_traffic_irqs(adapter, netdev->name);
			if (err)
				goto reset_err;

			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
		}

		iavf_configure(adapter);

		iavf_up_complete(adapter);

		iavf_irq_enable(adapter, true);
	} else {
		adapter->state = __IAVF_DOWN;
		wake_up(&adapter->down_waitqueue);
	}
	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return;
reset_err:
	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
	iavf_close(netdev);
}
2013
2014
2015
2016
2017
/**
 * iavf_adminq_task - worker thread to clean the admin queue
 * @work: pointer to work_struct containing our data
 *
 * Drains the admin receive queue, dispatching each virtchnl message to
 * iavf_virtchnl_completion(), then checks and clears any error bits in
 * the ARQ/ASQ length registers before re-enabling the misc interrupt.
 **/
static void iavf_adminq_task(struct work_struct *work)
{
	struct iavf_adapter *adapter =
		container_of(work, struct iavf_adapter, adminq_task);
	struct iavf_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	enum virtchnl_ops v_op;
	iavf_status ret, v_ret;
	u32 val, oldval;
	u16 pending;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		goto out;

	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		goto out;

	do {
		ret = iavf_clean_arq_element(hw, &event, &pending);
		v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		v_ret = (iavf_status)le32_to_cpu(event.desc.cookie_low);

		if (ret || !v_op)
			break; /* No event to process or error cleaning ARQ */

		iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
					 event.msg_len);
		/* re-zero the buffer so stale data can't leak into the
		 * next message
		 */
		if (pending != 0)
			memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
	} while (pending);

	/* skip register error checks while a reset is in flight */
	if ((adapter->flags &
	     (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
	    adapter->state == __IAVF_RESETTING)
		goto freedom;

	/* check for error indications */
	val = rd32(hw, hw->aq.arq.len);
	if (val == 0xdeadbeef) /* device in reset; registers unreadable */
		goto freedom;
	oldval = val;
	if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
	}
	if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
	}
	if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
	}
	/* only write back when an error bit was actually cleared */
	if (oldval != val)
		wr32(hw, hw->aq.arq.len, val);

	val = rd32(hw, hw->aq.asq.len);
	oldval = val;
	if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
	}
	if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
	}
	if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.asq.len, val);

freedom:
	kfree(event.msg_buf);
out:
	/* re-enable Admin queue interrupt cause */
	iavf_misc_irq_enable(adapter);
}
2099
2100
2101
2102
2103
2104
2105
2106
2107static void iavf_client_task(struct work_struct *work)
2108{
2109 struct iavf_adapter *adapter =
2110 container_of(work, struct iavf_adapter, client_task.work);
2111
2112
2113
2114
2115
2116 if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section))
2117 return;
2118
2119 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
2120 iavf_client_subtask(adapter);
2121 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
2122 goto out;
2123 }
2124 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
2125 iavf_notify_client_l2_params(&adapter->vsi);
2126 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
2127 goto out;
2128 }
2129 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
2130 iavf_notify_client_close(&adapter->vsi, false);
2131 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
2132 goto out;
2133 }
2134 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
2135 iavf_notify_client_open(&adapter->vsi);
2136 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
2137 }
2138out:
2139 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
2140}
2141
2142
2143
2144
2145
2146
2147
2148void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
2149{
2150 int i;
2151
2152 if (!adapter->tx_rings)
2153 return;
2154
2155 for (i = 0; i < adapter->num_active_queues; i++)
2156 if (adapter->tx_rings[i].desc)
2157 iavf_free_tx_resources(&adapter->tx_rings[i]);
2158}
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
2171{
2172 int i, err = 0;
2173
2174 for (i = 0; i < adapter->num_active_queues; i++) {
2175 adapter->tx_rings[i].count = adapter->tx_desc_count;
2176 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
2177 if (!err)
2178 continue;
2179 dev_err(&adapter->pdev->dev,
2180 "Allocation for Tx Queue %u failed\n", i);
2181 break;
2182 }
2183
2184 return err;
2185}
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
2198{
2199 int i, err = 0;
2200
2201 for (i = 0; i < adapter->num_active_queues; i++) {
2202 adapter->rx_rings[i].count = adapter->rx_desc_count;
2203 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
2204 if (!err)
2205 continue;
2206 dev_err(&adapter->pdev->dev,
2207 "Allocation for Rx Queue %u failed\n", i);
2208 break;
2209 }
2210 return err;
2211}
2212
2213
2214
2215
2216
2217
2218
2219void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
2220{
2221 int i;
2222
2223 if (!adapter->rx_rings)
2224 return;
2225
2226 for (i = 0; i < adapter->num_active_queues; i++)
2227 if (adapter->rx_rings[i].desc)
2228 iavf_free_rx_resources(&adapter->rx_rings[i]);
2229}
2230
2231
2232
2233
2234
2235
2236static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
2237 u64 max_tx_rate)
2238{
2239 int speed = 0, ret = 0;
2240
2241 switch (adapter->link_speed) {
2242 case I40E_LINK_SPEED_40GB:
2243 speed = 40000;
2244 break;
2245 case I40E_LINK_SPEED_25GB:
2246 speed = 25000;
2247 break;
2248 case I40E_LINK_SPEED_20GB:
2249 speed = 20000;
2250 break;
2251 case I40E_LINK_SPEED_10GB:
2252 speed = 10000;
2253 break;
2254 case I40E_LINK_SPEED_1GB:
2255 speed = 1000;
2256 break;
2257 case I40E_LINK_SPEED_100MB:
2258 speed = 100;
2259 break;
2260 default:
2261 break;
2262 }
2263
2264 if (max_tx_rate > speed) {
2265 dev_err(&adapter->pdev->dev,
2266 "Invalid tx rate specified\n");
2267 ret = -EINVAL;
2268 }
2269
2270 return ret;
2271}
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282static int iavf_validate_ch_config(struct iavf_adapter *adapter,
2283 struct tc_mqprio_qopt_offload *mqprio_qopt)
2284{
2285 u64 total_max_rate = 0;
2286 int i, num_qps = 0;
2287 u64 tx_rate = 0;
2288 int ret = 0;
2289
2290 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
2291 mqprio_qopt->qopt.num_tc < 1)
2292 return -EINVAL;
2293
2294 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
2295 if (!mqprio_qopt->qopt.count[i] ||
2296 mqprio_qopt->qopt.offset[i] != num_qps)
2297 return -EINVAL;
2298 if (mqprio_qopt->min_rate[i]) {
2299 dev_err(&adapter->pdev->dev,
2300 "Invalid min tx rate (greater than 0) specified\n");
2301 return -EINVAL;
2302 }
2303
2304 tx_rate = div_u64(mqprio_qopt->max_rate[i],
2305 IAVF_MBPS_DIVISOR);
2306 total_max_rate += tx_rate;
2307 num_qps += mqprio_qopt->qopt.count[i];
2308 }
2309 if (num_qps > IAVF_MAX_REQ_QUEUES)
2310 return -EINVAL;
2311
2312 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
2313 return ret;
2314}
2315
2316
2317
2318
2319
2320static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
2321{
2322 struct iavf_cloud_filter *cf, *cftmp;
2323
2324 spin_lock_bh(&adapter->cloud_filter_list_lock);
2325 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
2326 list) {
2327 list_del(&cf->list);
2328 kfree(cf);
2329 adapter->num_cloud_filters--;
2330 }
2331 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2332}
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
/**
 * __iavf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @type_data: tc offload data (struct tc_mqprio_qopt_offload)
 *
 * Validates the TC configuration requested via mqprio and either tears
 * down the existing channel configuration (hw == 0) or records the new
 * one and requests channel creation from the PF via the watchdog.
 *
 * Returns 0 on success, negative on failure.
 **/
static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	u8 num_tc = 0, total_qps = 0;
	int ret = 0, netdev_tc = 0;
	u64 max_tx_rate;
	u16 mode;
	int i;

	num_tc = mqprio_qopt->qopt.num_tc;
	mode = mqprio_qopt->mode;

	/* delete queue_channel */
	if (!mqprio_qopt->qopt.hw) {
		if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
			/* reset the tc configuration */
			netdev_reset_tc(netdev);
			adapter->num_tc = 0;
			netif_tx_stop_all_queues(netdev);
			netif_tx_disable(netdev);
			iavf_del_all_cloud_filters(adapter);
			/* NOTE(review): plain assignment (not |=) discards
			 * any other pending aq_required flags here; looks
			 * deliberate but worth confirming.
			 */
			adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
			goto exit;
		} else {
			return -EINVAL;
		}
	}

	/* add queue channel */
	if (mode == TC_MQPRIO_MODE_CHANNEL) {
		if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
			dev_err(&adapter->pdev->dev, "ADq not supported\n");
			return -EOPNOTSUPP;
		}
		if (adapter->ch_config.state != __IAVF_TC_INVALID) {
			dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
			return -EINVAL;
		}

		ret = iavf_validate_ch_config(adapter, mqprio_qopt);
		if (ret)
			return ret;
		/* Return if same TC config is requested */
		if (adapter->num_tc == num_tc)
			return 0;
		adapter->num_tc = num_tc;

		/* record per-TC queue counts/offsets and max rates (Mbps);
		 * unused TCs get a single queue at offset 0
		 */
		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
			if (i < num_tc) {
				adapter->ch_config.ch_info[i].count =
					mqprio_qopt->qopt.count[i];
				adapter->ch_config.ch_info[i].offset =
					mqprio_qopt->qopt.offset[i];
				total_qps += mqprio_qopt->qopt.count[i];
				max_tx_rate = mqprio_qopt->max_rate[i];
				/* convert to Mbps */
				max_tx_rate = div_u64(max_tx_rate,
						      IAVF_MBPS_DIVISOR);
				adapter->ch_config.ch_info[i].max_tx_rate =
					max_tx_rate;
			} else {
				adapter->ch_config.ch_info[i].count = 1;
				adapter->ch_config.ch_info[i].offset = 0;
			}
		}
		adapter->ch_config.total_qps = total_qps;
		netif_tx_stop_all_queues(netdev);
		netif_tx_disable(netdev);
		adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
		netdev_reset_tc(netdev);
		/* Report the tc mapping up the stack */
		netdev_set_num_tc(adapter->netdev, num_tc);
		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
			u16 qcount = mqprio_qopt->qopt.count[i];
			u16 qoffset = mqprio_qopt->qopt.offset[i];

			if (i < num_tc)
				netdev_set_tc_queue(netdev, netdev_tc++, qcount,
						    qoffset);
		}
	}
exit:
	return ret;
}
2431
2432
2433
2434
2435
2436
2437
2438static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
2439 struct tc_cls_flower_offload *f,
2440 struct iavf_cloud_filter *filter)
2441{
2442 struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
2443 struct flow_dissector *dissector = rule->match.dissector;
2444 u16 n_proto_mask = 0;
2445 u16 n_proto_key = 0;
2446 u8 field_flags = 0;
2447 u16 addr_type = 0;
2448 u16 n_proto = 0;
2449 int i = 0;
2450 struct virtchnl_filter *vf = &filter->f;
2451
2452 if (dissector->used_keys &
2453 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2454 BIT(FLOW_DISSECTOR_KEY_BASIC) |
2455 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2456 BIT(FLOW_DISSECTOR_KEY_VLAN) |
2457 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2458 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2459 BIT(FLOW_DISSECTOR_KEY_PORTS) |
2460 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
2461 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
2462 dissector->used_keys);
2463 return -EOPNOTSUPP;
2464 }
2465
2466 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
2467 struct flow_match_enc_keyid match;
2468
2469 flow_rule_match_enc_keyid(rule, &match);
2470 if (match.mask->keyid != 0)
2471 field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
2472 }
2473
2474 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2475 struct flow_match_basic match;
2476
2477 flow_rule_match_basic(rule, &match);
2478 n_proto_key = ntohs(match.key->n_proto);
2479 n_proto_mask = ntohs(match.mask->n_proto);
2480
2481 if (n_proto_key == ETH_P_ALL) {
2482 n_proto_key = 0;
2483 n_proto_mask = 0;
2484 }
2485 n_proto = n_proto_key & n_proto_mask;
2486 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
2487 return -EINVAL;
2488 if (n_proto == ETH_P_IPV6) {
2489
2490 vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
2491 }
2492
2493 if (match.key->ip_proto != IPPROTO_TCP) {
2494 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
2495 return -EINVAL;
2496 }
2497 }
2498
2499 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2500 struct flow_match_eth_addrs match;
2501
2502 flow_rule_match_eth_addrs(rule, &match);
2503
2504
2505 if (!is_zero_ether_addr(match.mask->dst)) {
2506 if (is_broadcast_ether_addr(match.mask->dst)) {
2507 field_flags |= IAVF_CLOUD_FIELD_OMAC;
2508 } else {
2509 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
2510 match.mask->dst);
2511 return I40E_ERR_CONFIG;
2512 }
2513 }
2514
2515 if (!is_zero_ether_addr(match.mask->src)) {
2516 if (is_broadcast_ether_addr(match.mask->src)) {
2517 field_flags |= IAVF_CLOUD_FIELD_IMAC;
2518 } else {
2519 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
2520 match.mask->src);
2521 return I40E_ERR_CONFIG;
2522 }
2523 }
2524
2525 if (!is_zero_ether_addr(match.key->dst))
2526 if (is_valid_ether_addr(match.key->dst) ||
2527 is_multicast_ether_addr(match.key->dst)) {
2528
2529 for (i = 0; i < ETH_ALEN; i++)
2530 vf->mask.tcp_spec.dst_mac[i] |= 0xff;
2531 ether_addr_copy(vf->data.tcp_spec.dst_mac,
2532 match.key->dst);
2533 }
2534
2535 if (!is_zero_ether_addr(match.key->src))
2536 if (is_valid_ether_addr(match.key->src) ||
2537 is_multicast_ether_addr(match.key->src)) {
2538
2539 for (i = 0; i < ETH_ALEN; i++)
2540 vf->mask.tcp_spec.src_mac[i] |= 0xff;
2541 ether_addr_copy(vf->data.tcp_spec.src_mac,
2542 match.key->src);
2543 }
2544 }
2545
2546 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
2547 struct flow_match_vlan match;
2548
2549 flow_rule_match_vlan(rule, &match);
2550 if (match.mask->vlan_id) {
2551 if (match.mask->vlan_id == VLAN_VID_MASK) {
2552 field_flags |= IAVF_CLOUD_FIELD_IVLAN;
2553 } else {
2554 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
2555 match.mask->vlan_id);
2556 return I40E_ERR_CONFIG;
2557 }
2558 }
2559 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
2560 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
2561 }
2562
2563 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2564 struct flow_match_control match;
2565
2566 flow_rule_match_control(rule, &match);
2567 addr_type = match.key->addr_type;
2568 }
2569
2570 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2571 struct flow_match_ipv4_addrs match;
2572
2573 flow_rule_match_ipv4_addrs(rule, &match);
2574 if (match.mask->dst) {
2575 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
2576 field_flags |= IAVF_CLOUD_FIELD_IIP;
2577 } else {
2578 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
2579 be32_to_cpu(match.mask->dst));
2580 return I40E_ERR_CONFIG;
2581 }
2582 }
2583
2584 if (match.mask->src) {
2585 if (match.mask->src == cpu_to_be32(0xffffffff)) {
2586 field_flags |= IAVF_CLOUD_FIELD_IIP;
2587 } else {
2588 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
2589 be32_to_cpu(match.mask->dst));
2590 return I40E_ERR_CONFIG;
2591 }
2592 }
2593
2594 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
2595 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
2596 return I40E_ERR_CONFIG;
2597 }
2598 if (match.key->dst) {
2599 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
2600 vf->data.tcp_spec.dst_ip[0] = match.key->dst;
2601 }
2602 if (match.key->src) {
2603 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
2604 vf->data.tcp_spec.src_ip[0] = match.key->src;
2605 }
2606 }
2607
2608 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2609 struct flow_match_ipv6_addrs match;
2610
2611 flow_rule_match_ipv6_addrs(rule, &match);
2612
2613
2614 if (ipv6_addr_any(&match.mask->dst)) {
2615 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
2616 IPV6_ADDR_ANY);
2617 return I40E_ERR_CONFIG;
2618 }
2619
2620
2621
2622
2623 if (ipv6_addr_loopback(&match.key->dst) ||
2624 ipv6_addr_loopback(&match.key->src)) {
2625 dev_err(&adapter->pdev->dev,
2626 "ipv6 addr should not be loopback\n");
2627 return I40E_ERR_CONFIG;
2628 }
2629 if (!ipv6_addr_any(&match.mask->dst) ||
2630 !ipv6_addr_any(&match.mask->src))
2631 field_flags |= IAVF_CLOUD_FIELD_IIP;
2632
2633 for (i = 0; i < 4; i++)
2634 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
2635 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
2636 sizeof(vf->data.tcp_spec.dst_ip));
2637 for (i = 0; i < 4; i++)
2638 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
2639 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
2640 sizeof(vf->data.tcp_spec.src_ip));
2641 }
2642 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2643 struct flow_match_ports match;
2644
2645 flow_rule_match_ports(rule, &match);
2646 if (match.mask->src) {
2647 if (match.mask->src == cpu_to_be16(0xffff)) {
2648 field_flags |= IAVF_CLOUD_FIELD_IIP;
2649 } else {
2650 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
2651 be16_to_cpu(match.mask->src));
2652 return I40E_ERR_CONFIG;
2653 }
2654 }
2655
2656 if (match.mask->dst) {
2657 if (match.mask->dst == cpu_to_be16(0xffff)) {
2658 field_flags |= IAVF_CLOUD_FIELD_IIP;
2659 } else {
2660 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
2661 be16_to_cpu(match.mask->dst));
2662 return I40E_ERR_CONFIG;
2663 }
2664 }
2665 if (match.key->dst) {
2666 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
2667 vf->data.tcp_spec.dst_port = match.key->dst;
2668 }
2669
2670 if (match.key->src) {
2671 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
2672 vf->data.tcp_spec.src_port = match.key->src;
2673 }
2674 }
2675 vf->field_flags = field_flags;
2676
2677 return 0;
2678}
2679
2680
2681
2682
2683
2684
2685
2686static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
2687 struct iavf_cloud_filter *filter)
2688{
2689 if (tc == 0)
2690 return 0;
2691 if (tc < adapter->num_tc) {
2692 if (!filter->f.data.tcp_spec.dst_port) {
2693 dev_err(&adapter->pdev->dev,
2694 "Specify destination port to redirect to traffic class other than TC0\n");
2695 return -EINVAL;
2696 }
2697 }
2698
2699 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
2700 filter->f.action_meta = tc;
2701 return 0;
2702}
2703
2704
2705
2706
2707
2708
/**
 * iavf_configure_clsflower - Add tc flower filters
 * @adapter: board private structure
 * @cls_flower: pointer to struct tc_cls_flower_offload
 *
 * Parses the flower match, builds a cloud filter and queues it for the
 * watchdog to send to the PF.  Returns 0 on success, negative errno on
 * failure (the partially-built filter is freed on every error path).
 */
static int iavf_configure_clsflower(struct iavf_adapter *adapter,
				    struct tc_cls_flower_offload *cls_flower)
{
	int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
	struct iavf_cloud_filter *filter = NULL;
	int err = -EINVAL, count = 50;

	if (tc < 0) {
		dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
		return -EINVAL;
	}

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	/* Bounded busy-wait for the critical section; if another task holds
	 * it for too long, bail out with err still set to -EINVAL.
	 */
	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		if (--count == 0)
			goto err;
		udelay(1);
	}

	filter->cookie = cls_flower->cookie;

	/* set the mask to all zeroes to begin with */
	memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
	/* start out with flow type and eth type IPv4 to begin with */
	filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
	err = iavf_parse_cls_flower(adapter, cls_flower, filter);
	if (err < 0)
		goto err;

	err = iavf_handle_tclass(adapter, tc, filter);
	if (err < 0)
		goto err;

	/* add filter to the list; the watchdog will push it to the PF */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_add_tail(&filter->list, &adapter->cloud_filter_list);
	adapter->num_cloud_filters++;
	filter->add = true;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
err:
	/* on success err is 0, so the filter is kept, not freed */
	if (err)
		kfree(filter);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
	return err;
}
2760
2761
2762
2763
2764
2765
2766
2767
2768static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
2769 unsigned long *cookie)
2770{
2771 struct iavf_cloud_filter *filter = NULL;
2772
2773 if (!cookie)
2774 return NULL;
2775
2776 list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
2777 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
2778 return filter;
2779 }
2780 return NULL;
2781}
2782
2783
2784
2785
2786
2787
2788static int iavf_delete_clsflower(struct iavf_adapter *adapter,
2789 struct tc_cls_flower_offload *cls_flower)
2790{
2791 struct iavf_cloud_filter *filter = NULL;
2792 int err = 0;
2793
2794 spin_lock_bh(&adapter->cloud_filter_list_lock);
2795 filter = iavf_find_cf(adapter, &cls_flower->cookie);
2796 if (filter) {
2797 filter->del = true;
2798 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
2799 } else {
2800 err = -EINVAL;
2801 }
2802 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2803
2804 return err;
2805}
2806
2807
2808
2809
2810
2811
2812static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
2813 struct tc_cls_flower_offload *cls_flower)
2814{
2815 if (cls_flower->common.chain_index)
2816 return -EOPNOTSUPP;
2817
2818 switch (cls_flower->command) {
2819 case TC_CLSFLOWER_REPLACE:
2820 return iavf_configure_clsflower(adapter, cls_flower);
2821 case TC_CLSFLOWER_DESTROY:
2822 return iavf_delete_clsflower(adapter, cls_flower);
2823 case TC_CLSFLOWER_STATS:
2824 return -EOPNOTSUPP;
2825 default:
2826 return -EOPNOTSUPP;
2827 }
2828}
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2839 void *cb_priv)
2840{
2841 switch (type) {
2842 case TC_SETUP_CLSFLOWER:
2843 return iavf_setup_tc_cls_flower(cb_priv, type_data);
2844 default:
2845 return -EOPNOTSUPP;
2846 }
2847}
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857static int iavf_setup_tc_block(struct net_device *dev,
2858 struct tc_block_offload *f)
2859{
2860 struct iavf_adapter *adapter = netdev_priv(dev);
2861
2862 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
2863 return -EOPNOTSUPP;
2864
2865 switch (f->command) {
2866 case TC_BLOCK_BIND:
2867 return tcf_block_cb_register(f->block, iavf_setup_tc_block_cb,
2868 adapter, adapter, f->extack);
2869 case TC_BLOCK_UNBIND:
2870 tcf_block_cb_unregister(f->block, iavf_setup_tc_block_cb,
2871 adapter);
2872 return 0;
2873 default:
2874 return -EOPNOTSUPP;
2875 }
2876}
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
2890 void *type_data)
2891{
2892 switch (type) {
2893 case TC_SETUP_QDISC_MQPRIO:
2894 return __iavf_setup_tc(netdev, type_data);
2895 case TC_SETUP_BLOCK:
2896 return iavf_setup_tc_block(netdev, type_data);
2897 default:
2898 return -EOPNOTSUPP;
2899 }
2900}
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
/**
 * iavf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  Tx and Rx resources are allocated,
 * traffic interrupts requested, and the interface brought up.
 **/
static int iavf_open(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int err;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
		dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
		return -EIO;
	}

	/* serialize against reset and other critical tasks */
	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	if (adapter->state != __IAVF_DOWN) {
		err = -EBUSY;
		goto err_unlock;
	}

	/* allocate transmit descriptors */
	err = iavf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = iavf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	/* request the traffic (queue) interrupt vectors */
	err = iavf_request_traffic_irqs(adapter, netdev->name);
	if (err)
		goto err_req_irq;

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* re-add our own MAC filter in case it was lost */
	iavf_add_filter(adapter, adapter->hw.mac.addr);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_configure(adapter);

	iavf_up_complete(adapter);

	iavf_irq_enable(adapter, true);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return 0;

err_req_irq:
	iavf_down(adapter);
	iavf_free_traffic_irqs(adapter);
err_setup_rx:
	iavf_free_all_rx_resources(adapter);
err_setup_tx:
	iavf_free_all_tx_resources(adapter);
err_unlock:
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return err;
}
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
/**
 * iavf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  Traffic IRQs are freed here; ring resources are released
 * later, once the PF confirms the queues are stopped (state reaches
 * __IAVF_DOWN).
 **/
static int iavf_close(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int status;

	/* nothing to tear down if we never got past DOWN_PENDING */
	if (adapter->state <= __IAVF_DOWN_PENDING)
		return 0;

	/* serialize against reset and other critical tasks */
	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;

	iavf_down(adapter);
	adapter->state = __IAVF_DOWN_PENDING;
	iavf_free_traffic_irqs(adapter);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	/* Ring resources are deliberately NOT freed here: the hardware can
	 * still DMA until the PF acknowledges that the queues are disabled.
	 * Wait (bounded) for the state to transition to __IAVF_DOWN, which
	 * happens elsewhere once that acknowledgment arrives — presumably in
	 * the virtchnl completion path (not visible in this file section).
	 */
	status = wait_event_timeout(adapter->down_waitqueue,
				    adapter->state == __IAVF_DOWN,
				    msecs_to_jiffies(200));
	if (!status)
		netdev_warn(netdev, "Device resources not yet released\n");
	return 0;
}
3028
3029
3030
3031
3032
3033
3034
3035
3036static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
3037{
3038 struct iavf_adapter *adapter = netdev_priv(netdev);
3039
3040 netdev->mtu = new_mtu;
3041 if (CLIENT_ENABLED(adapter)) {
3042 iavf_notify_client_l2_params(&adapter->vsi);
3043 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
3044 }
3045 adapter->flags |= IAVF_FLAG_RESET_NEEDED;
3046 schedule_work(&adapter->reset_task);
3047
3048 return 0;
3049}
3050
3051
3052
3053
3054
3055
3056
3057static int iavf_set_features(struct net_device *netdev,
3058 netdev_features_t features)
3059{
3060 struct iavf_adapter *adapter = netdev_priv(netdev);
3061
3062
3063
3064
3065 if (!VLAN_ALLOWED(adapter)) {
3066 if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
3067 return -EINVAL;
3068 } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
3069 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3070 adapter->aq_required |=
3071 IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
3072 else
3073 adapter->aq_required |=
3074 IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
3075 }
3076
3077 return 0;
3078}
3079
3080
3081
3082
3083
3084
3085
/**
 * iavf_features_check - Validate packet conforms to hardware limits
 * @skb: skb buff
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 *
 * Drops checksum/GSO offload bits for frames whose header layout exceeds
 * what the hardware descriptors can express.
 **/
static netdev_features_t iavf_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame.  We can rule out both by just
	 * checking for CHECKSUM_PARTIAL.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes.  If it is then we need to drop support for GSO.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;

	/* MAC header: must be even and at most 63 2-byte words (126 bytes);
	 * the mask rejects any bit outside that range.
	 */
	len = skb_network_header(skb) - skb->data;
	if (len & ~(63 * 2))
		goto out_err;

	/* IP header: must be a multiple of 4 and at most 127 dwords */
	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len & ~(127 * 4))
		goto out_err;

	if (skb->encapsulation) {
		/* outer L4 tunnel header: even, at most 127 2-byte words */
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len & ~(127 * 2))
			goto out_err;

		/* inner IP header: multiple of 4, at most 127 dwords */
		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len & ~(127 * 4))
			goto out_err;
	}

	/* No need to validate the L4 header length here: TCP is the only
	 * protocol with a flexible header length and its maximum (15 dwords)
	 * is within the supported range.
	 */
	return features;
out_err:
	/* header layout not offloadable — fall back to software csum/GSO */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
3137
3138
3139
3140
3141
3142
3143
3144
3145static netdev_features_t iavf_fix_features(struct net_device *netdev,
3146 netdev_features_t features)
3147{
3148 struct iavf_adapter *adapter = netdev_priv(netdev);
3149
3150 if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
3151 features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3152 NETIF_F_HW_VLAN_CTAG_RX |
3153 NETIF_F_HW_VLAN_CTAG_FILTER);
3154
3155 return features;
3156}
3157
/* Netdev entry points implemented by this VF driver */
static const struct net_device_ops iavf_netdev_ops = {
	.ndo_open = iavf_open,
	.ndo_stop = iavf_close,
	.ndo_start_xmit = iavf_xmit_frame,
	.ndo_set_rx_mode = iavf_set_rx_mode,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = iavf_set_mac,
	.ndo_change_mtu = iavf_change_mtu,
	.ndo_tx_timeout = iavf_tx_timeout,
	.ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid,
	.ndo_features_check = iavf_features_check,
	.ndo_fix_features = iavf_fix_features,
	.ndo_set_features = iavf_set_features,
	.ndo_setup_tc = iavf_setup_tc,
};
3174
3175
3176
3177
3178
3179
3180
3181static int iavf_check_reset_complete(struct iavf_hw *hw)
3182{
3183 u32 rstat;
3184 int i;
3185
3186 for (i = 0; i < 100; i++) {
3187 rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
3188 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
3189 if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
3190 (rstat == VIRTCHNL_VFR_COMPLETED))
3191 return 0;
3192 usleep_range(10, 20);
3193 }
3194 return -EBUSY;
3195}
3196
3197
3198
3199
3200
3201
3202
3203
/**
 * iavf_process_config - Process the config information we got from the PF
 * @adapter: board private structure
 *
 * Verify that we have a valid config struct from the PF, and set up the
 * netdev feature flags and our VSI struct accordingly.
 * Returns 0 on success, -ENODEV on failure (including a queue-count
 * mismatch, which also schedules a reset).
 **/
int iavf_process_config(struct iavf_adapter *adapter)
{
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	int i, num_req_queues = adapter->num_req_queues;
	struct net_device *netdev = adapter->netdev;
	struct iavf_vsi *vsi = &adapter->vsi;
	netdev_features_t hw_enc_features;
	netdev_features_t hw_features;

	/* got VF config message back from PF, now we can parse it */
	for (i = 0; i < vfres->num_vsis; i++) {
		if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
			adapter->vsi_res = &vfres->vsi_res[i];
	}
	if (!adapter->vsi_res) {
		dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
		return -ENODEV;
	}

	if (num_req_queues &&
	    num_req_queues != adapter->vsi_res->num_queue_pairs) {
		/* The PF granted a different queue count than we requested.
		 * Fall back to the granted count and reset to get back to a
		 * working state.
		 */
		dev_err(&adapter->pdev->dev,
			"Requested %d queues, but PF only gave us %d.\n",
			num_req_queues,
			adapter->vsi_res->num_queue_pairs);
		adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
		adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
		iavf_schedule_reset(adapter);
		return -ENODEV;
	}
	adapter->num_req_queues = 0;

	/* baseline feature set; also reused for encapsulated packets */
	hw_enc_features = NETIF_F_SG |
			  NETIF_F_IP_CSUM |
			  NETIF_F_IPV6_CSUM |
			  NETIF_F_HIGHDMA |
			  NETIF_F_SOFT_FEATURES |
			  NETIF_F_TSO |
			  NETIF_F_TSO_ECN |
			  NETIF_F_TSO6 |
			  NETIF_F_SCTP_CRC |
			  NETIF_F_RXHASH |
			  NETIF_F_RXCSUM |
			  0;

	/* advertise to stack only if offloads for encapsulated packets is
	 * supported
	 */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
		hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
				   NETIF_F_GSO_GRE |
				   NETIF_F_GSO_GRE_CSUM |
				   NETIF_F_GSO_IPXIP4 |
				   NETIF_F_GSO_IPXIP6 |
				   NETIF_F_GSO_UDP_TUNNEL_CSUM |
				   NETIF_F_GSO_PARTIAL |
				   0;

		/* without outer-checksum capability, tunnel csum offloads can
		 * only be advertised via GSO partial
		 */
		if (!(vfres->vf_cap_flags &
		      VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
			netdev->gso_partial_features |=
				NETIF_F_GSO_UDP_TUNNEL_CSUM;

		netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
		netdev->hw_enc_features |= hw_enc_features;
	}
	/* record features we support along with VLAN tagging */
	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;

	/* user-toggleable features start from the encap set */
	hw_features = hw_enc_features;

	/* Enable VLAN features if supported */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
		hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX);
	/* Enable cloud filter if ADQ is supported */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
		hw_features |= NETIF_F_HW_TC;

	netdev->hw_features |= hw_features;

	netdev->features |= hw_features;

	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Do not turn on offloads when they are requested to be turned off.
	 * TSO needs minimum 576 bytes to work correctly.
	 */
	if (netdev->wanted_features) {
		if (!(netdev->wanted_features & NETIF_F_TSO) ||
		    netdev->mtu < 576)
			netdev->features &= ~NETIF_F_TSO;
		if (!(netdev->wanted_features & NETIF_F_TSO6) ||
		    netdev->mtu < 576)
			netdev->features &= ~NETIF_F_TSO6;
		if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
			netdev->features &= ~NETIF_F_TSO_ECN;
		if (!(netdev->wanted_features & NETIF_F_GRO))
			netdev->features &= ~NETIF_F_GRO;
		if (!(netdev->wanted_features & NETIF_F_GSO))
			netdev->features &= ~NETIF_F_GSO;
	}

	/* populate our VSI struct from the PF-provided resource info */
	adapter->vsi.id = adapter->vsi_res->vsi_id;

	adapter->vsi.back = adapter;
	adapter->vsi.base_vector = 1;
	adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
	vsi->netdev = adapter->netdev;
	vsi->qs_handle = adapter->vsi_res->qset_handle;
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		adapter->rss_key_size = vfres->rss_key_size;
		adapter->rss_lut_size = vfres->rss_lut_size;
	} else {
		adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
		adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
	}

	return 0;
}
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
/**
 * iavf_init_task - worker thread to perform delayed initialization
 * @work: pointer to work_struct containing our data
 *
 * Runs the multi-step init state machine:
 *   __IAVF_STARTUP -> __IAVF_INIT_VERSION_CHECK ->
 *   __IAVF_INIT_GET_RESOURCES -> __IAVF_INIT_SW (netdev setup).
 * VF-PF communication over the admin queue can take tens of milliseconds,
 * so each step reschedules this work item rather than busy-waiting in
 * probe.  On repeated failures it backs off and eventually flags the PF
 * link as failed.
 **/
static void iavf_init_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						    struct iavf_adapter,
						    init_task.work);
	struct net_device *netdev = adapter->netdev;
	struct iavf_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	int err, bufsz;

	switch (adapter->state) {
	case __IAVF_STARTUP:
		/* driver loaded, probe complete */
		adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
		adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
		err = iavf_set_mac_type(hw);
		if (err) {
			dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
				err);
			goto err;
		}
		err = iavf_check_reset_complete(hw);
		if (err) {
			dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
				 err);
			goto err;
		}
		hw->aq.num_arq_entries = IAVF_AQ_LEN;
		hw->aq.num_asq_entries = IAVF_AQ_LEN;
		hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
		hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;

		err = iavf_init_adminq(hw);
		if (err) {
			dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
				err);
			goto err;
		}
		err = iavf_send_api_ver(adapter);
		if (err) {
			dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
			iavf_shutdown_adminq(hw);
			goto err;
		}
		adapter->state = __IAVF_INIT_VERSION_CHECK;
		goto restart;
	case __IAVF_INIT_VERSION_CHECK:
		/* api version msg sent, awaiting reply */
		if (!iavf_asq_done(hw)) {
			dev_err(&pdev->dev, "Admin queue command never completed\n");
			iavf_shutdown_adminq(hw);
			adapter->state = __IAVF_STARTUP;
			goto err;
		}

		/* NO_WORK means the PF hasn't replied yet: resend the
		 * version request and retry later
		 */
		err = iavf_verify_api_ver(adapter);
		if (err) {
			if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
				err = iavf_send_api_ver(adapter);
			else
				dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
					adapter->pf_version.major,
					adapter->pf_version.minor,
					VIRTCHNL_VERSION_MAJOR,
					VIRTCHNL_VERSION_MINOR);
			goto err;
		}
		err = iavf_send_vf_config_msg(adapter);
		if (err) {
			dev_err(&pdev->dev, "Unable to send config request (%d)\n",
				err);
			goto err;
		}
		adapter->state = __IAVF_INIT_GET_RESOURCES;
		goto restart;
	case __IAVF_INIT_GET_RESOURCES:
		/* config msg sent, awaiting reply */
		if (!adapter->vf_res) {
			bufsz = sizeof(struct virtchnl_vf_resource) +
				(IAVF_MAX_VF_VSI *
				 sizeof(struct virtchnl_vsi_resource));
			adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
			if (!adapter->vf_res)
				goto err;
		}
		err = iavf_get_vf_config(adapter);
		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
			err = iavf_send_vf_config_msg(adapter);
			goto err;
		} else if (err == I40E_ERR_PARAM) {
			/* We only get ERR_PARAM if the device is in a very
			 * bad state or if we've been disabled for previous
			 * bad behavior. Either way, we're done now.
			 */
			iavf_shutdown_adminq(hw);
			dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
			return;
		}
		if (err) {
			dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
				err);
			goto err_alloc;
		}
		adapter->state = __IAVF_INIT_SW;
		break;
	default:
		goto err_alloc;
	}

	/* __IAVF_INIT_SW: parse PF config and finish netdev setup */
	if (iavf_process_config(adapter))
		goto err_alloc;
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;

	adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;

	netdev->netdev_ops = &iavf_netdev_ops;
	iavf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	/* MTU range: ETH_MIN_MTU up to max Rx buffer minus header pad */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;

	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
			 adapter->hw.mac.addr);
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
	} else {
		adapter->flags |= IAVF_FLAG_ADDR_SET_BY_PF;
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
	}

	timer_setup(&adapter->watchdog_timer, iavf_watchdog_timer, 0);
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	adapter->tx_desc_count = IAVF_DEFAULT_TXD;
	adapter->rx_desc_count = IAVF_DEFAULT_RXD;
	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;
	iavf_map_rings_to_vectors(adapter);
	if (adapter->vf_res->vf_cap_flags &
	    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err_sw_init;

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!adapter->netdev_registered) {
		err = register_netdev(netdev);
		if (err)
			goto err_register;
	}

	adapter->netdev_registered = true;

	netif_tx_stop_all_queues(netdev);
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_add_device(adapter);
		if (err)
			dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
				 err);
	}

	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
	if (netdev->features & NETIF_F_GRO)
		dev_info(&pdev->dev, "GRO is enabled\n");

	adapter->state = __IAVF_DOWN;
	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
	iavf_misc_irq_enable(adapter);
	/* unblock anyone sleeping in iavf_close() */
	wake_up(&adapter->down_waitqueue);

	adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
	adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
	if (!adapter->rss_key || !adapter->rss_lut)
		goto err_mem;

	if (RSS_AQ(adapter)) {
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
		mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
	} else {
		iavf_init_rss(adapter);
	}
	return;
restart:
	/* next state: come back shortly to poll for the PF's reply */
	schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
	return;
err_mem:
	iavf_free_rss(adapter);
err_register:
	iavf_free_misc_irq(adapter);
err_sw_init:
	iavf_reset_interrupt_capability(adapter);
err_alloc:
	kfree(adapter->vf_res);
	adapter->vf_res = NULL;
err:
	/* Things went into the weeds, so try again later */
	if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
		dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
		adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
		iavf_shutdown_adminq(hw);
		adapter->state = __IAVF_STARTUP;
		schedule_delayed_work(&adapter->init_task, HZ * 5);
		return;
	}
	schedule_delayed_work(&adapter->init_task, HZ);
}
3563
3564
3565
3566
3567
/**
 * iavf_shutdown - Shut down the device in preparation for a reboot
 * @pdev: pci device structure
 **/
static void iavf_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		iavf_close(netdev);

	/* prevent the task queues from running any further work */
	adapter->state = __IAVF_REMOVE;
	adapter->aq_required = 0;

#ifdef CONFIG_PM
	pci_save_state(pdev);

#endif
	pci_disable_device(pdev);
}
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599
/**
 * iavf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in iavf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * iavf_probe initializes an adapter identified by a pci_dev structure.
 * OS-level setup (PCI enable, BAR mapping, netdev allocation, work items)
 * happens here; device-specific initialization is deferred to
 * iavf_init_task, which is scheduled at the end.
 **/
static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct iavf_adapter *adapter = NULL;
	struct iavf_hw *hw = NULL;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* prefer 64-bit DMA, fall back to 32-bit */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	err = pci_request_regions(pdev, iavf_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
				   IAVF_MAX_REQ_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* drvdata holds the netdev; iavf_shutdown/suspend/remove rely on
	 * this to recover the adapter via netdev_priv()
	 */
	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;

	hw = &adapter->hw;
	hw->back = adapter;

	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
	adapter->state = __IAVF_STARTUP;

	/* Call save state here because it relies on the adapter struct */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	spin_lock_init(&adapter->mac_vlan_list_lock);
	spin_lock_init(&adapter->cloud_filter_list_lock);

	INIT_LIST_HEAD(&adapter->mac_filter_list);
	INIT_LIST_HEAD(&adapter->vlan_filter_list);
	INIT_LIST_HEAD(&adapter->cloud_filter_list);

	INIT_WORK(&adapter->reset_task, iavf_reset_task);
	INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
	INIT_WORK(&adapter->watchdog_task, iavf_watchdog_task);
	INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
	INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task);
	/* stagger init across functions so VFs don't hammer the PF at once */
	schedule_delayed_work(&adapter->init_task,
			      msecs_to_jiffies(5 * (pdev->devfn & 0x07)));

	/* Setup the wait queue for indicating transition to down status */
	init_waitqueue_head(&adapter->down_waitqueue);

	return 0;

err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
3706
3707#ifdef CONFIG_PM
3708
3709
3710
3711
3712
3713
3714
/**
 * iavf_suspend - Power management suspend routine
 * @pdev: PCI device information struct
 * @state: target power state (unused)
 *
 * Called when the system (VM) is entering sleep/suspend.  Brings the
 * interface down, frees interrupts and disables the PCI device.
 **/
static int iavf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int retval = 0;

	netif_device_detach(netdev);

	/* serialize against reset and other critical tasks */
	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	if (netif_running(netdev)) {
		rtnl_lock();
		iavf_down(adapter);
		rtnl_unlock();
	}
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	pci_disable_device(pdev);

	return 0;
}
3745
3746
3747
3748
3749
3750
3751
3752static int iavf_resume(struct pci_dev *pdev)
3753{
3754 struct iavf_adapter *adapter = pci_get_drvdata(pdev);
3755 struct net_device *netdev = adapter->netdev;
3756 u32 err;
3757
3758 pci_set_power_state(pdev, PCI_D0);
3759 pci_restore_state(pdev);
3760
3761
3762
3763 pci_save_state(pdev);
3764
3765 err = pci_enable_device_mem(pdev);
3766 if (err) {
3767 dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
3768 return err;
3769 }
3770 pci_set_master(pdev);
3771
3772 rtnl_lock();
3773 err = iavf_set_interrupt_capability(adapter);
3774 if (err) {
3775 rtnl_unlock();
3776 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
3777 return err;
3778 }
3779 err = iavf_request_misc_irq(adapter);
3780 rtnl_unlock();
3781 if (err) {
3782 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
3783 return err;
3784 }
3785
3786 schedule_work(&adapter->reset_task);
3787
3788 netif_device_attach(netdev);
3789
3790 return err;
3791}
3792
3793#endif
3794
3795
3796
3797
3798
3799
3800
3801
3802
/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver that it
 * should release a PCI device.  All outstanding work items, timers, IRQs,
 * filter lists and the netdev itself are torn down here, in that order.
 **/
static void iavf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_cloud_filter *cf, *cftmp;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	/* Indicate we are in remove task */
	set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
	cancel_delayed_work_sync(&adapter->init_task);
	cancel_work_sync(&adapter->reset_task);
	cancel_delayed_work_sync(&adapter->client_task);
	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_del_device(adapter);
		if (err)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 err);
	}

	/* stop all pending work and ask the PF to reset this VF */
	adapter->state = __IAVF_REMOVE;
	adapter->aq_required = 0;
	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
	iavf_request_reset(adapter);
	msleep(50);
	/* If the FW isn't responding, kick it once, but only once. */
	if (!iavf_asq_done(hw)) {
		iavf_request_reset(adapter);
		msleep(50);
	}
	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_misc_irq_disable(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);

	if (adapter->watchdog_timer.function)
		del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->adminq_task);

	iavf_free_rss(adapter);

	if (hw->aq.asq.count)
		iavf_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	iounmap(hw->hw_addr);
	pci_release_regions(pdev);
	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_free_queues(adapter);
	kfree(adapter->vf_res);
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* If we got removed before an up/down sequence, we've got filters
	 * hanging out there that we need to get rid of.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}
	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
				 list) {
		list_del(&vlf->list);
		kfree(vlf);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
3895
/* PCI driver registration; suspend/resume only when PM is configured */
static struct pci_driver iavf_driver = {
	.name = iavf_driver_name,
	.id_table = iavf_pci_tbl,
	.probe = iavf_probe,
	.remove = iavf_remove,
#ifdef CONFIG_PM
	.suspend = iavf_suspend,
	.resume = iavf_resume,
#endif
	.shutdown = iavf_shutdown,
};
3907
3908
3909
3910
3911
3912
3913
3914static int __init iavf_init_module(void)
3915{
3916 int ret;
3917
3918 pr_info("iavf: %s - version %s\n", iavf_driver_string,
3919 iavf_driver_version);
3920
3921 pr_info("%s\n", iavf_copyright);
3922
3923 iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
3924 iavf_driver_name);
3925 if (!iavf_wq) {
3926 pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
3927 return -ENOMEM;
3928 }
3929 ret = pci_register_driver(&iavf_driver);
3930 return ret;
3931}
3932
3933module_init(iavf_init_module);
3934
3935
3936
3937
3938
3939
3940
/**
 * iavf_exit_module - Driver Exit Cleanup Routine
 *
 * Called just before the driver is removed from memory: unregister from
 * the PCI subsystem first (so no new work can be queued), then destroy
 * the driver workqueue.
 **/
static void __exit iavf_exit_module(void)
{
	pci_unregister_driver(&iavf_driver);
	destroy_workqueue(iavf_wq);
}
3948
3949
3950