// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"

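/* All iavf tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */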
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 3
#define DRV_VERSION_MINOR 2
#define DRV_VERSION_BUILD 3
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." \
	__stringify(DRV_VERSION_BUILD) \
	DRV_KERN
const char iavf_driver_version[] = DRV_VERSION;
static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";

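/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */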
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *iavf_wq;

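/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/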
iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
				    struct iavf_dma_mem *mem,
				    u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem)
		return I40E_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return I40E_ERR_NO_MEMORY;
}

iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct iavf_dma_mem *mem)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem || !mem->va)
		return I40E_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
				     struct iavf_virt_mem *mem, u32 size)
{
	if (!mem)
		return I40E_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return I40E_ERR_NO_MEMORY;
}

iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, struct iavf_virt_mem *mem)
{
	if (!mem)
		return I40E_ERR_PARAM;

	kfree(mem->va);

	return 0;
}

void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
{
	char buf[512];
	va_list argptr;

	if (!(mask & ((struct iavf_hw *)hw)->debug_mask))
		return;

	va_start(argptr, fmt_str);
	vsnprintf(buf, sizeof(buf), fmt_str, argptr);
	va_end(argptr);

	pr_info("%s", buf);
}

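/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/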
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
	if (!(adapter->flags &
	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
		schedule_work(&adapter->reset_task);
	}
}

static void iavf_tx_timeout(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	iavf_schedule_reset(adapter);
}

static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	iavf_flush(hw);

	synchronize_irq(adapter->msix_entries[0].vector);
}

static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	iavf_flush(hw);
}

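/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/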
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	iavf_flush(hw);
}

void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
			     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
			     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
		}
	}
}

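/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/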
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
	struct iavf_hw *hw = &adapter->hw;

	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter, ~0);

	if (flush)
		iavf_flush(hw);
}

static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	/* schedule work on the private workqueue */
	schedule_work(&adapter->adminq_task);

	return IRQ_HANDLED;
}

static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
	struct iavf_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct iavf_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}

static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}

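/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.
 **/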
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		iavf_map_vector_to_rxq(adapter, vidx, ridx);
		iavf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}

static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct iavf_q_vector *q_vector =
		container_of(notify, struct iavf_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/* Callback for the irq_set_affinity_notifier release path; it is required
 * by the kernel to inform the notification subscriber that it will no
 * longer receive notifications, but there is nothing for us to do here.
 */
static void iavf_irq_affinity_release(struct kref *ref) {}

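/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/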
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	iavf_irq_disable(adapter);

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-TxRx-%d", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-rx-%d", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-tx-%d", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  iavf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}

		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
		q_vector->affinity_notify.release =
			iavf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);

		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint for irq_set_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}

static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &iavf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}

static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}

static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}

static void iavf_configure_tx(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}

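/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/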
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
	unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
	struct iavf_hw *hw = &adapter->hw;
	int i;

#if (PAGE_SIZE < 8192)
	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
		struct net_device *netdev = adapter->netdev;

		/* For jumbo frames on systems with 4K pages we have to use
		 * an order 1 page, so we might as well increase the size
		 * of our Rx buffer to make better use of the available space
		 */
		rx_buf_len = IAVF_RXBUFFER_3072;

		/* We use a 1536 buffer size for configurations with
		 * standard Ethernet mtu.  On x86 this gives us enough room
		 * for shared info and 192 bytes of padding.
		 */
		if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
	}
#endif

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = rx_buf_len;

		if (adapter->flags & IAVF_FLAG_LEGACY_RX)
			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
		else
			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
	}
}

static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (vlan == f->vlan)
			return f;
	}
	return NULL;
}

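/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/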
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f = NULL;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &adapter->vlan_filter_list);
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	}

clearout:
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	return f;
}

static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

static int iavf_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (!VLAN_ALLOWED(adapter))
		return -EIO;
	if (iavf_add_vlan(adapter, vid) == NULL)
		return -ENOMEM;
	return 0;
}

static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (VLAN_ALLOWED(adapter)) {
		iavf_del_vlan(adapter, vid);
		return 0;
	}
	return -EIO;
}

static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
				  const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}

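/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/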
static struct
iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
				 const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	f = iavf_find_filter(adapter, macaddr);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return f;

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	} else {
		f->remove = false;
	}

	return f;
}

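/**
 * iavf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/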
static int iavf_set_mac(struct net_device *netdev, void *p)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_mac_filter *f;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_filter(adapter, hw->mac.addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	f = iavf_add_filter(adapter, addr->sa_data);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (f)
		ether_addr_copy(hw->mac.addr, addr->sa_data);

	return (f == NULL) ? -ENOMEM : 0;
}

static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (iavf_add_filter(adapter, addr))
		return 0;
	else
		return -ENOMEM;
}

static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	f = iavf_find_filter(adapter, addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	return 0;
}

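/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/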
static void iavf_set_rx_mode(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & IAVF_FLAG_PROMISC_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & IAVF_FLAG_PROMISC_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;

	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
}

static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;

		q_vector = &adapter->q_vectors[q_idx];
		napi = &q_vector->napi;
		napi_enable(napi);
	}
}

static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		napi_disable(&q_vector->napi);
	}
}

static void iavf_configure(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	iavf_set_rx_mode(netdev);

	iavf_configure_tx(adapter);
	iavf_configure_rx(adapter);
	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *ring = &adapter->rx_rings[i];

		iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
	}
}

static void iavf_up_complete(struct iavf_adapter *adapter)
{
	adapter->state = __IAVF_RUNNING;
	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_napi_enable_all(adapter);

	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}

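/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/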
void iavf_down(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct iavf_vlan_filter *vlf;
	struct iavf_mac_filter *f;
	struct iavf_cloud_filter *cf;

	if (adapter->state <= __IAVF_DOWN_PENDING)
		return;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	iavf_napi_disable_all(adapter);
	iavf_irq_disable(adapter);

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* clear the sync flag on all filters */
	__dev_uc_unsync(adapter->netdev, NULL);
	__dev_mc_unsync(adapter->netdev, NULL);

	/* remove all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->remove = true;
	}

	/* remove all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->remove = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* remove all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		cf->del = true;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
	    adapter->state != __IAVF_RESETTING) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
	}

	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}

static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = err;
	return 0;
}

static void iavf_free_queues(struct iavf_adapter *adapter)
{
	if (!adapter->vsi_res)
		return;
	adapter->num_active_queues = 0;
	kfree(adapter->tx_rings);
	adapter->tx_rings = NULL;
	kfree(adapter->rx_rings);
	adapter->rx_rings = NULL;
}

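/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/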
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
	int i, num_active_queues;

	/* Use the number of queues requested via ethtool if one was set;
	 * for ADq, use the total queue pairs across all traffic classes;
	 * otherwise take the lesser of the queue pairs the PF provided
	 * and the number of online CPUs.
	 */
	if (adapter->num_req_queues)
		num_active_queues = adapter->num_req_queues;
	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
		 adapter->num_tc)
		num_active_queues = adapter->ch_config.total_qps;
	else
		num_active_queues = min_t(int,
					  adapter->vsi_res->num_queue_pairs,
					  (int)(num_online_cpus()));

	adapter->tx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < num_active_queues; i++) {
		struct iavf_ring *tx_ring;
		struct iavf_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->itr_setting = IAVF_ITR_TX_DEF;
		if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->dev = &adapter->pdev->dev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->itr_setting = IAVF_ITR_RX_DEF;
	}

	adapter->num_active_queues = num_active_queues;

	return 0;

err_out:
	iavf_free_queues(adapter);
	return -ENOMEM;
}

static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really doesn't
	 * do us much good if we have more vectors than CPUs. The number of
	 * queues is already limited by the number of CPUs, so cap the
	 * budget at one vector per queue pair plus the non-queue vectors.
	 */
	v_budget = min_t(int, pairs + NONQ_VECS,
			 (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = iavf_acquire_msix_vectors(adapter, v_budget);

out:
	netif_set_real_num_rx_queues(adapter->netdev, pairs);
	netif_set_real_num_tx_queues(adapter->netdev, pairs);
	return err;
}

static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
	struct i40e_aqc_get_set_rss_key_data *rss_key =
		(struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct iavf_hw *hw = &adapter->hw;
	int ret = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				  adapter->rss_lut, adapter->rss_lut_size);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
	}

	return ret;
}

static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

	dw = (u32 *)adapter->rss_key;
	for (i = 0; i <= adapter->rss_key_size / 4; i++)
		wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
		wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);

	iavf_flush(hw);

	return 0;
}

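/**
 * iavf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/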
int iavf_config_rss(struct iavf_adapter *adapter)
{
	if (RSS_PF(adapter)) {
		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
					IAVF_FLAG_AQ_SET_RSS_KEY;
		return 0;
	} else if (RSS_AQ(adapter)) {
		return iavf_config_rss_aq(adapter);
	} else {
		return iavf_config_rss_reg(adapter);
	}
}

static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
{
	u16 i;

	for (i = 0; i < adapter->rss_lut_size; i++)
		adapter->rss_lut[i] = i % adapter->num_active_queues;
}

static int iavf_init_rss(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int ret;

	if (!RSS_PF(adapter)) {
		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
		if (adapter->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
		else
			adapter->hena = IAVF_DEFAULT_RSS_HENA;

		wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
		wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
	}

	iavf_fill_rss_lut(adapter);
	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
	ret = iavf_config_rss(adapter);

	return ret;
}

static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx = 0, num_q_vectors;
	struct iavf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
				     GFP_KERNEL);
	if (!adapter->q_vectors)
		return -ENOMEM;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		q_vector->adapter = adapter;
		q_vector->vsi = &adapter->vsi;
		q_vector->v_idx = q_idx;
		q_vector->reg_idx = q_idx;
		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       iavf_napi_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}

static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	if (!adapter->q_vectors)
		return;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	napi_vectors = adapter->num_active_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];

		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
	}
	kfree(adapter->q_vectors);
	adapter->q_vectors = NULL;
}

void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

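/**
 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/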
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
	int err;

	err = iavf_alloc_queues(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = iavf_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc)
		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
			 adapter->num_tc);

	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
		 adapter->num_active_queues);

	return 0;
err_alloc_q_vectors:
	iavf_reset_interrupt_capability(adapter);
err_set_interrupt:
	iavf_free_queues(adapter);
err_alloc_queues:
	return err;
}

static void iavf_free_rss(struct iavf_adapter *adapter)
{
	kfree(adapter->rss_key);
	adapter->rss_key = NULL;

	kfree(adapter->rss_lut);
	adapter->rss_lut = NULL;
}

static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (netif_running(netdev))
		iavf_free_traffic_irqs(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);
	iavf_free_queues(adapter);

	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err;

	netif_tx_stop_all_queues(netdev);

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err;

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_map_rings_to_vectors(adapter);

	if (RSS_AQ(adapter))
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
	else
		err = iavf_init_rss(adapter);
err:
	return err;
}

static void iavf_watchdog_timer(struct timer_list *t)
{
	struct iavf_adapter *adapter = from_timer(adapter, t,
						  watchdog_timer);

	schedule_work(&adapter->watchdog_task);
}

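/**
 * iavf_watchdog_task - Periodic call-back task
 * @work: pointer to work_struct
 **/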
static void iavf_watchdog_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						    struct iavf_adapter,
						    watchdog_task);
	struct iavf_hw *hw = &adapter->hw;
	u32 reg_val;

	if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
		goto restart_watchdog;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if ((reg_val == VIRTCHNL_VFR_VFACTIVE) ||
		    (reg_val == VIRTCHNL_VFR_COMPLETED)) {
			/* A chance for redemption! */
			dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
			adapter->state = __IAVF_STARTUP;
			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
			schedule_delayed_work(&adapter->init_task, 10);
			clear_bit(__IAVF_IN_CRITICAL_TASK,
				  &adapter->crit_section);
			/* Don't reschedule the watchdog, since we've restarted
			 * the init task. When init_task contacts the PF and
			 * gets everything set up again, it'll restart the
			 * watchdog for us.
			 */
			return;
		}
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		goto watchdog_done;
	}

	if ((adapter->state < __IAVF_DOWN) ||
	    (adapter->flags & IAVF_FLAG_RESET_PENDING))
		goto watchdog_done;

	/* check for reset */
	reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
	if (!(adapter->flags & IAVF_FLAG_RESET_PENDING) && !reg_val) {
		adapter->state = __IAVF_RESETTING;
		adapter->flags |= IAVF_FLAG_RESET_PENDING;
		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
		schedule_work(&adapter->reset_task);
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		goto watchdog_done;
	}

	/* Process admin queue tasks. After init, everything gets done
	 * here so we don't race on the admin queue.
	 */
	if (adapter->current_op) {
		if (!iavf_asq_done(hw)) {
			dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
			iavf_send_api_ver(adapter);
		}
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) {
		iavf_send_vf_config_msg(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
		iavf_disable_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
		iavf_map_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
		iavf_add_ether_addrs(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
		iavf_add_vlans(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
		iavf_del_ether_addrs(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
		iavf_del_vlans(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
		iavf_configure_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
		iavf_enable_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
		/* This message goes straight to the firmware, not the
		 * PF, so we don't have to set current_op as we will
		 * not get a response through the ARQ.
		 */
		iavf_init_rss(adapter);
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
		iavf_get_hena(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
		iavf_set_hena(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
		iavf_set_rss_key(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
		iavf_set_rss_lut(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
				     FLAG_VF_MULTICAST_PROMISC);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
		goto watchdog_done;
	}

	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		iavf_set_promiscuous(adapter, 0);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
		iavf_enable_channels(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
		iavf_disable_channels(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		iavf_add_cloud_filter(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		iavf_del_cloud_filter(adapter);
		goto watchdog_done;
	}

	schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));

	if (adapter->state == __IAVF_RUNNING)
		iavf_request_stats(adapter);
watchdog_done:
	if (adapter->state == __IAVF_RUNNING)
		iavf_detect_recover_hung(&adapter->vsi);
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
	if (adapter->state == __IAVF_REMOVE)
		return;
	if (adapter->aq_required)
		mod_timer(&adapter->watchdog_timer,
			  jiffies + msecs_to_jiffies(20));
	else
		mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
	schedule_work(&adapter->adminq_task);
}

static void iavf_disable_vf(struct iavf_adapter *adapter)
{
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_vlan_filter *fv, *fvtmp;
	struct iavf_cloud_filter *cf, *cftmp;

	adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;

	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
	if (adapter->state == __IAVF_RUNNING) {
		set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
		netif_carrier_off(adapter->netdev);
		netif_tx_disable(adapter->netdev);
		adapter->link_up = false;
		iavf_napi_disable_all(adapter);
		iavf_irq_disable(adapter);
		iavf_free_traffic_irqs(adapter);
		iavf_free_all_tx_resources(adapter);
		iavf_free_all_rx_resources(adapter);
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Delete all of the filters, both MAC and VLAN. */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}

	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
		list_del(&fv->list);
		kfree(fv);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
		adapter->num_cloud_filters--;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_queues(adapter);
	iavf_free_q_vectors(adapter);
	kfree(adapter->vf_res);
	iavf_shutdown_adminq(&adapter->hw);
	adapter->netdev->flags &= ~IFF_UP;
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
	adapter->state = __IAVF_DOWN;
	wake_up(&adapter->down_waitqueue);
	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}

#define IAVF_RESET_WAIT_MS 10
#define IAVF_RESET_WAIT_COUNT 500

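/**
 * iavf_reset_task - Call-back task to handle hardware reset
 * @work: pointer to work_struct
 *
 * During reset we need to shut down and reinitialize the admin queue
 * before we can use it to communicate with the PF again. We also clear
 * and reinit the rings because that context is lost as well.
 **/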
static void iavf_reset_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						    struct iavf_adapter,
						    reset_task);
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	struct net_device *netdev = adapter->netdev;
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_vlan_filter *vlf;
	struct iavf_cloud_filter *cf;
	struct iavf_mac_filter *f;
	u32 reg_val;
	int i = 0, err;
	bool running;

	/* When device is being removed it doesn't make sense to run the reset
	 * task, just return in such a case.
	 */
	if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
		return;

	while (test_and_set_bit(__IAVF_IN_CLIENT_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);
	if (CLIENT_ENABLED(adapter)) {
		adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
				    IAVF_FLAG_CLIENT_NEEDS_CLOSE |
				    IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
				    IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
		cancel_delayed_work_sync(&adapter->client_task);
		iavf_notify_client_close(&adapter->vsi, true);
	}
	iavf_misc_irq_disable(adapter);
	if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
		adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
		/* Restart the AQ here. If we have been reset but didn't
		 * detect it, or if the PF had to reinit, our AQ will be hosed.
		 */
		iavf_shutdown_adminq(hw);
		iavf_init_adminq(hw);
		iavf_request_reset(adapter);
	}
	adapter->flags |= IAVF_FLAG_RESET_PENDING;

	/* poll until we see the reset actually happen */
	for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) {
		reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
			  IAVF_VF_ARQLEN1_ARQENABLE_MASK;
		if (!reg_val)
			break;
		usleep_range(5000, 10000);
	}
	if (i == IAVF_RESET_WAIT_COUNT) {
		dev_info(&adapter->pdev->dev, "Never saw reset\n");
		goto continue_reset; /* act like the reset happened */
	}

	/* wait until the reset is complete and the PF is responding to us */
	for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) {
		/* sleep first to make sure a minimum wait time is met */
		msleep(IAVF_RESET_WAIT_MS);

		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg_val == VIRTCHNL_VFR_VFACTIVE)
			break;
	}

	pci_set_master(adapter->pdev);

	if (i == IAVF_RESET_WAIT_COUNT) {
		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
			reg_val);
		iavf_disable_vf(adapter);
		clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
		return; /* Do not attempt to reinit. It's dead, Jim. */
	}

continue_reset:
	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
	running = ((adapter->state == __IAVF_RUNNING) ||
		   (adapter->state == __IAVF_RESETTING));

	if (running) {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		adapter->link_up = false;
		iavf_napi_disable_all(adapter);
	}
	iavf_irq_disable(adapter);

	adapter->state = __IAVF_RESETTING;
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;

	/* free the Tx/Rx rings and descriptors, might be better to just
	 * re-use them sometime in the future
	 */
	iavf_free_all_rx_resources(adapter);
	iavf_free_all_tx_resources(adapter);

	adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
	/* kill and reinit the admin queue */
	iavf_shutdown_adminq(hw);
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
	err = iavf_init_adminq(hw);
	if (err)
		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
			 err);
	adapter->aq_required = 0;

	if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
		err = iavf_reinit_interrupt_scheme(adapter);
		if (err)
			goto reset_err;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* re-add all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->add = true;
	}
	/* re-add all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->add = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* check if TCs are running and re-add all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc) {
		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
			cf->add = true;
		}
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
	iavf_misc_irq_enable(adapter);

	mod_timer(&adapter->watchdog_timer, jiffies + 2);

	/* We were running when the reset started, so we need to restore some
	 * state here.
	 */
	if (running) {
		/* allocate transmit descriptors */
		err = iavf_setup_all_tx_resources(adapter);
		if (err)
			goto reset_err;

		/* allocate receive descriptors */
		err = iavf_setup_all_rx_resources(adapter);
		if (err)
			goto reset_err;

		if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
			err = iavf_request_traffic_irqs(adapter, netdev->name);
			if (err)
				goto reset_err;

			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
		}

		iavf_configure(adapter);

		iavf_up_complete(adapter);

		iavf_irq_enable(adapter, true);
	} else {
		adapter->state = __IAVF_DOWN;
		wake_up(&adapter->down_waitqueue);
	}
	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return;
reset_err:
	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
	iavf_close(netdev);
}

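/**
 * iavf_adminq_task - worker thread to clean the admin queue
 * @work: pointer to the work struct containing our data
 **/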
static void iavf_adminq_task(struct work_struct *work)
{
	struct iavf_adapter *adapter =
		container_of(work, struct iavf_adapter, adminq_task);
	struct iavf_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	enum virtchnl_ops v_op;
	iavf_status ret, v_ret;
	u32 val, oldval;
	u16 pending;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		goto out;

	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		goto out;

	do {
		ret = iavf_clean_arq_element(hw, &event, &pending);
		v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		v_ret = (iavf_status)le32_to_cpu(event.desc.cookie_low);

		if (ret || !v_op)
			break; /* No event to process or error cleaning ARQ */

		iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
					 event.msg_len);
		if (pending != 0)
			memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
	} while (pending);

	if ((adapter->flags &
	     (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
	    adapter->state == __IAVF_RESETTING)
		goto freedom;

	/* check for error indications */
	val = rd32(hw, hw->aq.arq.len);
	if (val == 0xdeadbeef) /* indicates device in reset */
		goto freedom;
	oldval = val;
	if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
	}
	if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
	}
	if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.arq.len, val);

	val = rd32(hw, hw->aq.asq.len);
	oldval = val;
	if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
	}
	if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
	}
	if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.asq.len, val);

freedom:
	kfree(event.msg_buf);
out:
	/* re-enable Admin queue interrupt cause */
	iavf_misc_irq_enable(adapter);
}

static void iavf_client_task(struct work_struct *work)
{
	struct iavf_adapter *adapter =
		container_of(work, struct iavf_adapter, client_task.work);

	/* If we can't get the client bit, just give up. We'll be rescheduled
	 * later.
	 */
	if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section))
		return;

	if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
		iavf_client_subtask(adapter);
		adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
		goto out;
	}
	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
		iavf_notify_client_l2_params(&adapter->vsi);
		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
		goto out;
	}
	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
		iavf_notify_client_close(&adapter->vsi, false);
		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
		goto out;
	}
	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
		iavf_notify_client_open(&adapter->vsi);
		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
	}
out:
	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
}

void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
{
	int i;

	if (!adapter->tx_rings)
		return;

	for (i = 0; i < adapter->num_active_queues; i++)
		if (adapter->tx_rings[i].desc)
			iavf_free_tx_resources(&adapter->tx_rings[i]);
}

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->tx_rings[i].count = adapter->tx_desc_count;
		err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
		if (!err)
			continue;
		dev_err(&adapter->pdev->dev,
			"Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].count = adapter->rx_desc_count;
		err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
		if (!err)
			continue;
		dev_err(&adapter->pdev->dev,
			"Allocation for Rx Queue %u failed\n", i);
		break;
	}
	return err;
}

void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
{
	int i;

	if (!adapter->rx_rings)
		return;

	for (i = 0; i < adapter->num_active_queues; i++)
		if (adapter->rx_rings[i].desc)
			iavf_free_rx_resources(&adapter->rx_rings[i]);
}

static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
				      u64 max_tx_rate)
{
	int speed = 0, ret = 0;

	switch (adapter->link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = 40000;
		break;
	case I40E_LINK_SPEED_25GB:
		speed = 25000;
		break;
	case I40E_LINK_SPEED_20GB:
		speed = 20000;
		break;
	case I40E_LINK_SPEED_10GB:
		speed = 10000;
		break;
	case I40E_LINK_SPEED_1GB:
		speed = 1000;
		break;
	case I40E_LINK_SPEED_100MB:
		speed = 100;
		break;
	default:
		break;
	}

	if (max_tx_rate > speed) {
		dev_err(&adapter->pdev->dev,
			"Invalid tx rate specified\n");
		ret = -EINVAL;
	}

	return ret;
}

static int iavf_validate_ch_config(struct iavf_adapter *adapter,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u64 total_max_rate = 0;
	int i, num_qps = 0;
	u64 tx_rate = 0;
	int ret = 0;

	if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
	    mqprio_qopt->qopt.num_tc < 1)
		return -EINVAL;

	for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
		if (!mqprio_qopt->qopt.count[i] ||
		    mqprio_qopt->qopt.offset[i] != num_qps)
			return -EINVAL;
		if (mqprio_qopt->min_rate[i]) {
			dev_err(&adapter->pdev->dev,
				"Invalid min tx rate (greater than 0) specified\n");
			return -EINVAL;
		}
		/* convert to Mbps */
		tx_rate = div_u64(mqprio_qopt->max_rate[i],
				  IAVF_MBPS_DIVISOR);
		total_max_rate += tx_rate;
		num_qps += mqprio_qopt->qopt.count[i];
	}
	if (num_qps > IAVF_MAX_REQ_QUEUES)
		return -EINVAL;

	ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
	return ret;
}

static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
{
	struct iavf_cloud_filter *cf, *cftmp;

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
				 list) {
		list_del(&cf->list);
		kfree(cf);
		adapter->num_cloud_filters--;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
}

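/**
 * __iavf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @type_data: tc offload data
 *
 * This function processes the config information provided by the
 * user to configure traffic classes/queue channels and packages the
 * information to request the PF to setup traffic classes.
 *
 * Returns 0 on success.
 **/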
static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	u8 num_tc = 0, total_qps = 0;
	int ret = 0, netdev_tc = 0;
	u64 max_tx_rate;
	u16 mode;
	int i;

	num_tc = mqprio_qopt->qopt.num_tc;
	mode = mqprio_qopt->mode;

	/* delete queue_channel */
	if (!mqprio_qopt->qopt.hw) {
		if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
			/* reset the tc configuration */
			netdev_reset_tc(netdev);
			adapter->num_tc = 0;
			netif_tx_stop_all_queues(netdev);
			netif_tx_disable(netdev);
			iavf_del_all_cloud_filters(adapter);
			adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
			goto exit;
		} else {
			return -EINVAL;
		}
	}

	/* add queue channel */
	if (mode == TC_MQPRIO_MODE_CHANNEL) {
		if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
			dev_err(&adapter->pdev->dev, "ADq not supported\n");
			return -EOPNOTSUPP;
		}
		if (adapter->ch_config.state != __IAVF_TC_INVALID) {
			dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
			return -EINVAL;
		}

		ret = iavf_validate_ch_config(adapter, mqprio_qopt);
		if (ret)
			return ret;
		/* Return if same TC config is requested */
		if (adapter->num_tc == num_tc)
			return 0;
		adapter->num_tc = num_tc;

		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
			if (i < num_tc) {
				adapter->ch_config.ch_info[i].count =
					mqprio_qopt->qopt.count[i];
				adapter->ch_config.ch_info[i].offset =
					mqprio_qopt->qopt.offset[i];
				total_qps += mqprio_qopt->qopt.count[i];
				max_tx_rate = mqprio_qopt->max_rate[i];
				/* convert to Mbps */
				max_tx_rate = div_u64(max_tx_rate,
						      IAVF_MBPS_DIVISOR);
				adapter->ch_config.ch_info[i].max_tx_rate =
					max_tx_rate;
			} else {
				adapter->ch_config.ch_info[i].count = 1;
				adapter->ch_config.ch_info[i].offset = 0;
			}
		}
		adapter->ch_config.total_qps = total_qps;
		netif_tx_stop_all_queues(netdev);
		netif_tx_disable(netdev);
		adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
		netdev_reset_tc(netdev);
		/* Report the tc mapping up the stack */
		netdev_set_num_tc(adapter->netdev, num_tc);
		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
			u16 qcount = mqprio_qopt->qopt.count[i];
			u16 qoffset = mqprio_qopt->qopt.offset[i];

			if (i < num_tc)
				netdev_set_tc_queue(netdev, netdev_tc++, qcount,
						    qoffset);
		}
	}
exit:
	return ret;
}

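/**
 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
 * @adapter: board private structure
 * @f: pointer to struct tc_cls_flower_offload
 * @filter: pointer to cloud filter structure
 **/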
2434static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
2435 struct tc_cls_flower_offload *f,
2436 struct iavf_cloud_filter *filter)
2437{
2438 u16 n_proto_mask = 0;
2439 u16 n_proto_key = 0;
2440 u8 field_flags = 0;
2441 u16 addr_type = 0;
2442 u16 n_proto = 0;
2443 int i = 0;
2444 struct virtchnl_filter *vf = &filter->f;
2445
2446 if (f->dissector->used_keys &
2447 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2448 BIT(FLOW_DISSECTOR_KEY_BASIC) |
2449 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2450 BIT(FLOW_DISSECTOR_KEY_VLAN) |
2451 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2452 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2453 BIT(FLOW_DISSECTOR_KEY_PORTS) |
2454 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
2455 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
2456 f->dissector->used_keys);
2457 return -EOPNOTSUPP;
2458 }
2459
2460 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
2461 struct flow_dissector_key_keyid *mask =
2462 skb_flow_dissector_target(f->dissector,
2463 FLOW_DISSECTOR_KEY_ENC_KEYID,
2464 f->mask);
2465
2466 if (mask->keyid != 0)
2467 field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
2468 }
2469
2470 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
2471 struct flow_dissector_key_basic *key =
2472 skb_flow_dissector_target(f->dissector,
2473 FLOW_DISSECTOR_KEY_BASIC,
2474 f->key);
2475
2476 struct flow_dissector_key_basic *mask =
2477 skb_flow_dissector_target(f->dissector,
2478 FLOW_DISSECTOR_KEY_BASIC,
2479 f->mask);
2480 n_proto_key = ntohs(key->n_proto);
2481 n_proto_mask = ntohs(mask->n_proto);
2482
2483 if (n_proto_key == ETH_P_ALL) {
2484 n_proto_key = 0;
2485 n_proto_mask = 0;
2486 }
2487 n_proto = n_proto_key & n_proto_mask;
2488 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
2489 return -EINVAL;
2490 if (n_proto == ETH_P_IPV6) {
			/* flow type was preset to IPv4 TCP; switch to IPv6 */
2492 vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
2493 }
2494
2495 if (key->ip_proto != IPPROTO_TCP) {
2496 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
2497 return -EINVAL;
2498 }
2499 }
2500
2501 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2502 struct flow_dissector_key_eth_addrs *key =
2503 skb_flow_dissector_target(f->dissector,
2504 FLOW_DISSECTOR_KEY_ETH_ADDRS,
2505 f->key);
2506
2507 struct flow_dissector_key_eth_addrs *mask =
2508 skb_flow_dissector_target(f->dissector,
2509 FLOW_DISSECTOR_KEY_ETH_ADDRS,
2510 f->mask);
2511
2512 if (!is_zero_ether_addr(mask->dst)) {
2513 if (is_broadcast_ether_addr(mask->dst)) {
2514 field_flags |= IAVF_CLOUD_FIELD_OMAC;
2515 } else {
2516 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
2517 mask->dst);
2518 return I40E_ERR_CONFIG;
2519 }
2520 }
2521
2522 if (!is_zero_ether_addr(mask->src)) {
2523 if (is_broadcast_ether_addr(mask->src)) {
2524 field_flags |= IAVF_CLOUD_FIELD_IMAC;
2525 } else {
2526 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
2527 mask->src);
2528 return I40E_ERR_CONFIG;
2529 }
2530 }
2531
2532 if (!is_zero_ether_addr(key->dst))
2533 if (is_valid_ether_addr(key->dst) ||
2534 is_multicast_ether_addr(key->dst)) {
				/* set the mask if a valid dst_mac address */
2536 for (i = 0; i < ETH_ALEN; i++)
2537 vf->mask.tcp_spec.dst_mac[i] |= 0xff;
2538 ether_addr_copy(vf->data.tcp_spec.dst_mac,
2539 key->dst);
2540 }
2541
2542 if (!is_zero_ether_addr(key->src))
2543 if (is_valid_ether_addr(key->src) ||
2544 is_multicast_ether_addr(key->src)) {
				/* set the mask if a valid src_mac address */
2546 for (i = 0; i < ETH_ALEN; i++)
2547 vf->mask.tcp_spec.src_mac[i] |= 0xff;
2548 ether_addr_copy(vf->data.tcp_spec.src_mac,
2549 key->src);
2550 }
2551 }
2552
2553 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
2554 struct flow_dissector_key_vlan *key =
2555 skb_flow_dissector_target(f->dissector,
2556 FLOW_DISSECTOR_KEY_VLAN,
2557 f->key);
2558 struct flow_dissector_key_vlan *mask =
2559 skb_flow_dissector_target(f->dissector,
2560 FLOW_DISSECTOR_KEY_VLAN,
2561 f->mask);
2562
2563 if (mask->vlan_id) {
2564 if (mask->vlan_id == VLAN_VID_MASK) {
2565 field_flags |= IAVF_CLOUD_FIELD_IVLAN;
2566 } else {
2567 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
2568 mask->vlan_id);
2569 return I40E_ERR_CONFIG;
2570 }
2571 }
2572 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
2573 vf->data.tcp_spec.vlan_id = cpu_to_be16(key->vlan_id);
2574 }
2575
2576 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
2577 struct flow_dissector_key_control *key =
2578 skb_flow_dissector_target(f->dissector,
2579 FLOW_DISSECTOR_KEY_CONTROL,
2580 f->key);
2581
2582 addr_type = key->addr_type;
2583 }
2584
2585 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2586 struct flow_dissector_key_ipv4_addrs *key =
2587 skb_flow_dissector_target(f->dissector,
2588 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
2589 f->key);
2590 struct flow_dissector_key_ipv4_addrs *mask =
2591 skb_flow_dissector_target(f->dissector,
2592 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
2593 f->mask);
2594
2595 if (mask->dst) {
2596 if (mask->dst == cpu_to_be32(0xffffffff)) {
2597 field_flags |= IAVF_CLOUD_FIELD_IIP;
2598 } else {
2599 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
2600 be32_to_cpu(mask->dst));
2601 return I40E_ERR_CONFIG;
2602 }
2603 }
2604
2605 if (mask->src) {
2606 if (mask->src == cpu_to_be32(0xffffffff)) {
2607 field_flags |= IAVF_CLOUD_FIELD_IIP;
2608 } else {
				dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
					be32_to_cpu(mask->src));
2611 return I40E_ERR_CONFIG;
2612 }
2613 }
2614
2615 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
2616 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
2617 return I40E_ERR_CONFIG;
2618 }
2619 if (key->dst) {
2620 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
2621 vf->data.tcp_spec.dst_ip[0] = key->dst;
2622 }
2623 if (key->src) {
2624 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
2625 vf->data.tcp_spec.src_ip[0] = key->src;
2626 }
2627 }
2628
2629 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2630 struct flow_dissector_key_ipv6_addrs *key =
2631 skb_flow_dissector_target(f->dissector,
2632 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
2633 f->key);
2634 struct flow_dissector_key_ipv6_addrs *mask =
2635 skb_flow_dissector_target(f->dissector,
2636 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
2637 f->mask);

		/* validate mask, make sure it is not IPV6_ADDR_ANY */
2640 if (ipv6_addr_any(&mask->dst)) {
2641 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
2642 IPV6_ADDR_ANY);
2643 return I40E_ERR_CONFIG;
2644 }

		/* src and dest IPv6 address should not be LOOPBACK
		 * (0:0:0:0:0:0:0:1), which can be represented as ::1
		 */
2649 if (ipv6_addr_loopback(&key->dst) ||
2650 ipv6_addr_loopback(&key->src)) {
2651 dev_err(&adapter->pdev->dev,
2652 "ipv6 addr should not be loopback\n");
2653 return I40E_ERR_CONFIG;
2654 }
2655 if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
2656 field_flags |= IAVF_CLOUD_FIELD_IIP;
2657
2658 for (i = 0; i < 4; i++)
2659 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
2660 memcpy(&vf->data.tcp_spec.dst_ip, &key->dst.s6_addr32,
2661 sizeof(vf->data.tcp_spec.dst_ip));
2662 for (i = 0; i < 4; i++)
2663 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
2664 memcpy(&vf->data.tcp_spec.src_ip, &key->src.s6_addr32,
2665 sizeof(vf->data.tcp_spec.src_ip));
2666 }
2667 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
2668 struct flow_dissector_key_ports *key =
2669 skb_flow_dissector_target(f->dissector,
2670 FLOW_DISSECTOR_KEY_PORTS,
2671 f->key);
2672 struct flow_dissector_key_ports *mask =
2673 skb_flow_dissector_target(f->dissector,
2674 FLOW_DISSECTOR_KEY_PORTS,
2675 f->mask);
2676
2677 if (mask->src) {
2678 if (mask->src == cpu_to_be16(0xffff)) {
2679 field_flags |= IAVF_CLOUD_FIELD_IIP;
2680 } else {
2681 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
2682 be16_to_cpu(mask->src));
2683 return I40E_ERR_CONFIG;
2684 }
2685 }
2686
2687 if (mask->dst) {
2688 if (mask->dst == cpu_to_be16(0xffff)) {
2689 field_flags |= IAVF_CLOUD_FIELD_IIP;
2690 } else {
2691 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
2692 be16_to_cpu(mask->dst));
2693 return I40E_ERR_CONFIG;
2694 }
2695 }
2696 if (key->dst) {
2697 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
2698 vf->data.tcp_spec.dst_port = key->dst;
2699 }
2700
2701 if (key->src) {
2702 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
2703 vf->data.tcp_spec.src_port = key->src;
2704 }
2705 }
2706 vf->field_flags = field_flags;
2707
2708 return 0;
2709}
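
/* Example (hypothetical addresses and interface name) of a flower filter
 * that iavf_parse_cls_flower() above accepts, steering a TCP flow to
 * traffic class 1:
 *
 *   tc filter add dev <iface> protocol ip parent ffff: flower \
 *      dst_ip 192.168.1.10 ip_proto tcp dst_port 80 skip_sw hw_tc 1
 *
 * Only TCP over IPv4/IPv6 is accepted, and any mask that is supplied must
 * be all-ones (an exact match), as enforced above.
 */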
2710
/**
 * iavf_handle_tclass - Forward to a traffic class on the device
 * @adapter: board private structure
 * @tc: traffic class index on the device
 * @filter: pointer to cloud filter structure
 **/
2717static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
2718 struct iavf_cloud_filter *filter)
2719{
2720 if (tc == 0)
2721 return 0;
2722 if (tc < adapter->num_tc) {
2723 if (!filter->f.data.tcp_spec.dst_port) {
2724 dev_err(&adapter->pdev->dev,
2725 "Specify destination port to redirect to traffic class other than TC0\n");
2726 return -EINVAL;
2727 }
2728 }
2729
2730 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
2731 filter->f.action_meta = tc;
2732 return 0;
2733}
2734
/**
 * iavf_configure_clsflower - Add tc flower filters
 * @adapter: board private structure
 * @cls_flower: pointer to struct tc_cls_flower_offload
 **/
2740static int iavf_configure_clsflower(struct iavf_adapter *adapter,
2741 struct tc_cls_flower_offload *cls_flower)
2742{
2743 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
2744 struct iavf_cloud_filter *filter = NULL;
2745 int err = -EINVAL, count = 50;
2746
2747 if (tc < 0) {
2748 dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
2749 return -EINVAL;
2750 }
2751
2752 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
2753 if (!filter)
2754 return -ENOMEM;
2755
2756 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
2757 &adapter->crit_section)) {
2758 if (--count == 0)
2759 goto err;
2760 udelay(1);
2761 }
2762
2763 filter->cookie = cls_flower->cookie;

	/* set the mask to all zeroes to begin with */
2766 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
	/* start out with flow type and eth type IPv4 to begin with */
2768 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
2769 err = iavf_parse_cls_flower(adapter, cls_flower, filter);
2770 if (err < 0)
2771 goto err;
2772
2773 err = iavf_handle_tclass(adapter, tc, filter);
2774 if (err < 0)
2775 goto err;

	/* add filter to the list */
2778 spin_lock_bh(&adapter->cloud_filter_list_lock);
2779 list_add_tail(&filter->list, &adapter->cloud_filter_list);
2780 adapter->num_cloud_filters++;
2781 filter->add = true;
2782 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
2783 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2784err:
2785 if (err)
2786 kfree(filter);
2787
2788 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
2789 return err;
2790}
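
/* Note: iavf_configure_clsflower() only validates and queues the filter,
 * setting IAVF_FLAG_AQ_ADD_CLOUD_FILTER; the VIRTCHNL_OP_ADD_CLOUD_FILTER
 * request is sent to the PF later, when the pending aq_required flags are
 * processed, so a zero return here does not yet mean the filter has been
 * programmed into hardware.
 */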
2791
/**
 * iavf_find_cf - Find the cloud filter in the list
 * @adapter: Board private structure
 * @cookie: filter specific cookie
 *
 * Returns ptr to the filter object or NULL. Must be called while holding
 * the cloud_filter_list_lock.
 **/
2799static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
2800 unsigned long *cookie)
2801{
2802 struct iavf_cloud_filter *filter = NULL;
2803
2804 if (!cookie)
2805 return NULL;
2806
2807 list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
2808 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
2809 return filter;
2810 }
2811 return NULL;
2812}
2813
/**
 * iavf_delete_clsflower - Remove tc flower filters
 * @adapter: board private structure
 * @cls_flower: pointer to struct tc_cls_flower_offload
 **/
2819static int iavf_delete_clsflower(struct iavf_adapter *adapter,
2820 struct tc_cls_flower_offload *cls_flower)
2821{
2822 struct iavf_cloud_filter *filter = NULL;
2823 int err = 0;
2824
2825 spin_lock_bh(&adapter->cloud_filter_list_lock);
2826 filter = iavf_find_cf(adapter, &cls_flower->cookie);
2827 if (filter) {
2828 filter->del = true;
2829 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
2830 } else {
2831 err = -EINVAL;
2832 }
2833 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2834
2835 return err;
2836}
2837
/**
 * iavf_setup_tc_cls_flower - flower classifier offloads
 * @adapter: board private structure
 * @cls_flower: pointer to struct tc_cls_flower_offload
 **/
2843static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
2844 struct tc_cls_flower_offload *cls_flower)
2845{
2846 if (cls_flower->common.chain_index)
2847 return -EOPNOTSUPP;
2848
2849 switch (cls_flower->command) {
2850 case TC_CLSFLOWER_REPLACE:
2851 return iavf_configure_clsflower(adapter, cls_flower);
2852 case TC_CLSFLOWER_DESTROY:
2853 return iavf_delete_clsflower(adapter, cls_flower);
2854 case TC_CLSFLOWER_STATS:
2855 return -EOPNOTSUPP;
2856 default:
2857 return -EOPNOTSUPP;
2858 }
2859}
2860
/**
 * iavf_setup_tc_block_cb - block callback for tc
 * @type: type of offload
 * @type_data: offload data
 * @cb_priv: adapter pointer handed in at registration time
 *
 * This function is the block callback for traffic classes
 **/
2869static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2870 void *cb_priv)
2871{
2872 switch (type) {
2873 case TC_SETUP_CLSFLOWER:
2874 return iavf_setup_tc_cls_flower(cb_priv, type_data);
2875 default:
2876 return -EOPNOTSUPP;
2877 }
2878}
2879
/**
 * iavf_setup_tc_block - register callbacks for tc
 * @dev: network interface device structure
 * @f: tc offload data
 *
 * This function registers block callbacks for tc
 * filters
 **/
2888static int iavf_setup_tc_block(struct net_device *dev,
2889 struct tc_block_offload *f)
2890{
2891 struct iavf_adapter *adapter = netdev_priv(dev);
2892
2893 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
2894 return -EOPNOTSUPP;
2895
2896 switch (f->command) {
2897 case TC_BLOCK_BIND:
2898 return tcf_block_cb_register(f->block, iavf_setup_tc_block_cb,
2899 adapter, adapter);
2900 case TC_BLOCK_UNBIND:
2901 tcf_block_cb_unregister(f->block, iavf_setup_tc_block_cb,
2902 adapter);
2903 return 0;
2904 default:
2905 return -EOPNOTSUPP;
2906 }
2907}
2908
/**
 * iavf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @type: type of offload
 * @type_data: tc offload data
 *
 * This function is the callback to ndo_setup_tc in the
 * netdev_ops.
 *
 * Returns 0 on success
 **/
2920static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
2921 void *type_data)
2922{
2923 switch (type) {
2924 case TC_SETUP_QDISC_MQPRIO:
2925 return __iavf_setup_tc(netdev, type_data);
2926 case TC_SETUP_BLOCK:
2927 return iavf_setup_tc_block(netdev, type_data);
2928 default:
2929 return -EOPNOTSUPP;
2930 }
2931}
2932
/**
 * iavf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
2945static int iavf_open(struct net_device *netdev)
2946{
2947 struct iavf_adapter *adapter = netdev_priv(netdev);
2948 int err;
2949
2950 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
2951 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
2952 return -EIO;
2953 }
2954
2955 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
2956 &adapter->crit_section))
2957 usleep_range(500, 1000);
2958
2959 if (adapter->state != __IAVF_DOWN) {
2960 err = -EBUSY;
2961 goto err_unlock;
2962 }
2963
	/* allocate transmit descriptors */
2965 err = iavf_setup_all_tx_resources(adapter);
2966 if (err)
2967 goto err_setup_tx;
2968
	/* allocate receive descriptors */
2970 err = iavf_setup_all_rx_resources(adapter);
2971 if (err)
2972 goto err_setup_rx;
2973
	/* clear any pending interrupts, may auto mask */
2975 err = iavf_request_traffic_irqs(adapter, netdev->name);
2976 if (err)
2977 goto err_req_irq;
2978
2979 spin_lock_bh(&adapter->mac_vlan_list_lock);
2980
2981 iavf_add_filter(adapter, adapter->hw.mac.addr);
2982
2983 spin_unlock_bh(&adapter->mac_vlan_list_lock);
2984
2985 iavf_configure(adapter);
2986
2987 iavf_up_complete(adapter);
2988
2989 iavf_irq_enable(adapter, true);
2990
2991 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
2992
2993 return 0;
2994
2995err_req_irq:
2996 iavf_down(adapter);
2997 iavf_free_traffic_irqs(adapter);
2998err_setup_rx:
2999 iavf_free_all_rx_resources(adapter);
3000err_setup_tx:
3001 iavf_free_all_tx_resources(adapter);
3002err_unlock:
3003 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3004
3005 return err;
3006}
3007
/**
 * iavf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  All IRQs except vector 0 (reserved for the admin
 * queue) are freed, along with all transmit and receive resources.
 **/
3019static int iavf_close(struct net_device *netdev)
3020{
3021 struct iavf_adapter *adapter = netdev_priv(netdev);
3022 int status;
3023
3024 if (adapter->state <= __IAVF_DOWN_PENDING)
3025 return 0;
3026
3027 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
3028 &adapter->crit_section))
3029 usleep_range(500, 1000);
3030
3031 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3032 if (CLIENT_ENABLED(adapter))
3033 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
3034
3035 iavf_down(adapter);
3036 adapter->state = __IAVF_DOWN_PENDING;
3037 iavf_free_traffic_irqs(adapter);
3038
3039 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3040
	/* We explicitly don't free resources here because the hardware is
	 * still active and can DMA into memory. Resources are cleared in
	 * iavf_virtchnl_completion() after we get confirmation from the PF
	 * driver that the rings have been stopped.
	 *
	 * Also, we wait for state to transition to __IAVF_DOWN before
	 * returning. State change occurs in iavf_virtchnl_completion() after
	 * VF resources are released (which occurs after PF driver processes
	 * the reset msg sent from the VF).
	 */
3052 status = wait_event_timeout(adapter->down_waitqueue,
3053 adapter->state == __IAVF_DOWN,
3054 msecs_to_jiffies(200));
3055 if (!status)
3056 netdev_warn(netdev, "Device resources not yet released\n");
3057 return 0;
3058}
3059
/**
 * iavf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
3067static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
3068{
3069 struct iavf_adapter *adapter = netdev_priv(netdev);
3070
3071 netdev->mtu = new_mtu;
3072 if (CLIENT_ENABLED(adapter)) {
3073 iavf_notify_client_l2_params(&adapter->vsi);
3074 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
3075 }
3076 adapter->flags |= IAVF_FLAG_RESET_NEEDED;
3077 schedule_work(&adapter->reset_task);
3078
3079 return 0;
3080}
3081
/**
 * iavf_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 * Note: expects to be called while under rtnl_lock()
 **/
3088static int iavf_set_features(struct net_device *netdev,
3089 netdev_features_t features)
3090{
3091 struct iavf_adapter *adapter = netdev_priv(netdev);
3092
	/* Don't allow changing VLAN_RX flag when adapter is not capable
	 * of VLAN offload
	 */
3096 if (!VLAN_ALLOWED(adapter)) {
3097 if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
3098 return -EINVAL;
3099 } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
3100 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3101 adapter->aq_required |=
3102 IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
3103 else
3104 adapter->aq_required |=
3105 IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
3106 }
3107
3108 return 0;
3109}
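
/* Example: toggling receive VLAN stripping from userspace reaches
 * iavf_set_features() above via (hypothetical interface name):
 *
 *   ethtool -K <iface> rxvlan off
 *
 * which, when the PF has granted VIRTCHNL_VF_OFFLOAD_VLAN, queues
 * IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING for the PF to act on.
 */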
3110
/**
 * iavf_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 **/
3117static netdev_features_t iavf_features_check(struct sk_buff *skb,
3118 struct net_device *dev,
3119 netdev_features_t features)
3120{
3121 size_t len;
3122
	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame.  We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
3127 if (skb->ip_summed != CHECKSUM_PARTIAL)
3128 return features;
3129
3130
	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes.  If it is then we need to drop support for GSO.
	 */
3133 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
3134 features &= ~NETIF_F_GSO_MASK;
3135
	/* MACLEN can support at most 63 words */
3137 len = skb_network_header(skb) - skb->data;
3138 if (len & ~(63 * 2))
3139 goto out_err;
3140
	/* IPLEN and EIPLEN can support at most 127 dwords */
3142 len = skb_transport_header(skb) - skb_network_header(skb);
3143 if (len & ~(127 * 4))
3144 goto out_err;
3145
3146 if (skb->encapsulation) {
		/* L4TUNLEN can support 127 words */
3148 len = skb_inner_network_header(skb) - skb_transport_header(skb);
3149 if (len & ~(127 * 2))
3150 goto out_err;
3151
		/* IPLEN can support at most 127 dwords */
3153 len = skb_inner_transport_header(skb) -
3154 skb_inner_network_header(skb);
3155 if (len & ~(127 * 4))
3156 goto out_err;
3157 }
3158
	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value and we support all possible values supported
	 * by TCP, which is at most 15 dwords
	 */
3164 return features;
3165out_err:
3166 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3167}
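
/* The length checks above rely on a mask trick: len & ~(63 * 2) is nonzero
 * whenever len is odd or greater than 126, i.e. whenever it cannot be
 * expressed as a 7-bit count of 2-byte words; likewise len & ~(127 * 4)
 * rejects lengths that are not a multiple of 4 or exceed 508 bytes (a
 * 7-bit count of 4-byte dwords). For example, a 20-byte IPv4 header gives
 * 20 & ~508 == 0 and passes.
 */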
3168
/**
 * iavf_fix_features - fix up the netdev feature bits
 * @netdev: our net device
 * @features: desired feature bits
 *
 * Returns fixed-up features bits
 **/
3176static netdev_features_t iavf_fix_features(struct net_device *netdev,
3177 netdev_features_t features)
3178{
3179 struct iavf_adapter *adapter = netdev_priv(netdev);
3180
3181 if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
3182 features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3183 NETIF_F_HW_VLAN_CTAG_RX |
3184 NETIF_F_HW_VLAN_CTAG_FILTER);
3185
3186 return features;
3187}
3188
3189static const struct net_device_ops iavf_netdev_ops = {
3190 .ndo_size = sizeof(struct net_device_ops),
3191 .ndo_open = iavf_open,
3192 .ndo_stop = iavf_close,
3193 .ndo_start_xmit = iavf_xmit_frame,
3194 .ndo_set_rx_mode = iavf_set_rx_mode,
3195 .ndo_validate_addr = eth_validate_addr,
3196 .ndo_set_mac_address = iavf_set_mac,
3197 .extended.ndo_change_mtu = iavf_change_mtu,
3198 .ndo_tx_timeout = iavf_tx_timeout,
3199 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid,
3200 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid,
3201 .ndo_features_check = iavf_features_check,
3202 .ndo_fix_features = iavf_fix_features,
3203 .ndo_set_features = iavf_set_features,
3204 .extended.ndo_setup_tc_rh = iavf_setup_tc,
3205};
3206
/**
 * iavf_check_reset_complete - check that VF reset is complete
 * @hw: pointer to hw struct
 *
 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
 **/
3213static int iavf_check_reset_complete(struct iavf_hw *hw)
3214{
3215 u32 rstat;
3216 int i;
3217
3218 for (i = 0; i < 100; i++) {
3219 rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
3220 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
3221 if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
3222 (rstat == VIRTCHNL_VFR_COMPLETED))
3223 return 0;
3224 usleep_range(10, 20);
3225 }
3226 return -EBUSY;
3227}
3228
/**
 * iavf_process_config - Process the config information we got from the PF
 * @adapter: board private structure
 *
 * Verify that we have a valid config struct, and set up our netdev features
 * and our VSI struct.
 **/
3236int iavf_process_config(struct iavf_adapter *adapter)
3237{
3238 struct virtchnl_vf_resource *vfres = adapter->vf_res;
3239 int i, num_req_queues = adapter->num_req_queues;
3240 struct net_device *netdev = adapter->netdev;
3241 struct iavf_vsi *vsi = &adapter->vsi;
3242 netdev_features_t hw_enc_features;
3243 netdev_features_t hw_features;
3244
	/* got VF config message back from PF, now we can parse it */
3246 for (i = 0; i < vfres->num_vsis; i++) {
3247 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
3248 adapter->vsi_res = &vfres->vsi_res[i];
3249 }
3250 if (!adapter->vsi_res) {
3251 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
3252 return -ENODEV;
3253 }
3254
3255 if (num_req_queues &&
3256 num_req_queues != adapter->vsi_res->num_queue_pairs) {
		/* Problem.  The PF gave us fewer queues than what we had
		 * negotiated in our request.  Need a reset to see if we can't
		 * get back to a working state.
		 */
3261 dev_err(&adapter->pdev->dev,
3262 "Requested %d queues, but PF only gave us %d.\n",
3263 num_req_queues,
3264 adapter->vsi_res->num_queue_pairs);
3265 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
3266 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
3267 iavf_schedule_reset(adapter);
3268 return -ENODEV;
3269 }
3270 adapter->num_req_queues = 0;
3271
3272 hw_enc_features = NETIF_F_SG |
3273 NETIF_F_IP_CSUM |
3274 NETIF_F_IPV6_CSUM |
3275 NETIF_F_HIGHDMA |
3276 NETIF_F_SOFT_FEATURES |
3277 NETIF_F_TSO |
3278 NETIF_F_TSO_ECN |
3279 NETIF_F_TSO6 |
3280 NETIF_F_SCTP_CRC |
3281 NETIF_F_RXHASH |
3282 NETIF_F_RXCSUM |
3283 0;
3284
	/* advertise to stack only if offloads for encapsulated packets is
	 * supported
	 */
3288 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
3289 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
3290 NETIF_F_GSO_GRE |
3291 NETIF_F_GSO_GRE_CSUM |
3292 NETIF_F_GSO_IPIP |
3293 NETIF_F_GSO_SIT |
3294 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3295 NETIF_F_GSO_PARTIAL |
3296 0;
3297
3298 if (!(vfres->vf_cap_flags &
3299 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
3300 netdev->gso_partial_features |=
3301 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3302
3303 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
3304 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
3305 netdev->hw_enc_features |= hw_enc_features;
3306 }
3307
3308 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
3309
	/* Write features and hw_features separately to avoid polluting
	 * with, or dropping, features that are set when we registered.
	 */
3313 hw_features = hw_enc_features;
3314
	/* Enable VLAN features if supported */
3316 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3317 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
3318 NETIF_F_HW_VLAN_CTAG_RX);

	/* Enable cloud filter if ADQ is supported */
3320 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
3321 hw_features |= NETIF_F_HW_TC;
3322
3323 netdev->hw_features |= hw_features;
3324
3325 netdev->features |= hw_features;
3326
3327 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3328 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3329
3330 netdev->priv_flags |= IFF_UNICAST_FLT;
3331
	/* Do not turn on offloads when they are requested to be turned off.
	 * TSO needs minimum 576 bytes to work correctly.
	 */
3335 if (netdev->wanted_features) {
3336 if (!(netdev->wanted_features & NETIF_F_TSO) ||
3337 netdev->mtu < 576)
3338 netdev->features &= ~NETIF_F_TSO;
3339 if (!(netdev->wanted_features & NETIF_F_TSO6) ||
3340 netdev->mtu < 576)
3341 netdev->features &= ~NETIF_F_TSO6;
3342 if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
3343 netdev->features &= ~NETIF_F_TSO_ECN;
3344 if (!(netdev->wanted_features & NETIF_F_GRO))
3345 netdev->features &= ~NETIF_F_GRO;
3346 if (!(netdev->wanted_features & NETIF_F_GSO))
3347 netdev->features &= ~NETIF_F_GSO;
3348 }
3349
3350 adapter->vsi.id = adapter->vsi_res->vsi_id;
3351
3352 adapter->vsi.back = adapter;
3353 adapter->vsi.base_vector = 1;
3354 adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
3355 vsi->netdev = adapter->netdev;
3356 vsi->qs_handle = adapter->vsi_res->qset_handle;
3357 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
3358 adapter->rss_key_size = vfres->rss_key_size;
3359 adapter->rss_lut_size = vfres->rss_lut_size;
3360 } else {
3361 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
3362 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
3363 }
3364
3365 return 0;
3366}
3367
/**
 * iavf_init_task - worker thread to perform delayed initialization
 * @work: pointer to work_struct containing our data
 *
 * This task completes the work that was begun in probe.  Due to the nature
 * of VF-PF communications, we may need to wait tens of milliseconds to get
 * a response from the PF.  Rather than busy-wait in probe and bog down the
 * whole system, we'll do it in a task so we can sleep.
 * This task only runs during driver init.  Once we've established
 * communications with the PF driver and set up our netdev, the watchdog
 * takes over.
 **/
3380static void iavf_init_task(struct work_struct *work)
3381{
3382 struct iavf_adapter *adapter = container_of(work,
3383 struct iavf_adapter,
3384 init_task.work);
3385 struct net_device *netdev = adapter->netdev;
3386 struct iavf_hw *hw = &adapter->hw;
3387 struct pci_dev *pdev = adapter->pdev;
3388 int err, bufsz;
3389
3390 switch (adapter->state) {
3391 case __IAVF_STARTUP:
		/* driver loaded, probe complete */
3393 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
3394 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
3395 err = iavf_set_mac_type(hw);
3396 if (err) {
3397 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
3398 err);
3399 goto err;
3400 }
3401 err = iavf_check_reset_complete(hw);
3402 if (err) {
3403 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
3404 err);
3405 goto err;
3406 }
3407 hw->aq.num_arq_entries = IAVF_AQ_LEN;
3408 hw->aq.num_asq_entries = IAVF_AQ_LEN;
3409 hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
3410 hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
3411
3412 err = iavf_init_adminq(hw);
3413 if (err) {
3414 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
3415 err);
3416 goto err;
3417 }
3418 err = iavf_send_api_ver(adapter);
3419 if (err) {
3420 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
3421 iavf_shutdown_adminq(hw);
3422 goto err;
3423 }
3424 adapter->state = __IAVF_INIT_VERSION_CHECK;
3425 goto restart;
3426 case __IAVF_INIT_VERSION_CHECK:
3427 if (!iavf_asq_done(hw)) {
3428 dev_err(&pdev->dev, "Admin queue command never completed\n");
3429 iavf_shutdown_adminq(hw);
3430 adapter->state = __IAVF_STARTUP;
3431 goto err;
3432 }

		/* aq msg sent, awaiting reply */
3435 err = iavf_verify_api_ver(adapter);
3436 if (err) {
3437 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
3438 err = iavf_send_api_ver(adapter);
3439 else
3440 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
3441 adapter->pf_version.major,
3442 adapter->pf_version.minor,
3443 VIRTCHNL_VERSION_MAJOR,
3444 VIRTCHNL_VERSION_MINOR);
3445 goto err;
3446 }
3447 err = iavf_send_vf_config_msg(adapter);
3448 if (err) {
3449 dev_err(&pdev->dev, "Unable to send config request (%d)\n",
3450 err);
3451 goto err;
3452 }
3453 adapter->state = __IAVF_INIT_GET_RESOURCES;
3454 goto restart;
3455 case __IAVF_INIT_GET_RESOURCES:
		/* aq msg sent, awaiting reply */
3457 if (!adapter->vf_res) {
3458 bufsz = sizeof(struct virtchnl_vf_resource) +
3459 (IAVF_MAX_VF_VSI *
3460 sizeof(struct virtchnl_vsi_resource));
3461 adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
3462 if (!adapter->vf_res)
3463 goto err;
3464 }
3465 err = iavf_get_vf_config(adapter);
3466 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
3467 err = iavf_send_vf_config_msg(adapter);
3468 goto err;
3469 } else if (err == I40E_ERR_PARAM) {
			/* We only get ERR_PARAM if the device is in a very
			 * bad state or if we've been disabled for previous
			 * bad behavior.  Either way, we're done now.
			 */
3474 iavf_shutdown_adminq(hw);
3475 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
3476 return;
3477 }
3478 if (err) {
3479 dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
3480 err);
3481 goto err_alloc;
3482 }
3483 adapter->state = __IAVF_INIT_SW;
3484 break;
3485 default:
3486 goto err_alloc;
3487 }
3488
3489 if (iavf_process_config(adapter))
3490 goto err_alloc;
3491 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
3492
3493 adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
3494
3495 netdev->netdev_ops = &iavf_netdev_ops;
3496 iavf_set_ethtool_ops(netdev);
3497 netdev->watchdog_timeo = 5 * HZ;
3498
	/* MTU range: 68 - 9710 */
3500 netdev->extended->min_mtu = ETH_MIN_MTU;
3501 netdev->extended->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
3502
3503 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
3504 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
3505 adapter->hw.mac.addr);
3506 eth_hw_addr_random(netdev);
3507 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
3508 } else {
3509 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
3510 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
3511 }
3512
3513 timer_setup(&adapter->watchdog_timer, iavf_watchdog_timer, 0);
3514 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3515
3516 adapter->tx_desc_count = IAVF_DEFAULT_TXD;
3517 adapter->rx_desc_count = IAVF_DEFAULT_RXD;
3518 err = iavf_init_interrupt_scheme(adapter);
3519 if (err)
3520 goto err_sw_init;
3521 iavf_map_rings_to_vectors(adapter);
3522 if (adapter->vf_res->vf_cap_flags &
3523 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
3524 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
3525
3526 err = iavf_request_misc_irq(adapter);
3527 if (err)
3528 goto err_sw_init;
3529
3530 netif_carrier_off(netdev);
3531 adapter->link_up = false;
3532
3533 if (!adapter->netdev_registered) {
3534 err = register_netdev(netdev);
3535 if (err)
3536 goto err_register;
3537 }
3538
3539 adapter->netdev_registered = true;
3540
3541 netif_tx_stop_all_queues(netdev);
3542 if (CLIENT_ALLOWED(adapter)) {
3543 err = iavf_lan_add_device(adapter);
3544 if (err)
3545 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
3546 err);
3547 }
3548
3549 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
3550 if (netdev->features & NETIF_F_GRO)
3551 dev_info(&pdev->dev, "GRO is enabled\n");
3552
3553 adapter->state = __IAVF_DOWN;
3554 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3555 iavf_misc_irq_enable(adapter);
3556 wake_up(&adapter->down_waitqueue);
3557
3558 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
3559 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
3560 if (!adapter->rss_key || !adapter->rss_lut)
3561 goto err_mem;
3562
3563 if (RSS_AQ(adapter)) {
3564 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
3565 mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
3566 } else {
3567 iavf_init_rss(adapter);
3568 }
3569 return;
3570restart:
3571 schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
3572 return;
3573err_mem:
3574 iavf_free_rss(adapter);
3575err_register:
3576 iavf_free_misc_irq(adapter);
3577err_sw_init:
3578 iavf_reset_interrupt_capability(adapter);
3579err_alloc:
3580 kfree(adapter->vf_res);
3581 adapter->vf_res = NULL;
3582err:
	/* Things went into the weeds, so try again later */
3584 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
3585 dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
3586 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
3587 iavf_shutdown_adminq(hw);
3588 adapter->state = __IAVF_STARTUP;
3589 schedule_delayed_work(&adapter->init_task, HZ * 5);
3590 return;
3591 }
3592 schedule_delayed_work(&adapter->init_task, HZ);
3593}
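
/* Init state progression implemented above, assuming no errors:
 *
 *   __IAVF_STARTUP -> __IAVF_INIT_VERSION_CHECK ->
 *   __IAVF_INIT_GET_RESOURCES -> __IAVF_INIT_SW -> __IAVF_DOWN
 *
 * Each step depends on an admin queue exchange with the PF, so rather than
 * block, the task reschedules itself (the "restart" label) and finishes the
 * handshake on a later run.
 */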
3594
/**
 * iavf_shutdown - Shutdown the device in preparation for a reboot
 * @pdev: pci device structure
 **/
3599static void iavf_shutdown(struct pci_dev *pdev)
3600{
3601 struct net_device *netdev = pci_get_drvdata(pdev);
3602 struct iavf_adapter *adapter = netdev_priv(netdev);
3603
3604 netif_device_detach(netdev);
3605
3606 if (netif_running(netdev))
3607 iavf_close(netdev);
3608
	/* Prevent the watchdog from running. */
3610 adapter->state = __IAVF_REMOVE;
3611 adapter->aq_required = 0;
3612
3613#ifdef CONFIG_PM
3614 pci_save_state(pdev);
3615
3616#endif
3617 pci_disable_device(pdev);
3618}
3619
/**
 * iavf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in iavf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * iavf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
3631static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3632{
3633 struct net_device *netdev;
3634 struct iavf_adapter *adapter = NULL;
3635 struct iavf_hw *hw = NULL;
3636 int err;
3637
3638 err = pci_enable_device(pdev);
3639 if (err)
3640 return err;
3641
3642 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3643 if (err) {
3644 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3645 if (err) {
3646 dev_err(&pdev->dev,
3647 "DMA configuration failed: 0x%x\n", err);
3648 goto err_dma;
3649 }
3650 }
3651
3652 err = pci_request_regions(pdev, iavf_driver_name);
3653 if (err) {
3654 dev_err(&pdev->dev,
3655 "pci_request_regions failed 0x%x\n", err);
3656 goto err_pci_reg;
3657 }
3658
3659 pci_enable_pcie_error_reporting(pdev);
3660
3661 pci_set_master(pdev);
3662
3663 netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
3664 IAVF_MAX_REQ_QUEUES);
3665 if (!netdev) {
3666 err = -ENOMEM;
3667 goto err_alloc_etherdev;
3668 }
3669
3670 SET_NETDEV_DEV(netdev, &pdev->dev);
3671
3672 pci_set_drvdata(pdev, netdev);
3673 adapter = netdev_priv(netdev);
3674
3675 adapter->netdev = netdev;
3676 adapter->pdev = pdev;
3677
3678 hw = &adapter->hw;
3679 hw->back = adapter;
3680
3681 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3682 adapter->state = __IAVF_STARTUP;
3683
	/* Call save state here because it relies on the adapter struct. */
3685 pci_save_state(pdev);
3686
3687 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3688 pci_resource_len(pdev, 0));
3689 if (!hw->hw_addr) {
3690 err = -EIO;
3691 goto err_ioremap;
3692 }
3693 hw->vendor_id = pdev->vendor;
3694 hw->device_id = pdev->device;
3695 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
3696 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3697 hw->subsystem_device_id = pdev->subsystem_device;
3698 hw->bus.device = PCI_SLOT(pdev->devfn);
3699 hw->bus.func = PCI_FUNC(pdev->devfn);
3700 hw->bus.bus_id = pdev->bus->number;
3701
	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
3705 mutex_init(&hw->aq.asq_mutex);
3706 mutex_init(&hw->aq.arq_mutex);
3707
3708 spin_lock_init(&adapter->mac_vlan_list_lock);
3709 spin_lock_init(&adapter->cloud_filter_list_lock);
3710
3711 INIT_LIST_HEAD(&adapter->mac_filter_list);
3712 INIT_LIST_HEAD(&adapter->vlan_filter_list);
3713 INIT_LIST_HEAD(&adapter->cloud_filter_list);
3714
3715 INIT_WORK(&adapter->reset_task, iavf_reset_task);
3716 INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
3717 INIT_WORK(&adapter->watchdog_task, iavf_watchdog_task);
3718 INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
3719 INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task);
3720 schedule_delayed_work(&adapter->init_task,
3721 msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
3722
	/* Setup the wait queue for indicating transition to down status */
3724 init_waitqueue_head(&adapter->down_waitqueue);
3725
3726 return 0;
3727
3728err_ioremap:
3729 free_netdev(netdev);
3730err_alloc_etherdev:
3731 pci_release_regions(pdev);
3732err_pci_reg:
3733err_dma:
3734 pci_disable_device(pdev);
3735 return err;
3736}
3737
3738#ifdef CONFIG_PM
3739
/**
 * iavf_suspend - Power management suspend routine
 * @pdev: PCI device information struct
 * @state: suspend state (unused)
 *
 * Called when the system (VM) is entering sleep/suspend.
 **/
3746static int iavf_suspend(struct pci_dev *pdev, pm_message_t state)
3747{
3748 struct net_device *netdev = pci_get_drvdata(pdev);
3749 struct iavf_adapter *adapter = netdev_priv(netdev);
3750 int retval = 0;
3751
3752 netif_device_detach(netdev);
3753
3754 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
3755 &adapter->crit_section))
3756 usleep_range(500, 1000);
3757
3758 if (netif_running(netdev)) {
3759 rtnl_lock();
3760 iavf_down(adapter);
3761 rtnl_unlock();
3762 }
3763 iavf_free_misc_irq(adapter);
3764 iavf_reset_interrupt_capability(adapter);
3765
3766 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3767
3768 retval = pci_save_state(pdev);
3769 if (retval)
3770 return retval;
3771
3772 pci_disable_device(pdev);
3773
3774 return 0;
3775}
3776
/**
 * iavf_resume - Power management resume routine
 * @pdev: PCI device information struct
 *
 * Called when the system (VM) is resumed from sleep/suspend.
 **/
3783static int iavf_resume(struct pci_dev *pdev)
3784{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
3787 u32 err;
3788
3789 pci_set_power_state(pdev, PCI_D0);
3790 pci_restore_state(pdev);
3791

	/* pci_restore_state() clears dev->state_saved, so call
	 * pci_save_state() to restore it.
	 */
3794 pci_save_state(pdev);
3795
3796 err = pci_enable_device_mem(pdev);
3797 if (err) {
3798 dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
3799 return err;
3800 }
3801 pci_set_master(pdev);
3802
3803 rtnl_lock();
3804 err = iavf_set_interrupt_capability(adapter);
3805 if (err) {
3806 rtnl_unlock();
3807 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
3808 return err;
3809 }
3810 err = iavf_request_misc_irq(adapter);
3811 rtnl_unlock();
3812 if (err) {
3813 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
3814 return err;
3815 }
3816
3817 schedule_work(&adapter->reset_task);
3818
3819 netif_device_attach(netdev);
3820
3821 return err;
3822}
3823
3824#endif
3825
/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
3834static void iavf_remove(struct pci_dev *pdev)
3835{
3836 struct net_device *netdev = pci_get_drvdata(pdev);
3837 struct iavf_adapter *adapter = netdev_priv(netdev);
3838 struct iavf_vlan_filter *vlf, *vlftmp;
3839 struct iavf_mac_filter *f, *ftmp;
3840 struct iavf_cloud_filter *cf, *cftmp;
3841 struct iavf_hw *hw = &adapter->hw;
3842 int err;
3843
3844 set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
3845 cancel_delayed_work_sync(&adapter->init_task);
3846 cancel_work_sync(&adapter->reset_task);
3847 cancel_delayed_work_sync(&adapter->client_task);
3848 if (adapter->netdev_registered) {
3849 unregister_netdev(netdev);
3850 adapter->netdev_registered = false;
3851 }
3852 if (CLIENT_ALLOWED(adapter)) {
3853 err = iavf_lan_del_device(adapter);
3854 if (err)
3855 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
3856 err);
3857 }

	/* Shut down all the garbage mashers on the detention level */
3860 adapter->state = __IAVF_REMOVE;
3861 adapter->aq_required = 0;
3862 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
3863 iavf_request_reset(adapter);
3864 msleep(50);
	/* If the FW isn't responding, kick it once, but only once. */
3866 if (!iavf_asq_done(hw)) {
3867 iavf_request_reset(adapter);
3868 msleep(50);
3869 }
3870 iavf_free_all_tx_resources(adapter);
3871 iavf_free_all_rx_resources(adapter);
3872 iavf_misc_irq_disable(adapter);
3873 iavf_free_misc_irq(adapter);
3874 iavf_reset_interrupt_capability(adapter);
3875 iavf_free_q_vectors(adapter);
3876
3877 if (adapter->watchdog_timer.function)
3878 del_timer_sync(&adapter->watchdog_timer);
3879
3880 cancel_work_sync(&adapter->adminq_task);
3881
3882 iavf_free_rss(adapter);
3883
3884 if (hw->aq.asq.count)
3885 iavf_shutdown_adminq(hw);

	/* destroy the locks only once, here */
3888 mutex_destroy(&hw->aq.arq_mutex);
3889 mutex_destroy(&hw->aq.asq_mutex);
3890
3891 iounmap(hw->hw_addr);
3892 pci_release_regions(pdev);
3893 iavf_free_all_tx_resources(adapter);
3894 iavf_free_all_rx_resources(adapter);
3895 iavf_free_queues(adapter);
3896 kfree(adapter->vf_res);
3897 spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* If we got removed before an up/down sequence, we've got a filter
	 * hanging out there that we need to get rid of.
	 */
3901 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3902 list_del(&f->list);
3903 kfree(f);
3904 }
3905 list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
3906 list) {
3907 list_del(&vlf->list);
3908 kfree(vlf);
3909 }
3910
3911 spin_unlock_bh(&adapter->mac_vlan_list_lock);
3912
3913 spin_lock_bh(&adapter->cloud_filter_list_lock);
3914 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
3915 list_del(&cf->list);
3916 kfree(cf);
3917 }
3918 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3919
3920 free_netdev(netdev);
3921
3922 pci_disable_pcie_error_reporting(pdev);
3923
3924 pci_disable_device(pdev);
3925}
3926
3927static struct pci_driver iavf_driver = {
3928 .name = iavf_driver_name,
3929 .id_table = iavf_pci_tbl,
3930 .probe = iavf_probe,
3931 .remove = iavf_remove,
3932#ifdef CONFIG_PM
3933 .suspend = iavf_suspend,
3934 .resume = iavf_resume,
3935#endif
3936 .shutdown = iavf_shutdown,
3937};
3938
/**
 * iavf_init_module - Driver Registration Routine
 *
 * iavf_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
3945static int __init iavf_init_module(void)
3946{
3947 int ret;
3948
3949 pr_info("iavf: %s - version %s\n", iavf_driver_string,
3950 iavf_driver_version);
3951
3952 pr_info("%s\n", iavf_copyright);
3953
3954 iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
3955 iavf_driver_name);
3956 if (!iavf_wq) {
3957 pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
3958 return -ENOMEM;
3959 }
3960 ret = pci_register_driver(&iavf_driver);
3961 return ret;
3962}
3963
3964module_init(iavf_init_module);
3965
/**
 * iavf_exit_module - Driver Exit Cleanup Routine
 *
 * iavf_exit_module is called just before the driver is removed
 * from memory.
 **/
3972static void __exit iavf_exit_module(void)
3973{
3974 pci_unregister_driver(&iavf_driver);
3975 destroy_workqueue(iavf_wq);
3976}
3977
3978module_exit(iavf_exit_module);