// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"

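/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */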
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 3
#define DRV_VERSION_MINOR 2
#define DRV_VERSION_BUILD 3
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." \
	__stringify(DRV_VERSION_BUILD) \
	DRV_KERN
const char iavf_driver_version[] = DRV_VERSION;
static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";

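/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */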
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *iavf_wq;

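/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/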
iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
				    struct iavf_dma_mem *mem,
				    u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem)
		return I40E_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return I40E_ERR_NO_MEMORY;
}

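/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/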
iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct iavf_dma_mem *mem)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem || !mem->va)
		return I40E_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

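/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/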
iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
				     struct iavf_virt_mem *mem, u32 size)
{
	if (!mem)
		return I40E_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return I40E_ERR_NO_MEMORY;
}

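/**
 * iavf_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/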
iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, struct iavf_virt_mem *mem)
{
	if (!mem)
		return I40E_ERR_PARAM;

	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);

	return 0;
}

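/**
 * iavf_debug_d - OS dependent version of debug printing
 * @hw:  pointer to the HW structure
 * @mask: debug level
 * @fmt_str: printf-type format description
 **/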
void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
{
	char buf[512];
	va_list argptr;

	if (!(mask & ((struct iavf_hw *)hw)->debug_mask))
		return;

	va_start(argptr, fmt_str);
	vsnprintf(buf, sizeof(buf), fmt_str, argptr);
	va_end(argptr);

	/* the debug string is already formatted with a newline */
	pr_info("%s", buf);
}

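/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/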
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
	if (!(adapter->flags &
	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
		schedule_work(&adapter->reset_task);
	}
}

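/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/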
static void iavf_tx_timeout(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	iavf_schedule_reset(adapter);
}

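/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/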
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	iavf_flush(hw);

	synchronize_irq(adapter->msix_entries[0].vector);
}

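/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/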
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	iavf_flush(hw);
}

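/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/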
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	iavf_flush(hw);
}

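/**
 * iavf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/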
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
			     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
			     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
		}
	}
}

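/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/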
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
	struct iavf_hw *hw = &adapter->hw;

	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter, ~0);

	if (flush)
		iavf_flush(hw);
}

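/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/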
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	/* schedule work on the private workqueue */
	schedule_work(&adapter->adminq_task);

	return IRQ_HANDLED;
}

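/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/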
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
	struct iavf_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

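/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/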
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct iavf_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}

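/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/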
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}

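/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.
 **/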
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		iavf_map_vector_to_rxq(adapter, vidx, ridx);
		iavf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}

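/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/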
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct iavf_q_vector *q_vector =
		container_of(notify, struct iavf_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

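/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/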
static void iavf_irq_affinity_release(struct kref *ref) {}

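/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/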
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	iavf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-TxRx-%d", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-rx-%d", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-tx-%d", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  iavf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						   iavf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint in irq_set_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}

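/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/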
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &iavf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}

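/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/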
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}

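/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/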
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}

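/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/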
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}

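/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/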
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
	unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
	struct iavf_hw *hw = &adapter->hw;
	int i;

	/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
		struct net_device *netdev = adapter->netdev;

		/* For jumbo frames on systems with 4K pages we have to use
		 * an order 1 page, so we have to increase the size of our
		 * receive buffer to make room for skb_shared_info.
		 */
		rx_buf_len = IAVF_RXBUFFER_3072;

		/* We use a 1536 buffer size for configurations with
		 * standard Ethernet mtu.  On x86 this gives us enough room
		 * for shared info and 192 bytes of padding.
		 */
		if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
	}
#endif

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = rx_buf_len;

		if (adapter->flags & IAVF_FLAG_LEGACY_RX)
			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
		else
			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
	}
}

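/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/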
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (vlan == f->vlan)
			return f;
	}
	return NULL;
}

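/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/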
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f = NULL;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &adapter->vlan_filter_list);
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	}

clearout:
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	return f;
}

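/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/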
static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

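/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/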
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (!VLAN_ALLOWED(adapter))
		return -EIO;
	if (iavf_add_vlan(adapter, vid) == NULL)
		return -ENOMEM;
	return 0;
}

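/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/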
static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (VLAN_ALLOWED(adapter)) {
		iavf_del_vlan(adapter, vid);
		return 0;
	}
	return -EIO;
}

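/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/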
static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
				  const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}

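/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/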
static struct
iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
				 const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	f = iavf_find_filter(adapter, macaddr);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return f;

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	} else {
		f->remove = false;
	}

	return f;
}

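/**
 * iavf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/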
static int iavf_set_mac(struct net_device *netdev, void *p)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_mac_filter *f;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	if (adapter->flags & IAVF_FLAG_ADDR_SET_BY_PF)
		return -EPERM;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_filter(adapter, hw->mac.addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	f = iavf_add_filter(adapter, addr->sa_data);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (f) {
		ether_addr_copy(hw->mac.addr, addr->sa_data);
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
	}

	return (f == NULL) ? -ENOMEM : 0;
}

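/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/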
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (iavf_add_filter(adapter, addr))
		return 0;
	else
		return -ENOMEM;
}

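/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/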
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	f = iavf_find_filter(adapter, addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	return 0;
}

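/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/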
static void iavf_set_rx_mode(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & IAVF_FLAG_PROMISC_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & IAVF_FLAG_PROMISC_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;

	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
}

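/**
 * iavf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/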
static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;

		q_vector = &adapter->q_vectors[q_idx];
		napi = &q_vector->napi;
		napi_enable(napi);
	}
}

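/**
 * iavf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/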
static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		napi_disable(&q_vector->napi);
	}
}

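/**
 * iavf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/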
static void iavf_configure(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	iavf_set_rx_mode(netdev);

	iavf_configure_tx(adapter);
	iavf_configure_rx(adapter);
	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *ring = &adapter->rx_rings[i];

		iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
	}
}

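/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/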
static void iavf_up_complete(struct iavf_adapter *adapter)
{
	adapter->state = __IAVF_RUNNING;
	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_napi_enable_all(adapter);

	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}

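/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/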
void iavf_down(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct iavf_vlan_filter *vlf;
	struct iavf_mac_filter *f;
	struct iavf_cloud_filter *cf;

	if (adapter->state <= __IAVF_DOWN_PENDING)
		return;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	iavf_napi_disable_all(adapter);
	iavf_irq_disable(adapter);

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* clear the sync flag on all filters */
	__dev_uc_unsync(adapter->netdev, NULL);
	__dev_mc_unsync(adapter->netdev, NULL);

	/* remove all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->remove = true;
	}

	/* remove all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->remove = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* remove all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		cf->del = true;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
	    adapter->state != __IAVF_RESETTING) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of the rest.
		 */
		adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
	}

	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}

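/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/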
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = err;
	return 0;
}

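/**
 * iavf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/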
static void iavf_free_queues(struct iavf_adapter *adapter)
{
	if (!adapter->vsi_res)
		return;
	adapter->num_active_queues = 0;
	kfree(adapter->tx_rings);
	adapter->tx_rings = NULL;
	kfree(adapter->rx_rings);
	adapter->rx_rings = NULL;
}

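/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/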
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
	int i, num_active_queues;

	/* If we're in reset reallocating queues we don't actually know yet for
	 * certain the PF gave us the number of queues we asked for but we'll
	 * assume it did.  Once basic reset is finished we'll confirm once we
	 * start negotiating config with PF.
	 */
	if (adapter->num_req_queues)
		num_active_queues = adapter->num_req_queues;
	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
		 adapter->num_tc)
		num_active_queues = adapter->ch_config.total_qps;
	else
		num_active_queues = min_t(int,
					  adapter->vsi_res->num_queue_pairs,
					  (int)(num_online_cpus()));

	adapter->tx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < num_active_queues; i++) {
		struct iavf_ring *tx_ring;
		struct iavf_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->itr_setting = IAVF_ITR_TX_DEF;
		if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->dev = &adapter->pdev->dev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->itr_setting = IAVF_ITR_RX_DEF;
	}

	adapter->num_active_queues = num_active_queues;

	return 0;

err_out:
	iavf_free_queues(adapter);
	return -ENOMEM;
}

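/**
 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/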
static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
	 * us much good if we have more vectors than CPUs. However, we already
	 * limit the total number of queues by the number of CPUs so we do not
	 * need any further limiting here.
	 */
	v_budget = min_t(int, pairs + NONQ_VECS,
			 (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = iavf_acquire_msix_vectors(adapter, v_budget);

out:
	netif_set_real_num_rx_queues(adapter->netdev, pairs);
	netif_set_real_num_tx_queues(adapter->netdev, pairs);
	return err;
}

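/**
 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/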
static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
	struct i40e_aqc_get_set_rss_key_data *rss_key =
		(struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct iavf_hw *hw = &adapter->hw;
	int ret = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				  adapter->rss_lut, adapter->rss_lut_size);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
	}

	return ret;
}

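/**
 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/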
static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

	dw = (u32 *)adapter->rss_key;
	for (i = 0; i <= adapter->rss_key_size / 4; i++)
		wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
		wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);

	iavf_flush(hw);

	return 0;
}

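/**
 * iavf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/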
int iavf_config_rss(struct iavf_adapter *adapter)
{
	if (RSS_PF(adapter)) {
		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
					IAVF_FLAG_AQ_SET_RSS_KEY;
		return 0;
	} else if (RSS_AQ(adapter)) {
		return iavf_config_rss_aq(adapter);
	} else {
		return iavf_config_rss_reg(adapter);
	}
}

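/**
 * iavf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 **/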
static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
{
	u16 i;

	for (i = 0; i < adapter->rss_lut_size; i++)
		adapter->rss_lut[i] = i % adapter->num_active_queues;
}

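/**
 * iavf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/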
static int iavf_init_rss(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int ret;

	if (!RSS_PF(adapter)) {
		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
		if (adapter->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
		else
			adapter->hena = IAVF_DEFAULT_RSS_HENA;

		wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
		wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
	}

	iavf_fill_rss_lut(adapter);
	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
	ret = iavf_config_rss(adapter);

	return ret;
}

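/**
 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/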
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx = 0, num_q_vectors;
	struct iavf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
				     GFP_KERNEL);
	if (!adapter->q_vectors)
		return -ENOMEM;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		q_vector->adapter = adapter;
		q_vector->vsi = &adapter->vsi;
		q_vector->v_idx = q_idx;
		q_vector->reg_idx = q_idx;
		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       iavf_napi_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}

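/**
 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/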
static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	if (!adapter->q_vectors)
		return;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	napi_vectors = adapter->num_active_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];

		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
	}
	kfree(adapter->q_vectors);
	adapter->q_vectors = NULL;
}

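/**
 * iavf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 **/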
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

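/**
 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 **/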
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
	int err;

	err = iavf_alloc_queues(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = iavf_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* If we've made it so far while ADq flag being ON, then we haven't
	 * bailed out anywhere in middle. And ADq isn't just enabled but actual
	 * number of queues requested were > default, then means ADq is
	 * configured.
	 */
	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc)
		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
			 adapter->num_tc);

	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
		 adapter->num_active_queues);

	return 0;
err_alloc_q_vectors:
	iavf_reset_interrupt_capability(adapter);
err_set_interrupt:
	iavf_free_queues(adapter);
err_alloc_queues:
	return err;
}

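/**
 * iavf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/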
static void iavf_free_rss(struct iavf_adapter *adapter)
{
	kfree(adapter->rss_key);
	adapter->rss_key = NULL;

	kfree(adapter->rss_lut);
	adapter->rss_lut = NULL;
}

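/**
 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/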
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (netif_running(netdev))
		iavf_free_traffic_irqs(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);
	iavf_free_queues(adapter);

	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err;

	netif_tx_stop_all_queues(netdev);

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err;

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_map_rings_to_vectors(adapter);

	if (RSS_AQ(adapter))
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
	else
		err = iavf_init_rss(adapter);
err:
	return err;
}

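/**
 * iavf_watchdog_timer - Periodic call-back timer
 * @t: pointer to the timer_list containing our watchdog_timer
 **/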
static void iavf_watchdog_timer(struct timer_list *t)
{
	struct iavf_adapter *adapter = from_timer(adapter, t,
						  watchdog_timer);

	schedule_work(&adapter->watchdog_task);
}

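/**
 * iavf_watchdog_task - Periodic call-back task
 * @work: pointer to work_struct
 **/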
static void iavf_watchdog_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						    struct iavf_adapter,
						    watchdog_task);
	struct iavf_hw *hw = &adapter->hw;
	u32 reg_val;

	if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
		goto restart_watchdog;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if ((reg_val == VIRTCHNL_VFR_VFACTIVE) ||
		    (reg_val == VIRTCHNL_VFR_COMPLETED)) {
			/* A chance for redemption! */
			dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
			adapter->state = __IAVF_STARTUP;
			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
			schedule_delayed_work(&adapter->init_task, 10);
			clear_bit(__IAVF_IN_CRITICAL_TASK,
				  &adapter->crit_section);
			/* Don't reschedule the watchdog, since we've restarted
			 * the init task. When init_task contacts the PF and
			 * gets everything set up again, it'll restart the
			 * watchdog for us.
			 */
			return;
		}
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		goto watchdog_done;
	}

	if ((adapter->state < __IAVF_DOWN) ||
	    (adapter->flags & IAVF_FLAG_RESET_PENDING))
		goto watchdog_done;

	/* check for reset */
	reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
	if (!(adapter->flags & IAVF_FLAG_RESET_PENDING) && !reg_val) {
		adapter->state = __IAVF_RESETTING;
		adapter->flags |= IAVF_FLAG_RESET_PENDING;
		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
		schedule_work(&adapter->reset_task);
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		goto watchdog_done;
	}

	/* Process admin queue tasks. After init, everything gets done
	 * here so we don't race on the admin queue.
	 */
	if (adapter->current_op) {
		if (!iavf_asq_done(hw)) {
			dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
			iavf_send_api_ver(adapter);
		}
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) {
		iavf_send_vf_config_msg(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
		iavf_disable_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
		iavf_map_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
		iavf_add_ether_addrs(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
		iavf_add_vlans(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
		iavf_del_ether_addrs(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
		iavf_del_vlans(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
		iavf_configure_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
		iavf_enable_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
		/* This message goes straight to the firmware, not the
		 * PF, so we don't have to set current_op as we will
		 * not get a response through the ARQ.
		 */
		iavf_init_rss(adapter);
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
		iavf_get_hena(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
		iavf_set_hena(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
		iavf_set_rss_key(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
		iavf_set_rss_lut(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
				     FLAG_VF_MULTICAST_PROMISC);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
		goto watchdog_done;
	}

	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		iavf_set_promiscuous(adapter, 0);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
		iavf_enable_channels(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
		iavf_disable_channels(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		iavf_add_cloud_filter(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		iavf_del_cloud_filter(adapter);
		goto watchdog_done;
	}

	schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));

	if (adapter->state == __IAVF_RUNNING)
		iavf_request_stats(adapter);
watchdog_done:
	if (adapter->state == __IAVF_RUNNING)
		iavf_detect_recover_hung(&adapter->vsi);
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
	if (adapter->state == __IAVF_REMOVE)
		return;
	if (adapter->aq_required)
		mod_timer(&adapter->watchdog_timer,
			  jiffies + msecs_to_jiffies(20));
	else
		mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
	schedule_work(&adapter->adminq_task);
}

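/**
 * iavf_disable_vf - disable VF and free resources when reset fails
 * @adapter: board private structure
 *
 * Tears down the interface and frees all filters, queues, vectors and
 * VF resources after communication with the PF has been lost.
 **/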
static void iavf_disable_vf(struct iavf_adapter *adapter)
{
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_vlan_filter *fv, *fvtmp;
	struct iavf_cloud_filter *cf, *cftmp;

	adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;

	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
	if (adapter->state == __IAVF_RUNNING) {
		set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
		netif_carrier_off(adapter->netdev);
		netif_tx_disable(adapter->netdev);
		adapter->link_up = false;
		iavf_napi_disable_all(adapter);
		iavf_irq_disable(adapter);
		iavf_free_traffic_irqs(adapter);
		iavf_free_all_tx_resources(adapter);
		iavf_free_all_rx_resources(adapter);
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Delete all of the filters */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}

	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
		list_del(&fv->list);
		kfree(fv);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
		adapter->num_cloud_filters--;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_queues(adapter);
	iavf_free_q_vectors(adapter);
	kfree(adapter->vf_res);
	iavf_shutdown_adminq(&adapter->hw);
	adapter->netdev->flags &= ~IFF_UP;
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
	adapter->state = __IAVF_DOWN;
	wake_up(&adapter->down_waitqueue);
	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}

#define IAVF_RESET_WAIT_MS 10
#define IAVF_RESET_WAIT_COUNT 500

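/**
 * iavf_reset_task - Call-back task to handle hardware reset
 * @work: pointer to work_struct
 *
 * During reset we need to shut down and reinitialize the admin queue
 * before we can use it to communicate with the PF again. We also clear
 * and reinit the rings because that context is lost as well.
 **/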
static void iavf_reset_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						    struct iavf_adapter,
						    reset_task);
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	struct net_device *netdev = adapter->netdev;
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_vlan_filter *vlf;
	struct iavf_cloud_filter *cf;
	struct iavf_mac_filter *f;
	u32 reg_val;
	int i = 0, err;
	bool running;

	/* When device is being removed it doesn't make sense to run the
	 * reset task, just return in such a case.
	 */
	if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
		return;

	while (test_and_set_bit(__IAVF_IN_CLIENT_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);
	if (CLIENT_ENABLED(adapter)) {
		adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
				    IAVF_FLAG_CLIENT_NEEDS_CLOSE |
				    IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
				    IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
		cancel_delayed_work_sync(&adapter->client_task);
		iavf_notify_client_close(&adapter->vsi, true);
	}
	iavf_misc_irq_disable(adapter);
	if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
		adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
		/* Restart the AQ here. If we have been reset but didn't
		 * detect it, or if the PF had to reinit, our AQ will be hosed.
		 */
		iavf_shutdown_adminq(hw);
		iavf_init_adminq(hw);
		iavf_request_reset(adapter);
	}
	adapter->flags |= IAVF_FLAG_RESET_PENDING;

	/* poll until we see the reset actually happen */
	for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) {
		reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
			  IAVF_VF_ARQLEN1_ARQENABLE_MASK;
		if (!reg_val)
			break;
		usleep_range(5000, 10000);
	}
	if (i == IAVF_RESET_WAIT_COUNT) {
		dev_info(&adapter->pdev->dev, "Never saw reset\n");
		goto continue_reset;
	}

	/* wait until the reset is complete and the PF is responding to us */
	for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) {
		/* sleep first to make sure a minimum wait time is met */
		msleep(IAVF_RESET_WAIT_MS);

		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg_val == VIRTCHNL_VFR_VFACTIVE)
			break;
	}

	pci_set_master(adapter->pdev);

	if (i == IAVF_RESET_WAIT_COUNT) {
		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
			reg_val);
		iavf_disable_vf(adapter);
		clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
		return;
	}

continue_reset:
	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
	running = ((adapter->state == __IAVF_RUNNING) ||
		   (adapter->state == __IAVF_RESETTING));

	if (running) {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		adapter->link_up = false;
		iavf_napi_disable_all(adapter);
	}
	iavf_irq_disable(adapter);

	adapter->state = __IAVF_RESETTING;
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;

	/* free the Tx/Rx rings and descriptors, might be better to just
	 * re-use them sometime in the future
	 */
	iavf_free_all_rx_resources(adapter);
	iavf_free_all_tx_resources(adapter);

	adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
	/* kill and reinit the admin queue */
	iavf_shutdown_adminq(hw);
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
	err = iavf_init_adminq(hw);
	if (err)
		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
			 err);
	adapter->aq_required = 0;

	if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
		err = iavf_reinit_interrupt_scheme(adapter);
		if (err)
			goto reset_err;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* re-add all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->add = true;
	}
	/* re-add all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->add = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* check if TCs are running and re-add all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc) {
		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
			cf->add = true;
		}
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
	iavf_misc_irq_enable(adapter);

	mod_timer(&adapter->watchdog_timer, jiffies + 2);

	/* We were running when the reset started, so we need to restore some
	 * state here.
	 */
	if (running) {
		/* allocate transmit descriptors */
		err = iavf_setup_all_tx_resources(adapter);
		if (err)
			goto reset_err;

		/* allocate receive descriptors */
		err = iavf_setup_all_rx_resources(adapter);
		if (err)
			goto reset_err;

		if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
			err = iavf_request_traffic_irqs(adapter, netdev->name);
			if (err)
				goto reset_err;

			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
		}

		iavf_configure(adapter);

		iavf_up_complete(adapter);

		iavf_irq_enable(adapter, true);
	} else {
		adapter->state = __IAVF_DOWN;
		wake_up(&adapter->down_waitqueue);
	}
	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return;
reset_err:
	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
	iavf_close(netdev);
}

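/**
 * iavf_adminq_task - worker thread to clean the admin queue
 * @work: pointer to the work struct containing our data
 **/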
static void iavf_adminq_task(struct work_struct *work)
{
	struct iavf_adapter *adapter =
		container_of(work, struct iavf_adapter, adminq_task);
	struct iavf_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	enum virtchnl_ops v_op;
	iavf_status ret, v_ret;
	u32 val, oldval;
	u16 pending;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		goto out;

	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		goto out;

	do {
		ret = iavf_clean_arq_element(hw, &event, &pending);
		v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		v_ret = (iavf_status)le32_to_cpu(event.desc.cookie_low);

		if (ret || !v_op)
			break; /* No event to process or error cleaning ARQ */

		iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
					 event.msg_len);
		if (pending != 0)
			memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
	} while (pending);

	if ((adapter->flags &
	     (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
	    adapter->state == __IAVF_RESETTING)
		goto freedom;

	/* check for error indications */
	val = rd32(hw, hw->aq.arq.len);
	if (val == 0xdeadbeef) /* indicates device in reset */
		goto freedom;
	oldval = val;
	if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
	}
	if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
	}
	if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.arq.len, val);

	val = rd32(hw, hw->aq.asq.len);
	oldval = val;
	if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
	}
	if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
	}
	if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.asq.len, val);

freedom:
	kfree(event.msg_buf);
out:
	/* re-enable Admin queue interrupt cause */
	iavf_misc_irq_enable(adapter);
}

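/**
 * iavf_client_task - worker thread to perform client work
 * @work: pointer to work_struct containing our data
 *
 * This task handles client interactions. Because client calls can be
 * reentrant, we can't handle them in the watchdog.
 **/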
static void iavf_client_task(struct work_struct *work)
{
	struct iavf_adapter *adapter =
		container_of(work, struct iavf_adapter, client_task.work);

	/* If we can't get the client bit, just give up. We'll be rescheduled
	 * later.
	 */
	if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section))
		return;

	if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
		iavf_client_subtask(adapter);
		adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
		goto out;
	}
	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
		iavf_notify_client_l2_params(&adapter->vsi);
		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
		goto out;
	}
	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
		iavf_notify_client_close(&adapter->vsi, false);
		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
		goto out;
	}
	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
		iavf_notify_client_open(&adapter->vsi);
		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
	}
out:
	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
}

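/**
 * iavf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/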
void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
{
	int i;

	if (!adapter->tx_rings)
		return;

	for (i = 0; i < adapter->num_active_queues; i++)
		if (adapter->tx_rings[i].desc)
			iavf_free_tx_resources(&adapter->tx_rings[i]);
}

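/**
 * iavf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/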
static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->tx_rings[i].count = adapter->tx_desc_count;
		err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
		if (!err)
			continue;
		dev_err(&adapter->pdev->dev,
			"Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

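/**
 * iavf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/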
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].count = adapter->rx_desc_count;
		err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
		if (!err)
			continue;
		dev_err(&adapter->pdev->dev,
			"Allocation for Rx Queue %u failed\n", i);
		break;
	}
	return err;
}

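/**
 * iavf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/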
void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
{
	int i;

	if (!adapter->rx_rings)
		return;

	for (i = 0; i < adapter->num_active_queues; i++)
		if (adapter->rx_rings[i].desc)
			iavf_free_rx_resources(&adapter->rx_rings[i]);
}

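/**
 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
 * @adapter: board private structure
 * @max_tx_rate: max Tx bw for a tc
 **/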
static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
				      u64 max_tx_rate)
{
	int speed = 0, ret = 0;

	switch (adapter->link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = 40000;
		break;
	case I40E_LINK_SPEED_25GB:
		speed = 25000;
		break;
	case I40E_LINK_SPEED_20GB:
		speed = 20000;
		break;
	case I40E_LINK_SPEED_10GB:
		speed = 10000;
		break;
	case I40E_LINK_SPEED_1GB:
		speed = 1000;
		break;
	case I40E_LINK_SPEED_100MB:
		speed = 100;
		break;
	default:
		break;
	}

	if (max_tx_rate > speed) {
		dev_err(&adapter->pdev->dev,
			"Invalid tx rate specified\n");
		ret = -EINVAL;
	}

	return ret;
}

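/**
 * iavf_validate_ch_config - validate queue mapping info
 * @adapter: board private structure
 * @mqprio_qopt: queue parameters
 *
 * This function validates if the config provided by the user to
 * configure queue channels is valid or not. Returns 0 on a valid
 * config.
 **/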
static int iavf_validate_ch_config(struct iavf_adapter *adapter,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u64 total_max_rate = 0;
	int i, num_qps = 0;
	u64 tx_rate = 0;
	int ret = 0;

	if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
	    mqprio_qopt->qopt.num_tc < 1)
		return -EINVAL;

	for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
		if (!mqprio_qopt->qopt.count[i] ||
		    mqprio_qopt->qopt.offset[i] != num_qps)
			return -EINVAL;
		if (mqprio_qopt->min_rate[i]) {
			dev_err(&adapter->pdev->dev,
				"Invalid min tx rate (greater than 0) specified\n");
			return -EINVAL;
		}
		/* convert to Mbps */
		tx_rate = div_u64(mqprio_qopt->max_rate[i],
				  IAVF_MBPS_DIVISOR);
		total_max_rate += tx_rate;
		num_qps += mqprio_qopt->qopt.count[i];
	}
	if (num_qps > IAVF_MAX_REQ_QUEUES)
		return -EINVAL;

	ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
	return ret;
}

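/**
 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
 * @adapter: board private structure
 **/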
static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
{
	struct iavf_cloud_filter *cf, *cftmp;

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
				 list) {
		list_del(&cf->list);
		kfree(cf);
		adapter->num_cloud_filters--;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
}

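/**
 * __iavf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @type_data: tc offload data
 *
 * This function processes the config information provided by the
 * user to configure traffic classes/queue channels and packages the
 * information to request the PF to setup traffic classes.
 *
 * Returns 0 on success.
 **/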
static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	u8 num_tc = 0, total_qps = 0;
	int ret = 0, netdev_tc = 0;
	u64 max_tx_rate;
	u16 mode;
	int i;

	num_tc = mqprio_qopt->qopt.num_tc;
	mode = mqprio_qopt->mode;

	/* delete queue_channel */
	if (!mqprio_qopt->qopt.hw) {
		if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
			/* reset the tc configuration */
			netdev_reset_tc(netdev);
			adapter->num_tc = 0;
			netif_tx_stop_all_queues(netdev);
			netif_tx_disable(netdev);
			iavf_del_all_cloud_filters(adapter);
			adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
			goto exit;
		} else {
			return -EINVAL;
		}
	}

	/* add queue channel */
	if (mode == TC_MQPRIO_MODE_CHANNEL) {
		if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
			dev_err(&adapter->pdev->dev, "ADq not supported\n");
			return -EOPNOTSUPP;
		}
		if (adapter->ch_config.state != __IAVF_TC_INVALID) {
			dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
			return -EINVAL;
		}

		ret = iavf_validate_ch_config(adapter, mqprio_qopt);
		if (ret)
			return ret;
		/* Return if same TC config is requested */
		if (adapter->num_tc == num_tc)
			return 0;
		adapter->num_tc = num_tc;

		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
			if (i < num_tc) {
				adapter->ch_config.ch_info[i].count =
					mqprio_qopt->qopt.count[i];
				adapter->ch_config.ch_info[i].offset =
					mqprio_qopt->qopt.offset[i];
				total_qps += mqprio_qopt->qopt.count[i];
				max_tx_rate = mqprio_qopt->max_rate[i];
				/* convert to Mbps */
				max_tx_rate = div_u64(max_tx_rate,
						      IAVF_MBPS_DIVISOR);
				adapter->ch_config.ch_info[i].max_tx_rate =
					max_tx_rate;
			} else {
				adapter->ch_config.ch_info[i].count = 1;
				adapter->ch_config.ch_info[i].offset = 0;
			}
		}
		adapter->ch_config.total_qps = total_qps;
		netif_tx_stop_all_queues(netdev);
		netif_tx_disable(netdev);
		adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
		netdev_reset_tc(netdev);
		/* Report the tc mapping up the stack */
		netdev_set_num_tc(adapter->netdev, num_tc);
		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
			u16 qcount = mqprio_qopt->qopt.count[i];
			u16 qoffset = mqprio_qopt->qopt.offset[i];

			if (i < num_tc)
				netdev_set_tc_queue(netdev, netdev_tc++, qcount,
						    qoffset);
		}
	}
exit:
	return ret;
}

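/**
 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
 * @adapter: board private structure
 * @f: pointer to struct tc_cls_flower_offload
 * @filter: iavf cloud filter structure
 **/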
static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
				 struct tc_cls_flower_offload *f,
				 struct iavf_cloud_filter *filter)
{
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u8 field_flags = 0;
	u16 addr_type = 0;
	u16 n_proto = 0;
	int i = 0;
	struct virtchnl_filter *vf = &filter->f;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
		dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
			f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);

		if (mask->keyid != 0)
			field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);

		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		n_proto_key = ntohs(key->n_proto);
		n_proto_mask = ntohs(mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		n_proto = n_proto_key & n_proto_mask;
		if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
			return -EINVAL;
		if (n_proto == ETH_P_IPV6) {
			/* specify flow type as TCP IPv6 */
			vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
		}

		if (key->ip_proto != IPPROTO_TCP) {
			dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
			return -EINVAL;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);

		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		/* use is_broadcast and is_zero to check for all 0xf or 0 */
		if (!is_zero_ether_addr(mask->dst)) {
			if (is_broadcast_ether_addr(mask->dst)) {
				field_flags |= IAVF_CLOUD_FIELD_OMAC;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
					mask->dst);
				return I40E_ERR_CONFIG;
			}
		}

		if (!is_zero_ether_addr(mask->src)) {
			if (is_broadcast_ether_addr(mask->src)) {
				field_flags |= IAVF_CLOUD_FIELD_IMAC;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
					mask->src);
				return I40E_ERR_CONFIG;
			}
		}

		if (!is_zero_ether_addr(key->dst))
			if (is_valid_ether_addr(key->dst) ||
			    is_multicast_ether_addr(key->dst)) {
				/* set the mask if a valid dst_mac address */
				for (i = 0; i < ETH_ALEN; i++)
					vf->mask.tcp_spec.dst_mac[i] |= 0xff;
				ether_addr_copy(vf->data.tcp_spec.dst_mac,
						key->dst);
			}

		if (!is_zero_ether_addr(key->src))
			if (is_valid_ether_addr(key->src) ||
			    is_multicast_ether_addr(key->src)) {
				/* set the mask if a valid src_mac address */
				for (i = 0; i < ETH_ALEN; i++)
					vf->mask.tcp_spec.src_mac[i] |= 0xff;
				ether_addr_copy(vf->data.tcp_spec.src_mac,
						key->src);
			}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);

		if (mask->vlan_id) {
			if (mask->vlan_id == VLAN_VID_MASK) {
				field_flags |= IAVF_CLOUD_FIELD_IVLAN;
			} else {
				dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
					mask->vlan_id);
				return I40E_ERR_CONFIG;
			}
		}
		vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
		vf->data.tcp_spec.vlan_id = cpu_to_be16(key->vlan_id);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		addr_type = key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
2597 f->mask);
2598
2599 if (mask->dst) {
2600 if (mask->dst == cpu_to_be32(0xffffffff)) {
2601 field_flags |= IAVF_CLOUD_FIELD_IIP;
2602 } else {
2603 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
2604 be32_to_cpu(mask->dst));
2605 return I40E_ERR_CONFIG;
2606 }
2607 }
2608
2609 if (mask->src) {
2610 if (mask->src == cpu_to_be32(0xffffffff)) {
2611 field_flags |= IAVF_CLOUD_FIELD_IIP;
2612 } else {
2613 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
2614 be32_to_cpu(mask->dst));
2615 return I40E_ERR_CONFIG;
2616 }
2617 }
2618
2619 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
2620 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
2621 return I40E_ERR_CONFIG;
2622 }
2623 if (key->dst) {
2624 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
2625 vf->data.tcp_spec.dst_ip[0] = key->dst;
2626 }
2627 if (key->src) {
2628 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
2629 vf->data.tcp_spec.src_ip[0] = key->src;
2630 }
2631 }
2632
2633 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2634 struct flow_dissector_key_ipv6_addrs *key =
2635 skb_flow_dissector_target(f->dissector,
2636 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
2637 f->key);
2638 struct flow_dissector_key_ipv6_addrs *mask =
2639 skb_flow_dissector_target(f->dissector,
2640 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
2641 f->mask);
2642
2643
2644 if (ipv6_addr_any(&mask->dst)) {
2645 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
2646 IPV6_ADDR_ANY);
2647 return I40E_ERR_CONFIG;
2648 }
2649
2650
2651
2652
2653 if (ipv6_addr_loopback(&key->dst) ||
2654 ipv6_addr_loopback(&key->src)) {
2655 dev_err(&adapter->pdev->dev,
2656 "ipv6 addr should not be loopback\n");
2657 return I40E_ERR_CONFIG;
2658 }
2659 if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
2660 field_flags |= IAVF_CLOUD_FIELD_IIP;
2661
2662 for (i = 0; i < 4; i++)
2663 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
2664 memcpy(&vf->data.tcp_spec.dst_ip, &key->dst.s6_addr32,
2665 sizeof(vf->data.tcp_spec.dst_ip));
2666 for (i = 0; i < 4; i++)
2667 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
2668 memcpy(&vf->data.tcp_spec.src_ip, &key->src.s6_addr32,
2669 sizeof(vf->data.tcp_spec.src_ip));
2670 }
2671 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
2672 struct flow_dissector_key_ports *key =
2673 skb_flow_dissector_target(f->dissector,
2674 FLOW_DISSECTOR_KEY_PORTS,
2675 f->key);
2676 struct flow_dissector_key_ports *mask =
2677 skb_flow_dissector_target(f->dissector,
2678 FLOW_DISSECTOR_KEY_PORTS,
2679 f->mask);
2680
2681 if (mask->src) {
2682 if (mask->src == cpu_to_be16(0xffff)) {
2683 field_flags |= IAVF_CLOUD_FIELD_IIP;
2684 } else {
2685 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
2686 be16_to_cpu(mask->src));
2687 return I40E_ERR_CONFIG;
2688 }
2689 }
2690
2691 if (mask->dst) {
2692 if (mask->dst == cpu_to_be16(0xffff)) {
2693 field_flags |= IAVF_CLOUD_FIELD_IIP;
2694 } else {
2695 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
2696 be16_to_cpu(mask->dst));
2697 return I40E_ERR_CONFIG;
2698 }
2699 }
2700 if (key->dst) {
2701 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
2702 vf->data.tcp_spec.dst_port = key->dst;
2703 }
2704
2705 if (key->src) {
2706 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
2707 vf->data.tcp_spec.src_port = key->src;
2708 }
2709 }
2710 vf->field_flags = field_flags;
2711
2712 return 0;
2713}
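
/* Example usage (a hedged sketch; names and addresses are placeholders): a
 * filter the parser above accepts could be installed with:
 *
 *	tc filter add dev <vf-netdev> parent ffff: protocol ip prio 1 \
 *		flower ip_proto tcp dst_ip 192.168.1.10 dst_port 80 \
 *		skip_sw hw_tc 1
 *
 * Only TCP over IPv4/IPv6 is accepted, and every supplied mask must be
 * either all-zeroes (field ignored) or all-ones (exact match); partial
 * masks are rejected with I40E_ERR_CONFIG.
 */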

/**
 * iavf_handle_tclass - Forward to a traffic class on the device
 * @adapter: board private structure
 * @tc: traffic class index on the device
 * @filter: pointer to cloud filter structure
 */
static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
			      struct iavf_cloud_filter *filter)
{
	if (tc == 0)
		return 0;
	if (tc < adapter->num_tc) {
		if (!filter->f.data.tcp_spec.dst_port) {
			dev_err(&adapter->pdev->dev,
				"Specify destination port to redirect to traffic class other than TC0\n");
			return -EINVAL;
		}
	}
	/* redirect to a traffic class on the same device */
	filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
	filter->f.action_meta = tc;
	return 0;
}

/**
 * iavf_configure_clsflower - Add tc flower filters
 * @adapter: board private structure
 * @cls_flower: Pointer to struct tc_cls_flower_offload
 */
static int iavf_configure_clsflower(struct iavf_adapter *adapter,
				    struct tc_cls_flower_offload *cls_flower)
{
	int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
	struct iavf_cloud_filter *filter = NULL;
	int err = -EINVAL, count = 50;

	if (tc < 0) {
		dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
		return -EINVAL;
	}

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		if (--count == 0)
			goto err;
		udelay(1);
	}

	filter->cookie = cls_flower->cookie;

	/* set the mask to all zeroes to begin with */
	memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
	/* start out with flow type and eth type IPv4 to begin with */
	filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
	err = iavf_parse_cls_flower(adapter, cls_flower, filter);
	if (err < 0)
		goto err;

	err = iavf_handle_tclass(adapter, tc, filter);
	if (err < 0)
		goto err;

	/* add filter to the list */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_add_tail(&filter->list, &adapter->cloud_filter_list);
	adapter->num_cloud_filters++;
	filter->add = true;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
err:
	if (err)
		kfree(filter);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
	return err;
}

/* iavf_find_cf - Find the cloud filter in the list
 * @adapter: Board private structure
 * @cookie: filter specific cookie
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * cloud_filter_list_lock.
 */
static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
					      unsigned long *cookie)
{
	struct iavf_cloud_filter *filter = NULL;

	if (!cookie)
		return NULL;

	list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
			return filter;
	}
	return NULL;
}
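
/* Minimal usage sketch for the lookup helper above (this is the same
 * pattern iavf_delete_clsflower() follows below): the list lock must be
 * held across both the lookup and any use of the returned pointer.
 *
 *	spin_lock_bh(&adapter->cloud_filter_list_lock);
 *	cf = iavf_find_cf(adapter, &cls_flower->cookie);
 *	if (cf)
 *		cf->del = true;		// e.g. mark for deletion
 *	spin_unlock_bh(&adapter->cloud_filter_list_lock);
 */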

/**
 * iavf_delete_clsflower - Remove tc flower filters
 * @adapter: board private structure
 * @cls_flower: Pointer to struct tc_cls_flower_offload
 */
static int iavf_delete_clsflower(struct iavf_adapter *adapter,
				 struct tc_cls_flower_offload *cls_flower)
{
	struct iavf_cloud_filter *filter = NULL;
	int err = 0;

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	filter = iavf_find_cf(adapter, &cls_flower->cookie);
	if (filter) {
		filter->del = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
	} else {
		err = -EINVAL;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	return err;
}

/**
 * iavf_setup_tc_cls_flower - flower classifier offloads
 * @adapter: board private structure
 * @cls_flower: pointer to struct tc_cls_flower_offload
 */
static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
				    struct tc_cls_flower_offload *cls_flower)
{
	if (cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return iavf_configure_clsflower(adapter, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return iavf_delete_clsflower(adapter, cls_flower);
	case TC_CLSFLOWER_STATS:
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * iavf_setup_tc_block_cb - block callback for tc
 * @type: type of offload
 * @type_data: offload data
 * @cb_priv: adapter private data
 *
 * This function is the block callback for traffic classes
 **/
static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return iavf_setup_tc_cls_flower(cb_priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * iavf_setup_tc_block - register callbacks for tc
 * @dev: network interface device structure
 * @f: tc offload data
 *
 * This function registers block callbacks for tc
 * offloads
 **/
static int iavf_setup_tc_block(struct net_device *dev,
			       struct tc_block_offload *f)
{
	struct iavf_adapter *adapter = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, iavf_setup_tc_block_cb,
					     adapter, adapter, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, iavf_setup_tc_block_cb,
					adapter);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * iavf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @type: type of offload
 * @type_data: tc offload data
 *
 * This function is the callback to ndo_setup_tc in the
 * netdev_ops.
 *
 * Returns 0 on success
 **/
static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			 void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return __iavf_setup_tc(netdev, type_data);
	case TC_SETUP_BLOCK:
		return iavf_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * iavf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int iavf_open(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int err;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
		dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
		return -EIO;
	}

	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	if (adapter->state != __IAVF_DOWN) {
		err = -EBUSY;
		goto err_unlock;
	}

	/* allocate transmit descriptors */
	err = iavf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = iavf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	/* clear any pending interrupts, may auto mask */
	err = iavf_request_traffic_irqs(adapter, netdev->name);
	if (err)
		goto err_req_irq;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	iavf_add_filter(adapter, adapter->hw.mac.addr);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_configure(adapter);

	iavf_up_complete(adapter);

	iavf_irq_enable(adapter, true);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return 0;

err_req_irq:
	iavf_down(adapter);
	iavf_free_traffic_irqs(adapter);
err_setup_rx:
	iavf_free_all_rx_resources(adapter);
err_setup_tx:
	iavf_free_all_tx_resources(adapter);
err_unlock:
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return err;
}

/**
 * iavf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  All IRQs except vector 0 (reserved for admin queue)
 * are freed, along with all transmit and receive resources.
 **/
static int iavf_close(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int status;

	if (adapter->state <= __IAVF_DOWN_PENDING)
		return 0;

	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;

	iavf_down(adapter);
	adapter->state = __IAVF_DOWN_PENDING;
	iavf_free_traffic_irqs(adapter);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	/* We explicitly don't free resources here because the hardware is
	 * still active and can DMA into memory. Resources are cleared in
	 * iavf_virtchnl_completion() after we get confirmation from the PF
	 * driver that the rings have been stopped.
	 *
	 * Also, we wait for state to transition to __IAVF_DOWN before
	 * returning. State change occurs in iavf_virtchnl_completion() after
	 * VF resources are released (which occurs after PF driver processes
	 * and responds to admin queue commands).
	 */
	status = wait_event_timeout(adapter->down_waitqueue,
				    adapter->state == __IAVF_DOWN,
				    msecs_to_jiffies(200));
	if (!status)
		netdev_warn(netdev, "Device resources not yet released\n");
	return 0;
}

/**
 * iavf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	netdev->mtu = new_mtu;
	if (CLIENT_ENABLED(adapter)) {
		iavf_notify_client_l2_params(&adapter->vsi);
		adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
	}
	adapter->flags |= IAVF_FLAG_RESET_NEEDED;
	schedule_work(&adapter->reset_task);

	return 0;
}

/**
 * iavf_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 * Note: expects to be called while under rtnl_lock()
 **/
static int iavf_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	/* Don't allow changing VLAN_RX flag when adapter is not capable
	 * of VLAN offload
	 */
	if (!VLAN_ALLOWED(adapter)) {
		if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
			return -EINVAL;
	} else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			adapter->aq_required |=
				IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
		else
			adapter->aq_required |=
				IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
	}

	return 0;
}
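
/* Note on the handler above: toggling NETIF_F_HW_VLAN_CTAG_RX does not take
 * effect synchronously. It only sets a request flag in adapter->aq_required;
 * the corresponding virtchnl message to the PF is sent later from the
 * watchdog/admin-queue processing, so a short delay before the change is
 * visible on the wire is expected.
 */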

/**
 * iavf_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 **/
static netdev_features_t iavf_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame.  We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes.  If it is then we need to drop support for GSO.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;

	/* MACLEN can support at most 63 words */
	len = skb_network_header(skb) - skb->data;
	if (len & ~(63 * 2))
		goto out_err;

	/* IPLEN and EIPLEN can support at most 127 dwords */
	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len & ~(127 * 4))
		goto out_err;

	if (skb->encapsulation) {
		/* L4TUNLEN can support 127 words */
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len & ~(127 * 2))
			goto out_err;

		/* IPLEN can support at most 127 dwords */
		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len & ~(127 * 4))
			goto out_err;
	}

	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value and we support all possible values supported
	 * by TCP, which is at most 15 dwords
	 */

	return features;
out_err:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
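
/* Worked example for the header-length masks above (a sketch using a plain
 * IPv4/TCP frame): the L2 header is 14 bytes, and 14 & ~(63 * 2) == 0, so it
 * fits the 63-word MACLEN field and is 2-byte aligned; a 20-byte IPv4 header
 * gives 20 & ~(127 * 4) == 0, fitting the 127-dword IPLEN field. A header
 * that is too long, or not word/dword aligned, leaves bits set after the
 * mask and loses checksum/GSO offload via out_err.
 */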

/**
 * iavf_fix_features - fix up the netdev feature bits
 * @netdev: our net device
 * @features: desired feature bits
 *
 * Returns fixed-up features bits
 **/
static netdev_features_t iavf_fix_features(struct net_device *netdev,
					   netdev_features_t features)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
		features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_FILTER);

	return features;
}

static const struct net_device_ops iavf_netdev_ops = {
	.ndo_open		= iavf_open,
	.ndo_stop		= iavf_close,
	.ndo_start_xmit		= iavf_xmit_frame,
	.ndo_set_rx_mode	= iavf_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= iavf_set_mac,
	.ndo_change_mtu		= iavf_change_mtu,
	.ndo_tx_timeout		= iavf_tx_timeout,
	.ndo_vlan_rx_add_vid	= iavf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= iavf_vlan_rx_kill_vid,
	.ndo_features_check	= iavf_features_check,
	.ndo_fix_features	= iavf_fix_features,
	.ndo_set_features	= iavf_set_features,
	.ndo_setup_tc		= iavf_setup_tc,
};

/**
 * iavf_check_reset_complete - check that VF reset is complete
 * @hw: pointer to hw struct
 *
 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
 **/
static int iavf_check_reset_complete(struct iavf_hw *hw)
{
	u32 rstat;
	int i;

	for (i = 0; i < 100; i++) {
		rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
			IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
		    (rstat == VIRTCHNL_VFR_COMPLETED))
			return 0;
		usleep_range(10, 20);
	}
	return -EBUSY;
}
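
/* Timing note for the poll loop above: 100 iterations at 10-20 us each gives
 * a worst-case wait of roughly 1-2 ms before giving up with -EBUSY; callers
 * (e.g. the init task) treat that as "still in reset" and retry later.
 */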

/**
 * iavf_process_config - Process the config information we got from the PF
 * @adapter: board private structure
 *
 * Verify that we have a valid config struct, and set up our netdev features
 * and our VSI struct.
 **/
int iavf_process_config(struct iavf_adapter *adapter)
{
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	int i, num_req_queues = adapter->num_req_queues;
	struct net_device *netdev = adapter->netdev;
	struct iavf_vsi *vsi = &adapter->vsi;
	netdev_features_t hw_enc_features;
	netdev_features_t hw_features;

	/* got VF config message back from PF, now we can parse it */
	for (i = 0; i < vfres->num_vsis; i++) {
		if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
			adapter->vsi_res = &vfres->vsi_res[i];
	}
	if (!adapter->vsi_res) {
		dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
		return -ENODEV;
	}

	if (num_req_queues &&
	    num_req_queues != adapter->vsi_res->num_queue_pairs) {
		/* Problem.  The PF gave us fewer queues than what we had
		 * negotiated in our request.  Need a reset to see if we can't
		 * get back to a working state.
		 */
		dev_err(&adapter->pdev->dev,
			"Requested %d queues, but PF only gave us %d.\n",
			num_req_queues,
			adapter->vsi_res->num_queue_pairs);
		adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
		adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
		iavf_schedule_reset(adapter);
		return -ENODEV;
	}
	adapter->num_req_queues = 0;

	hw_enc_features = NETIF_F_SG |
			  NETIF_F_IP_CSUM |
			  NETIF_F_IPV6_CSUM |
			  NETIF_F_HIGHDMA |
			  NETIF_F_SOFT_FEATURES |
			  NETIF_F_TSO |
			  NETIF_F_TSO_ECN |
			  NETIF_F_TSO6 |
			  NETIF_F_SCTP_CRC |
			  NETIF_F_RXHASH |
			  NETIF_F_RXCSUM |
			  0;

	/* advertise to stack only if offloads for encapsulated packets is
	 * supported
	 */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
		hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
				   NETIF_F_GSO_GRE |
				   NETIF_F_GSO_GRE_CSUM |
				   NETIF_F_GSO_IPXIP4 |
				   NETIF_F_GSO_IPXIP6 |
				   NETIF_F_GSO_UDP_TUNNEL_CSUM |
				   NETIF_F_GSO_PARTIAL |
				   0;

		if (!(vfres->vf_cap_flags &
		      VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
			netdev->gso_partial_features |=
				NETIF_F_GSO_UDP_TUNNEL_CSUM;

		netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
		netdev->hw_enc_features |= hw_enc_features;
	}
	/* record features VLANs can make use of */
	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;

	/* Write features and hw_features separately to avoid polluting
	 * with, or dropping, features that are set when we registered.
	 */
	hw_features = hw_enc_features;

	/* Enable VLAN features if supported */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
		hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX);
	/* Enable cloud filter if ADQ is supported */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
		hw_features |= NETIF_F_HW_TC;

	netdev->hw_features |= hw_features;

	netdev->features |= hw_features;

	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Do not turn on offloads when they are requested to be turned off.
	 * TSO needs minimum 576 bytes to work correctly.
	 */
	if (netdev->wanted_features) {
		if (!(netdev->wanted_features & NETIF_F_TSO) ||
		    netdev->mtu < 576)
			netdev->features &= ~NETIF_F_TSO;
		if (!(netdev->wanted_features & NETIF_F_TSO6) ||
		    netdev->mtu < 576)
			netdev->features &= ~NETIF_F_TSO6;
		if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
			netdev->features &= ~NETIF_F_TSO_ECN;
		if (!(netdev->wanted_features & NETIF_F_GRO))
			netdev->features &= ~NETIF_F_GRO;
		if (!(netdev->wanted_features & NETIF_F_GSO))
			netdev->features &= ~NETIF_F_GSO;
	}

	adapter->vsi.id = adapter->vsi_res->vsi_id;

	adapter->vsi.back = adapter;
	adapter->vsi.base_vector = 1;
	adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
	vsi->netdev = adapter->netdev;
	vsi->qs_handle = adapter->vsi_res->qset_handle;
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		adapter->rss_key_size = vfres->rss_key_size;
		adapter->rss_lut_size = vfres->rss_lut_size;
	} else {
		adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
		adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
	}

	return 0;
}

/**
 * iavf_init_task - worker thread to perform delayed initialization
 * @work: pointer to work_struct containing our data
 *
 * This task completes the work that was begun in probe. Due to the nature
 * of VF-PF communications, we may need to wait tens of milliseconds to get
 * a response from the PF. Rather than busy-wait in probe and bog down the
 * whole system, we'll do it in a task so we can sleep.
 * This task only runs during driver init. Once we've established
 * communications with the PF driver and set up our netdev, the watchdog
 * takes over.
 **/
static void iavf_init_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						    struct iavf_adapter,
						    init_task.work);
	struct net_device *netdev = adapter->netdev;
	struct iavf_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	int err, bufsz;

	switch (adapter->state) {
	case __IAVF_STARTUP:
		/* driver loaded, probe complete */
		adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
		adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
		err = iavf_set_mac_type(hw);
		if (err) {
			dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
				err);
			goto err;
		}
		err = iavf_check_reset_complete(hw);
		if (err) {
			dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
				 err);
			goto err;
		}
		hw->aq.num_arq_entries = IAVF_AQ_LEN;
		hw->aq.num_asq_entries = IAVF_AQ_LEN;
		hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
		hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;

		err = iavf_init_adminq(hw);
		if (err) {
			dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
				err);
			goto err;
		}
		err = iavf_send_api_ver(adapter);
		if (err) {
			dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
			iavf_shutdown_adminq(hw);
			goto err;
		}
		adapter->state = __IAVF_INIT_VERSION_CHECK;
		goto restart;
	case __IAVF_INIT_VERSION_CHECK:
		if (!iavf_asq_done(hw)) {
			dev_err(&pdev->dev, "Admin queue command never completed\n");
			iavf_shutdown_adminq(hw);
			adapter->state = __IAVF_STARTUP;
			goto err;
		}

		/* aq msg sent, awaiting reply */
		err = iavf_verify_api_ver(adapter);
		if (err) {
			if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
				err = iavf_send_api_ver(adapter);
			else
				dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
					adapter->pf_version.major,
					adapter->pf_version.minor,
					VIRTCHNL_VERSION_MAJOR,
					VIRTCHNL_VERSION_MINOR);
			goto err;
		}
		err = iavf_send_vf_config_msg(adapter);
		if (err) {
			dev_err(&pdev->dev, "Unable to send config request (%d)\n",
				err);
			goto err;
		}
		adapter->state = __IAVF_INIT_GET_RESOURCES;
		goto restart;
	case __IAVF_INIT_GET_RESOURCES:
		/* aq msg sent, awaiting reply */
		if (!adapter->vf_res) {
			bufsz = sizeof(struct virtchnl_vf_resource) +
				(IAVF_MAX_VF_VSI *
				 sizeof(struct virtchnl_vsi_resource));
			adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
			if (!adapter->vf_res)
				goto err;
		}
		err = iavf_get_vf_config(adapter);
		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
			err = iavf_send_vf_config_msg(adapter);
			goto err;
		} else if (err == I40E_ERR_PARAM) {
			/* We only get ERR_PARAM if the device is in a very bad
			 * state or if we've been disabled for previous bad
			 * behavior. Either way, we're done now.
			 */
			iavf_shutdown_adminq(hw);
			dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
			return;
		}
		if (err) {
			dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
				err);
			goto err_alloc;
		}
		adapter->state = __IAVF_INIT_SW;
		break;
	default:
		goto err_alloc;
	}

	if (iavf_process_config(adapter))
		goto err_alloc;
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;

	adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;

	netdev->netdev_ops = &iavf_netdev_ops;
	iavf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	/* MTU range: 68 - 9710 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;

	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
			 adapter->hw.mac.addr);
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
	} else {
		adapter->flags |= IAVF_FLAG_ADDR_SET_BY_PF;
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
	}

	timer_setup(&adapter->watchdog_timer, iavf_watchdog_timer, 0);
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	adapter->tx_desc_count = IAVF_DEFAULT_TXD;
	adapter->rx_desc_count = IAVF_DEFAULT_RXD;
	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;
	iavf_map_rings_to_vectors(adapter);
	if (adapter->vf_res->vf_cap_flags &
	    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err_sw_init;

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!adapter->netdev_registered) {
		err = register_netdev(netdev);
		if (err)
			goto err_register;
	}

	adapter->netdev_registered = true;

	netif_tx_stop_all_queues(netdev);
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_add_device(adapter);
		if (err)
			dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
				 err);
	}

	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
	if (netdev->features & NETIF_F_GRO)
		dev_info(&pdev->dev, "GRO is enabled\n");

	adapter->state = __IAVF_DOWN;
	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
	iavf_misc_irq_enable(adapter);
	wake_up(&adapter->down_waitqueue);

	adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
	adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
	if (!adapter->rss_key || !adapter->rss_lut)
		goto err_mem;

	if (RSS_AQ(adapter)) {
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
		mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
	} else {
		iavf_init_rss(adapter);
	}
	return;
restart:
	schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
	return;
err_mem:
	iavf_free_rss(adapter);
err_register:
	iavf_free_misc_irq(adapter);
err_sw_init:
	iavf_reset_interrupt_capability(adapter);
err_alloc:
	kfree(adapter->vf_res);
	adapter->vf_res = NULL;
err:
	/* Things went into the weeds, so try again later */
	if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
		dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
		adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
		iavf_shutdown_adminq(hw);
		adapter->state = __IAVF_STARTUP;
		schedule_delayed_work(&adapter->init_task, HZ * 5);
		return;
	}
	schedule_delayed_work(&adapter->init_task, HZ);
}
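
/* Note on the state machine above: __IAVF_STARTUP, __IAVF_INIT_VERSION_CHECK
 * and __IAVF_INIT_GET_RESOURCES each re-queue this work (see the restart and
 * err labels), so probe can return immediately while initialization and the
 * PF handshake complete asynchronously over several passes.
 */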

/**
 * iavf_shutdown - Shut down the device in preparation for a reboot
 * @pdev: pci device structure
 **/
static void iavf_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		iavf_close(netdev);

	/* Prevent the watchdog from running. */
	adapter->state = __IAVF_REMOVE;
	adapter->aq_required = 0;

#ifdef CONFIG_PM
	pci_save_state(pdev);

#endif
	pci_disable_device(pdev);
}

/**
 * iavf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in iavf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * iavf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct iavf_adapter *adapter = NULL;
	struct iavf_hw *hw = NULL;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	err = pci_request_regions(pdev, iavf_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
				   IAVF_MAX_REQ_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;

	hw = &adapter->hw;
	hw->back = adapter;

	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
	adapter->state = __IAVF_STARTUP;

	/* Call save state here because it relies on the adapter struct. */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	spin_lock_init(&adapter->mac_vlan_list_lock);
	spin_lock_init(&adapter->cloud_filter_list_lock);

	INIT_LIST_HEAD(&adapter->mac_filter_list);
	INIT_LIST_HEAD(&adapter->vlan_filter_list);
	INIT_LIST_HEAD(&adapter->cloud_filter_list);

	INIT_WORK(&adapter->reset_task, iavf_reset_task);
	INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
	INIT_WORK(&adapter->watchdog_task, iavf_watchdog_task);
	INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
	INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task);
	schedule_delayed_work(&adapter->init_task,
			      msecs_to_jiffies(5 * (pdev->devfn & 0x07)));

	/* Setup the wait queue for indicating transition to down status */
	init_waitqueue_head(&adapter->down_waitqueue);

	return 0;

err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

#ifdef CONFIG_PM
/**
 * iavf_suspend - Power management suspend routine
 * @pdev: PCI device information struct
 * @state: unused
 *
 * Called when the system (VM) is entering sleep/suspend.
 **/
static int iavf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int retval = 0;

	netif_device_detach(netdev);

	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	if (netif_running(netdev)) {
		rtnl_lock();
		iavf_down(adapter);
		rtnl_unlock();
	}
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	pci_disable_device(pdev);

	return 0;
}

/**
 * iavf_resume - Power management resume routine
 * @pdev: PCI device information struct
 *
 * Called when the system (VM) is resumed from sleep/suspend.
 **/
static int iavf_resume(struct pci_dev *pdev)
{
	/* pci drvdata holds the netdev (set in probe), not the adapter */
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
		return err;
	}
	pci_set_master(pdev);

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	if (err) {
		rtnl_unlock();
		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
		return err;
	}
	err = iavf_request_misc_irq(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
		return err;
	}

	schedule_work(&adapter->reset_task);

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */

/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void iavf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_cloud_filter *cf, *cftmp;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	/* Indicate we are in remove and not to run reset_task */
	set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
	cancel_delayed_work_sync(&adapter->init_task);
	cancel_work_sync(&adapter->reset_task);
	cancel_delayed_work_sync(&adapter->client_task);
	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_del_device(adapter);
		if (err)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 err);
	}

	/* Shut down all the garbage mashers on the detention level */
	adapter->state = __IAVF_REMOVE;
	adapter->aq_required = 0;
	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
	iavf_request_reset(adapter);
	msleep(50);
	/* If the FW isn't responding, kick it once, but only once. */
	if (!iavf_asq_done(hw)) {
		iavf_request_reset(adapter);
		msleep(50);
	}
	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_misc_irq_disable(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);

	if (adapter->watchdog_timer.function)
		del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->adminq_task);

	iavf_free_rss(adapter);

	if (hw->aq.asq.count)
		iavf_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	iounmap(hw->hw_addr);
	pci_release_regions(pdev);
	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_free_queues(adapter);
	kfree(adapter->vf_res);
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* If we got removed before an up/down sequence, we've got a filter
	 * hanging out there that we need to get rid of.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}
	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
				 list) {
		list_del(&vlf->list);
		kfree(vlf);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

static struct pci_driver iavf_driver = {
	.name      = iavf_driver_name,
	.id_table  = iavf_pci_tbl,
	.probe     = iavf_probe,
	.remove    = iavf_remove,
#ifdef CONFIG_PM
	.suspend   = iavf_suspend,
	.resume    = iavf_resume,
#endif
	.shutdown  = iavf_shutdown,
};

/**
 * iavf_init_module - Driver Registration Routine
 *
 * iavf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init iavf_init_module(void)
{
	int ret;

	pr_info("iavf: %s - version %s\n", iavf_driver_string,
		iavf_driver_version);

	pr_info("%s\n", iavf_copyright);

	iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
				  iavf_driver_name);
	if (!iavf_wq) {
		pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
		return -ENOMEM;
	}
	ret = pci_register_driver(&iavf_driver);
	if (ret)
		destroy_workqueue(iavf_wq);

	return ret;
}

module_init(iavf_init_module);

/**
 * iavf_exit_module - Driver Exit Cleanup Routine
 *
 * iavf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit iavf_exit_module(void)
{
	pci_unregister_driver(&iavf_driver);
	destroy_workqueue(iavf_wq);
}

module_exit(iavf_exit_module);