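// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
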
#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"
#include <generated/utsrelease.h>

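/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */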
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static void iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";

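/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */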
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(UTS_RELEASE);

static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;

/**
 * iavf_pdev_to_adapter - go from pci_dev to adapter
 * @pdev: pci_dev pointer
 */
static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
{
	return netdev_priv(pci_get_drvdata(pdev));
}

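/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/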
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
					 struct iavf_dma_mem *mem,
					 u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

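/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/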
enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
				     struct iavf_dma_mem *mem)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem || !mem->va)
		return IAVF_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
					  struct iavf_virt_mem *mem, u32 size)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
				      struct iavf_virt_mem *mem)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);

	return 0;
}

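/**
 * iavf_lock_timeout - try to lock mutex but give up after timeout
 * @lock: mutex that should be locked
 * @msecs: timeout in msecs
 *
 * Returns 0 on success, negative on failure
 **/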
int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
{
	unsigned int wait, delay = 10;

	for (wait = 0; wait < msecs; wait += delay) {
		if (mutex_trylock(lock))
			return 0;

		msleep(delay);
	}

	return -1;
}

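/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/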
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
	if (!(adapter->flags &
	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
		queue_work(iavf_wq, &adapter->reset_task);
	}
}

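/**
 * iavf_schedule_request_stats - Set the flags and schedule statistics request
 * @adapter: board private structure
 *
 * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
 * request and refresh ethtool stats
 **/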
void iavf_schedule_request_stats(struct iavf_adapter *adapter)
{
	adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	iavf_schedule_reset(adapter);
}

/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	/* read flush */
	iavf_flush(hw);

	synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	iavf_flush(hw);
}

/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	iavf_flush(hw);
}

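/**
 * iavf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/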
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
			     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
			     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
		}
	}
}

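/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/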
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
	struct iavf_hw *hw = &adapter->hw;

	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter, ~0);

	if (flush)
		iavf_flush(hw);
}

/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	if (adapter->state != __IAVF_REMOVE)
		/* schedule work on the private workqueue */
		queue_work(iavf_wq, &adapter->adminq_task);

	return IRQ_HANDLED;
}

/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
	struct iavf_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

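/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/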
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct iavf_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr >> 1);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr >> 1);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}

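/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * exactly one vector per ring, but where that isn't possible we map
 * multiple rings to a single vector, round-robin.
 **/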
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		iavf_map_vector_to_rxq(adapter, vidx, ridx);
		iavf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}

/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct iavf_q_vector *q_vector =
		container_of(notify, struct iavf_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}

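/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/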
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	iavf_irq_disable(adapter);

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-TxRx-%u", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-rx-%u", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-tx-%u", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  iavf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						   iavf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint for irq_set_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}

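/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/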
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &iavf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}

/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}

/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}

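/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/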
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
	unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
	struct iavf_hw *hw = &adapter->hw;
	int i;

	/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
		struct net_device *netdev = adapter->netdev;

		/* For jumbo frames on systems with 4K pages we have to use
		 * an order 1 page, so we might as well increase the size
		 * of our Rx buffer to make better use of the available space
		 */
		rx_buf_len = IAVF_RXBUFFER_3072;

		/* We use a 1536 buffer size for configurations with
		 * standard Ethernet mtu.  On x86 this gives us enough room
		 * for shared info and 192 bytes of padding.
		 */
		if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
	}
#endif

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = rx_buf_len;

		if (adapter->flags & IAVF_FLAG_LEGACY_RX)
			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
		else
			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
	}
}

/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
				 struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->vlan.vid == vlan.vid &&
		    f->vlan.tpid == vlan.tpid)
			return f;
	}

	return NULL;
}

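/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/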
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
				struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f = NULL;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		list_add_tail(&f->list, &adapter->vlan_filter_list);
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	}

clearout:
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	return f;
}

/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

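/**
 * iavf_restore_filters - Restore filters removed during reset
 * @adapter: board private structure
 */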
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
	u16 vid;

	/* re-add all VLAN filters */
	for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID)
		iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q));

	for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID)
		iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD));
}

/**
 * iavf_get_num_vlans_added - get number of VLANs added
 * @adapter: board private structure
 */
static u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
	return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
		bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
}

/**
 * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
 * @adapter: board private structure
 *
 * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
 * capabilities are negotiated, the max VLANs allowed for filtering differs.
 */
static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
{
	/* don't allow additional VLANs if the PF still hasn't responded to the
	 * VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message
	 */
	if (VLAN_ALLOWED(adapter))
		return VLAN_N_VID;
	else if (VLAN_V2_ALLOWED(adapter))
		return adapter->vlan_v2_caps.filtering.max_filters;

	return 0;
}

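/**
 * iavf_max_vlans_added - check if maximum VLANs allowed already exist
 * @adapter: board private structure
 **/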
static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
{
	if (iavf_get_num_vlans_added(adapter) <
	    iavf_get_max_vlans_allowed(adapter))
		return false;

	return true;
}

/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: VLAN protocol (802.1Q or 802.1AD)
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (!VLAN_FILTERING_ALLOWED(adapter))
		return -EIO;

	if (iavf_max_vlans_added(adapter)) {
		netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
			   iavf_get_max_vlans_allowed(adapter));
		return -EIO;
	}

	if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
		return -ENOMEM;

	if (proto == cpu_to_be16(ETH_P_8021Q))
		set_bit(vid, adapter->vsi.active_cvlans);
	else
		set_bit(vid, adapter->vsi.active_svlans);

	return 0;
}

/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: VLAN protocol (802.1Q or 802.1AD)
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
	if (proto == cpu_to_be16(ETH_P_8021Q))
		clear_bit(vid, adapter->vsi.active_cvlans);
	else
		clear_bit(vid, adapter->vsi.active_svlans);

	return 0;
}

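/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/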
static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
				  const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}

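/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/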
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
					const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	f = iavf_find_filter(adapter, macaddr);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return f;

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		f->is_new_mac = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	} else {
		f->remove = false;
	}

	return f;
}

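/**
 * iavf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/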
static int iavf_set_mac(struct net_device *netdev, void *p)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_mac_filter *f;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_filter(adapter, hw->mac.addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	f = iavf_add_filter(adapter, addr->sa_data);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (f)
		ether_addr_copy(hw->mac.addr, addr->sa_data);

	return (f == NULL) ? -ENOMEM : 0;
}

/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode while holding the mac_vlan_list_lock.
 **/
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (iavf_add_filter(adapter, addr))
		return 0;
	else
		return -ENOMEM;
}

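/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We mark
 * the relevant filter for removal and let the watchdog send the virtchnl
 * message to the PF.
 **/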
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	f = iavf_find_filter(adapter, addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	return 0;
}

/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & IAVF_FLAG_PROMISC_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & IAVF_FLAG_PROMISC_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;

	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
}

/**
 * iavf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;

		q_vector = &adapter->q_vectors[q_idx];
		napi = &q_vector->napi;
		napi_enable(napi);
	}
}

/**
 * iavf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		napi_disable(&q_vector->napi);
	}
}

/**
 * iavf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void iavf_configure(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	iavf_set_rx_mode(netdev);

	iavf_configure_tx(adapter);
	iavf_configure_rx(adapter);
	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *ring = &adapter->rx_rings[i];

		iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
	}
}

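/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Expects to be called while holding crit_lock.
 **/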
static void iavf_up_complete(struct iavf_adapter *adapter)
{
	iavf_change_state(adapter, __IAVF_RUNNING);
	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_napi_enable_all(adapter);

	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

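/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding crit_lock.
 **/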
void iavf_down(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct iavf_vlan_filter *vlf;
	struct iavf_cloud_filter *cf;
	struct iavf_fdir_fltr *fdir;
	struct iavf_mac_filter *f;
	struct iavf_adv_rss *rss;

	if (adapter->state <= __IAVF_DOWN_PENDING)
		return;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	iavf_napi_disable_all(adapter);
	iavf_irq_disable(adapter);

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* clear the sync flag on all filters */
	__dev_uc_unsync(adapter->netdev, NULL);
	__dev_mc_unsync(adapter->netdev, NULL);

	/* remove all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->remove = true;
	}

	/* remove all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->remove = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* remove all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		cf->del = true;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	/* remove all Flow Director filters */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
		fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	/* remove all advanced RSS configuration */
	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
		rss->state = IAVF_ADV_RSS_DEL_REQUEST;
	spin_unlock_bh(&adapter->adv_rss_lock);

	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
	}

	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

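/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/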
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = err;
	return 0;
}

/**
 * iavf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void iavf_free_queues(struct iavf_adapter *adapter)
{
	if (!adapter->vsi_res)
		return;
	adapter->num_active_queues = 0;
	kfree(adapter->tx_rings);
	adapter->tx_rings = NULL;
	kfree(adapter->rx_rings);
	adapter->rx_rings = NULL;
}

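/**
 * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
 * @adapter: board private structure
 *
 * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
 * stripped in certain descriptor fields. Instead of checking the offload
 * capability bits in the hot path, cache the location in the ring flags.
 **/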
void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *tx_ring = &adapter->tx_rings[i];
		struct iavf_ring *rx_ring = &adapter->rx_rings[i];

		/* prevent multiple L2TAG bits being set after VFR */
		tx_ring->flags &=
			~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
			  IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
		rx_ring->flags &=
			~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
			  IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);

		if (VLAN_ALLOWED(adapter)) {
			tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
			rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
		} else if (VLAN_V2_ALLOWED(adapter)) {
			struct virtchnl_vlan_supported_caps *stripping_support;
			struct virtchnl_vlan_supported_caps *insertion_support;

			stripping_support =
				&adapter->vlan_v2_caps.offloads.stripping_support;
			insertion_support =
				&adapter->vlan_v2_caps.offloads.insertion_support;

			if (stripping_support->outer) {
				if (stripping_support->outer &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					rx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (stripping_support->outer &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
					rx_ring->flags |=
						IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
			} else if (stripping_support->inner) {
				if (stripping_support->inner &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					rx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (stripping_support->inner &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
					rx_ring->flags |=
						IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
			}

			if (insertion_support->outer) {
				if (insertion_support->outer &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					tx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (insertion_support->outer &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
					tx_ring->flags |=
						IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
			} else if (insertion_support->inner) {
				if (insertion_support->inner &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					tx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (insertion_support->inner &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
					tx_ring->flags |=
						IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
			}
		}
	}
}

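/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/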
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
	int i, num_active_queues;

	/* If we're in reset reallocating queues we don't actually know yet for
	 * certain the PF gave us the number of queues we asked for but we'll
	 * assume it did.  Once basic reset testing is done, we'll be able to
	 * let go of this and just check vsi_res->num_queue_pairs instead.
	 */
	if (adapter->num_req_queues)
		num_active_queues = adapter->num_req_queues;
	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
		 adapter->num_tc)
		num_active_queues = adapter->ch_config.total_qps;
	else
		num_active_queues = min_t(int,
					  adapter->vsi_res->num_queue_pairs,
					  (int)(num_online_cpus()));

	adapter->tx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < num_active_queues; i++) {
		struct iavf_ring *tx_ring;
		struct iavf_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];
		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->itr_setting = IAVF_ITR_TX_DEF;
		if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->dev = &adapter->pdev->dev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->itr_setting = IAVF_ITR_RX_DEF;
	}

	adapter->num_active_queues = num_active_queues;

	iavf_set_queue_vlan_tag_loc(adapter);

	return 0;

err_out:
	iavf_free_queues(adapter);
	return -ENOMEM;
}

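/**
 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/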
static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
	 * us much good if we have more vectors than CPUs. However, we already
	 * limit the total number of queues by the number of CPUs so we do not
	 * need any further limiting here.
	 */
	v_budget = min_t(int, pairs + NONQ_VECS,
			 (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = iavf_acquire_msix_vectors(adapter, v_budget);

out:
	netif_set_real_num_rx_queues(adapter->netdev, pairs);
	netif_set_real_num_tx_queues(adapter->netdev, pairs);
	return err;
}

/**
 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
	struct iavf_aqc_get_set_rss_key_data *rss_key =
		(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct iavf_hw *hw = &adapter->hw;
	int ret = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				  adapter->rss_lut, adapter->rss_lut_size);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
	}

	return ret;
}

/**
 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

	dw = (u32 *)adapter->rss_key;
	for (i = 0; i <= adapter->rss_key_size / 4; i++)
		wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
		wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);

	iavf_flush(hw);

	return 0;
}

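/**
 * iavf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/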
int iavf_config_rss(struct iavf_adapter *adapter)
{
	if (RSS_PF(adapter)) {
		/* Enable PF to set our RSS parameters */
		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
					IAVF_FLAG_AQ_SET_RSS_KEY;
		return 0;
	} else if (RSS_AQ(adapter)) {
		return iavf_config_rss_aq(adapter);
	} else {
		return iavf_config_rss_reg(adapter);
	}
}

/**
 * iavf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 **/
static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
{
	u16 i;

	for (i = 0; i < adapter->rss_lut_size; i++)
		adapter->rss_lut[i] = i % adapter->num_active_queues;
}

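/**
 * iavf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/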
static int iavf_init_rss(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int ret;

	if (!RSS_PF(adapter)) {
		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
		if (adapter->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
		else
			adapter->hena = IAVF_DEFAULT_RSS_HENA;

		wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
		wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
	}

	iavf_fill_rss_lut(adapter);
	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
	ret = iavf_config_rss(adapter);

	return ret;
}

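/**
 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/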
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx = 0, num_q_vectors;
	struct iavf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
				     GFP_KERNEL);
	if (!adapter->q_vectors)
		return -ENOMEM;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		q_vector->adapter = adapter;
		q_vector->vsi = &adapter->vsi;
		q_vector->v_idx = q_idx;
		q_vector->reg_idx = q_idx;
		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       iavf_napi_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}

/**
 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	if (!adapter->q_vectors)
		return;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	napi_vectors = adapter->num_active_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];

		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
	}
	kfree(adapter->q_vectors);
	adapter->q_vectors = NULL;
}

/**
 * iavf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 **/
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

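/**
 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 **/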
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
	int err;

	err = iavf_alloc_queues(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = iavf_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* If we've made it so far while ADq flag being ON, then we haven't
	 * bailed out anywhere in middle. And ADq isn't just enabled but actual
	 * resources have been allocated in the reset path.
	 * Now we can truly claim that ADq is enabled.
	 */
	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc)
		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
			 adapter->num_tc);

	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
		 adapter->num_active_queues);

	return 0;
err_alloc_q_vectors:
	iavf_reset_interrupt_capability(adapter);
err_set_interrupt:
	iavf_free_queues(adapter);
err_alloc_queues:
	return err;
}

/**
 * iavf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/
static void iavf_free_rss(struct iavf_adapter *adapter)
{
	kfree(adapter->rss_key);
	adapter->rss_key = NULL;

	kfree(adapter->rss_lut);
	adapter->rss_lut = NULL;
}

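/**
 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/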
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (netif_running(netdev))
		iavf_free_traffic_irqs(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);
	iavf_free_queues(adapter);

	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err;

	netif_tx_stop_all_queues(netdev);

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err;

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_map_rings_to_vectors(adapter);
err:
	return err;
}

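/**
 * iavf_process_aq_command - process aq_required flags
 * and sends aq command
 * @adapter: pointer to iavf adapter structure
 *
 * Returns 0 on success
 * Returns error code if no command was sent
 * or error code if the command failed.
 **/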
static int iavf_process_aq_command(struct iavf_adapter *adapter)
{
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
		return iavf_send_vf_config_msg(adapter);
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
		return iavf_send_vf_offload_vlan_v2_msg(adapter);
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
		iavf_disable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
		iavf_map_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
		iavf_add_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
		iavf_add_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
		iavf_del_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
		iavf_del_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
		iavf_configure_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
		iavf_enable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
		/* This message goes straight to the firmware, not the
		 * PF, so we don't have to set current_op as we will
		 * not get a response through the ARQ.
		 */
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
		iavf_get_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
		iavf_set_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
		iavf_set_rss_key(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
		iavf_set_rss_lut(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
				     FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}
	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		iavf_set_promiscuous(adapter, 0);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
		iavf_enable_channels(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
		iavf_disable_channels(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		iavf_add_cloud_filter(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		iavf_del_cloud_filter(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
		iavf_add_fdir_filter(adapter);
		return IAVF_SUCCESS;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
		iavf_del_fdir_filter(adapter);
		return IAVF_SUCCESS;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
		iavf_add_adv_rss_cfg(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
		iavf_del_adv_rss_cfg(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
		iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
		iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
		iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
		iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
		iavf_request_stats(adapter);
		return 0;
	}

	return -EAGAIN;
}

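/**
 * iavf_set_vlan_offload_features - set VLAN offload configuration
 * @adapter: board private structure
 * @prev_features: previous features used for comparison
 * @features: updated features used for configuration
 *
 * Set the aq_required bit(s) based on the requested features passed in to
 * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
 * the watchdog if any changes are requested to expedite the request via
 * virtchnl.
 **/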
void
iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
			       netdev_features_t prev_features,
			       netdev_features_t features)
{
	bool enable_stripping = true, enable_insertion = true;
	u16 vlan_ethertype = 0;
	u64 aq_required = 0;

	/* keep cases separate because one ethertype for offloads can be
	 * disabled at the same time as another is disabled. Prefer the newly
	 * requested ethertype, then fall back to the previously configured
	 * one, and finally default to 802.1Q.
	 */
	if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
		vlan_ethertype = ETH_P_8021AD;
	else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
		vlan_ethertype = ETH_P_8021Q;
	else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
		vlan_ethertype = ETH_P_8021AD;
	else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
		vlan_ethertype = ETH_P_8021Q;
	else
		vlan_ethertype = ETH_P_8021Q;

	if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
		enable_stripping = false;
	if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
		enable_insertion = false;

	if (VLAN_ALLOWED(adapter)) {
		/* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
		 * stripping via virtchnl. VLAN insertion can be toggled on the
		 * netdev, but it doesn't require a virtchnl message
		 */
		if (enable_stripping)
			aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
		else
			aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;

	} else if (VLAN_V2_ALLOWED(adapter)) {
		switch (vlan_ethertype) {
		case ETH_P_8021Q:
			if (enable_stripping)
				aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
			else
				aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;

			if (enable_insertion)
				aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
			else
				aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
			break;
		case ETH_P_8021AD:
			if (enable_stripping)
				aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
			else
				aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;

			if (enable_insertion)
				aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
			else
				aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
			break;
		}
	}

	if (aq_required) {
		adapter->aq_required |= aq_required;
		mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
	}
}

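/**
 * iavf_startup - first step of driver startup
 * @adapter: board private structure
 *
 * Function process __IAVF_STARTUP driver state.
 * When success the state is changed to __IAVF_INIT_VERSION_CHECK
 * when fails the state is changed to __IAVF_INIT_FAILED
 **/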
static void iavf_startup(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	WARN_ON(adapter->state != __IAVF_STARTUP);

	/* driver loaded, probe complete */
	adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
	err = iavf_set_mac_type(hw);
	if (err) {
		dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
		goto err;
	}

	err = iavf_check_reset_complete(hw);
	if (err) {
		dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
			 err);
		goto err;
	}
	hw->aq.num_arq_entries = IAVF_AQ_LEN;
	hw->aq.num_asq_entries = IAVF_AQ_LEN;
	hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;

	err = iavf_init_adminq(hw);
	if (err) {
		dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
		goto err;
	}
	err = iavf_send_api_ver(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
		iavf_shutdown_adminq(hw);
		goto err;
	}
	iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
	return;
err:
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}

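/**
 * iavf_init_version_check - second step of driver startup
 * @adapter: board private structure
 *
 * Function process __IAVF_INIT_VERSION_CHECK driver state.
 * When success the state is changed to __IAVF_INIT_GET_RESOURCES
 * when fails the state is changed to __IAVF_INIT_FAILED
 **/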
static void iavf_init_version_check(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err = -EAGAIN;

	WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);

	if (!iavf_asq_done(hw)) {
		dev_err(&pdev->dev, "Admin queue command never completed\n");
		iavf_shutdown_adminq(hw);
		iavf_change_state(adapter, __IAVF_STARTUP);
		goto err;
	}

	/* aq msg sent, awaiting reply */
	err = iavf_verify_api_ver(adapter);
	if (err) {
		if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
			err = iavf_send_api_ver(adapter);
		else
			dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
				adapter->pf_version.major,
				adapter->pf_version.minor,
				VIRTCHNL_VERSION_MAJOR,
				VIRTCHNL_VERSION_MINOR);
		goto err;
	}
	err = iavf_send_vf_config_msg(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to send config request (%d)\n",
			err);
		goto err;
	}
	iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
	return;
err:
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}

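/**
 * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES
 * @adapter: board private structure
 */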
int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
{
	int i, num_req_queues = adapter->num_req_queues;
	struct iavf_vsi *vsi = &adapter->vsi;

	for (i = 0; i < adapter->vf_res->num_vsis; i++) {
		if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
			adapter->vsi_res = &adapter->vf_res->vsi_res[i];
	}
	if (!adapter->vsi_res) {
		dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
		return -ENODEV;
	}

	if (num_req_queues &&
	    num_req_queues > adapter->vsi_res->num_queue_pairs) {
		/* Problem.  The PF gave us fewer queues than what we had
		 * negotiated in our request.  Need a reset to see if we can't
		 * get back to a working state.
		 */
		dev_err(&adapter->pdev->dev,
			"Requested %d queues, but PF only gave us %d.\n",
			num_req_queues,
			adapter->vsi_res->num_queue_pairs);
		adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
		adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
		iavf_schedule_reset(adapter);

		return -EAGAIN;
	}
	adapter->num_req_queues = 0;
	adapter->vsi.id = adapter->vsi_res->vsi_id;

	adapter->vsi.back = adapter;
	adapter->vsi.base_vector = 1;
	adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
	vsi->netdev = adapter->netdev;
	vsi->qs_handle = adapter->vsi_res->qset_handle;
	if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		adapter->rss_key_size = adapter->vf_res->rss_key_size;
		adapter->rss_lut_size = adapter->vf_res->rss_lut_size;
	} else {
		adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
		adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
	}

	return 0;
}

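/**
 * iavf_init_get_resources - third step of driver startup
 * @adapter: board private structure
 *
 * Function process __IAVF_INIT_GET_RESOURCES driver state and
 * retrieves the VF resources sent by the PF in response to
 * VIRTCHNL_OP_GET_VF_RESOURCES. On success the state advances;
 * on failure the state is changed to __IAVF_INIT_FAILED.
 **/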
static void iavf_init_get_resources(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);

	if (!adapter->vf_res) {
		adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
					  GFP_KERNEL);
		if (!adapter->vf_res) {
			err = -ENOMEM;
			goto err;
		}
	}
	err = iavf_get_vf_config(adapter);
	if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
		err = iavf_send_vf_config_msg(adapter);
		goto err_alloc;
	} else if (err == IAVF_ERR_PARAM) {
		/* We only get ERR_PARAM if the device is in a very bad
		 * state or if we've been disabled for previous bad
		 * behavior. Either way, we're done now.
		 */
		iavf_shutdown_adminq(hw);
		dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
		return;
	}
	if (err) {
		dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
		goto err_alloc;
	}

	err = iavf_parse_vf_resource_msg(adapter);
	if (err)
		goto err_alloc;

	err = iavf_send_vf_offload_vlan_v2_msg(adapter);
	if (err == -EOPNOTSUPP) {
		/* underlying PF doesn't support VIRTCHNL_VF_OFFLOAD_VLAN_V2,
		 * so go directly to finishing initialization
		 */
		iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
		return;
	} else if (err) {
		dev_err(&pdev->dev, "Unable to send offload vlan v2 request (%d)\n",
			err);
		goto err_alloc;
	}

	/* underlying PF supports VIRTCHNL_VF_OFFLOAD_VLAN_V2, so update the
	 * state accordingly
	 */
	iavf_change_state(adapter, __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS);
	return;

err_alloc:
	kfree(adapter->vf_res);
	adapter->vf_res = NULL;
err:
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}

/**
 * iavf_init_get_offload_vlan_v2_caps - part of driver startup
 * @adapter: board private structure
 *
 * Function processes __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS driver state if the
 * VF negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2 during
 * VIRTCHNL_OP_GET_VF_RESOURCES.
 **/
static void iavf_init_get_offload_vlan_v2_caps(struct iavf_adapter *adapter)
{
	int ret;

	WARN_ON(adapter->state != __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS);

	memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));

	ret = iavf_get_vf_vlan_v2_caps(adapter);
	if (ret) {
		if (ret == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
			iavf_send_vf_offload_vlan_v2_msg(adapter);
		goto err;
	}

	iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
	return;
err:
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}

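/**
 * iavf_init_config_adapter - last part of driver startup
 * @adapter: board private structure
 *
 * After all the supported capabilities are negotiated, then the
 * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization.
 */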
static void iavf_init_config_adapter(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err;

	WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER);

	if (iavf_process_config(adapter))
		goto err;

	adapter->current_op = VIRTCHNL_OP_UNKNOWN;

	adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;

	netdev->netdev_ops = &iavf_netdev_ops;
	iavf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	/* MTU range: 68 - 9710 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;

	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
			 adapter->hw.mac.addr);
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
	} else {
		eth_hw_addr_set(netdev, adapter->hw.mac.addr);
		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
	}

	adapter->tx_desc_count = IAVF_DEFAULT_TXD;
	adapter->rx_desc_count = IAVF_DEFAULT_RXD;
	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;
	iavf_map_rings_to_vectors(adapter);
	if (adapter->vf_res->vf_cap_flags &
	    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err_sw_init;

	netif_carrier_off(netdev);
	adapter->link_up = false;

	/* set the semaphore to prevent any callbacks after device registration
	 * up to time when state of driver will be set to __IAVF_DOWN
	 */
	rtnl_lock();
	if (!adapter->netdev_registered) {
		err = register_netdevice(netdev);
		if (err) {
			rtnl_unlock();
			goto err_register;
		}
	}

	adapter->netdev_registered = true;

	netif_tx_stop_all_queues(netdev);
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_add_device(adapter);
		if (err)
			dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
				 err);
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
	if (netdev->features & NETIF_F_GRO)
		dev_info(&pdev->dev, "GRO is enabled\n");

	iavf_change_state(adapter, __IAVF_DOWN);
	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
	rtnl_unlock();

	iavf_misc_irq_enable(adapter);
	wake_up(&adapter->down_waitqueue);

	adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
	adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
	if (!adapter->rss_key || !adapter->rss_lut) {
		err = -ENOMEM;
		goto err_mem;
	}
	if (RSS_AQ(adapter))
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
	else
		iavf_init_rss(adapter);

	if (VLAN_V2_ALLOWED(adapter))
		/* request initial VLAN offload settings */
		iavf_set_vlan_offload_features(adapter, 0, netdev->features);

	return;
err_mem:
	iavf_free_rss(adapter);
err_register:
	iavf_free_misc_irq(adapter);
err_sw_init:
	iavf_reset_interrupt_capability(adapter);
err:
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}

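/**
 * iavf_watchdog_task - Periodic call-back task
 * @work: pointer to struct work_struct
 **/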
2371static void iavf_watchdog_task(struct work_struct *work)
2372{
2373 struct iavf_adapter *adapter = container_of(work,
2374 struct iavf_adapter,
2375 watchdog_task.work);
2376 struct iavf_hw *hw = &adapter->hw;
2377 u32 reg_val;
2378
2379 if (!mutex_trylock(&adapter->crit_lock)) {
2380 if (adapter->state == __IAVF_REMOVE)
2381 return;
2382
2383 goto restart_watchdog;
2384 }
2385
2386 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
2387 iavf_change_state(adapter, __IAVF_COMM_FAILED);
2388
2389 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2390 adapter->aq_required = 0;
2391 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2392 mutex_unlock(&adapter->crit_lock);
2393 queue_work(iavf_wq, &adapter->reset_task);
2394 return;
2395 }
2396
2397 switch (adapter->state) {
2398 case __IAVF_STARTUP:
2399 iavf_startup(adapter);
2400 mutex_unlock(&adapter->crit_lock);
2401 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2402 msecs_to_jiffies(30));
2403 return;
2404 case __IAVF_INIT_VERSION_CHECK:
2405 iavf_init_version_check(adapter);
2406 mutex_unlock(&adapter->crit_lock);
2407 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2408 msecs_to_jiffies(30));
2409 return;
2410 case __IAVF_INIT_GET_RESOURCES:
2411 iavf_init_get_resources(adapter);
2412 mutex_unlock(&adapter->crit_lock);
2413 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2414 msecs_to_jiffies(1));
2415 return;
2416 case __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS:
2417 iavf_init_get_offload_vlan_v2_caps(adapter);
2418 mutex_unlock(&adapter->crit_lock);
2419 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2420 msecs_to_jiffies(1));
2421 return;
2422 case __IAVF_INIT_CONFIG_ADAPTER:
2423 iavf_init_config_adapter(adapter);
2424 mutex_unlock(&adapter->crit_lock);
2425 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2426 msecs_to_jiffies(1));
2427 return;
2428 case __IAVF_INIT_FAILED:
2429 if (test_bit(__IAVF_IN_REMOVE_TASK,
2430 &adapter->crit_section)) {
2431
2432
2433
2434
2435 mutex_unlock(&adapter->crit_lock);
2436 return;
2437 }
2438 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
2439 dev_err(&adapter->pdev->dev,
2440 "Failed to communicate with PF; waiting before retry\n");
2441 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2442 iavf_shutdown_adminq(hw);
2443 mutex_unlock(&adapter->crit_lock);
2444 queue_delayed_work(iavf_wq,
2445 &adapter->watchdog_task, (5 * HZ));
2446 return;
2447 }
2448
2449 iavf_change_state(adapter, adapter->last_state);
2450 mutex_unlock(&adapter->crit_lock);
2451 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
2452 return;
2453 case __IAVF_COMM_FAILED:
2454 if (test_bit(__IAVF_IN_REMOVE_TASK,
2455 &adapter->crit_section)) {
			/* Set state to __IAVF_INIT_FAILED and perform remove
			 * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
			 * doesn't bring the state back to __IAVF_COMM_FAILED.
			 */
2460 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2461 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2462 mutex_unlock(&adapter->crit_lock);
2463 return;
2464 }
2465 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2466 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2467 if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
2468 reg_val == VIRTCHNL_VFR_COMPLETED) {
			/* The PF is back; restart the init state machine */
2470 dev_err(&adapter->pdev->dev,
2471 "Hardware came out of reset. Attempting reinit.\n");
			/* When the init task contacts the PF and gets
			 * everything set up again, it'll restart the
			 * watchdog for us
			 */
2476 iavf_change_state(adapter, __IAVF_STARTUP);
2477 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2478 }
2479 adapter->aq_required = 0;
2480 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2481 mutex_unlock(&adapter->crit_lock);
2482 queue_delayed_work(iavf_wq,
2483 &adapter->watchdog_task,
2484 msecs_to_jiffies(10));
2485 return;
2486 case __IAVF_RESETTING:
2487 mutex_unlock(&adapter->crit_lock);
2488 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
2489 return;
2490 case __IAVF_DOWN:
2491 case __IAVF_DOWN_PENDING:
2492 case __IAVF_TESTING:
2493 case __IAVF_RUNNING:
2494 if (adapter->current_op) {
2495 if (!iavf_asq_done(hw)) {
2496 dev_dbg(&adapter->pdev->dev,
2497 "Admin queue timeout\n");
2498 iavf_send_api_ver(adapter);
2499 }
2500 } else {
2501 int ret = iavf_process_aq_command(adapter);
			/* A nonzero return (other than -EOPNOTSUPP) means no
			 * command was pending; use the idle cycle to update
			 * stats while the adapter is running
			 */
2507 if (ret && ret != -EOPNOTSUPP &&
2508 adapter->state == __IAVF_RUNNING)
2509 iavf_request_stats(adapter);
2510 }
2511 if (adapter->state == __IAVF_RUNNING)
2512 iavf_detect_recover_hung(&adapter->vsi);
2513 break;
2514 case __IAVF_REMOVE:
2515 default:
2516 mutex_unlock(&adapter->crit_lock);
2517 return;
2518 }

	/* check for hw reset */
2521 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2522 if (!reg_val) {
2523 adapter->flags |= IAVF_FLAG_RESET_PENDING;
2524 adapter->aq_required = 0;
2525 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2526 dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
2527 queue_work(iavf_wq, &adapter->reset_task);
2528 mutex_unlock(&adapter->crit_lock);
2529 queue_delayed_work(iavf_wq,
2530 &adapter->watchdog_task, HZ * 2);
2531 return;
2532 }
2533
2534 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
2535 mutex_unlock(&adapter->crit_lock);
2536restart_watchdog:
2537 if (adapter->state >= __IAVF_DOWN)
2538 queue_work(iavf_wq, &adapter->adminq_task);
2539 if (adapter->aq_required)
2540 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2541 msecs_to_jiffies(20));
2542 else
2543 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
2544}
2545
/**
 * iavf_disable_vf - disable VF
 * @adapter: board private structure
 *
 * Set communication-failed flag and free all resources.
 * NOTE: This function is expected to be called with crit_lock being held.
 **/
2553static void iavf_disable_vf(struct iavf_adapter *adapter)
2554{
2555 struct iavf_mac_filter *f, *ftmp;
2556 struct iavf_vlan_filter *fv, *fvtmp;
2557 struct iavf_cloud_filter *cf, *cftmp;
2558
2559 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2560
	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
2565 if (adapter->state == __IAVF_RUNNING) {
2566 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2567 netif_carrier_off(adapter->netdev);
2568 netif_tx_disable(adapter->netdev);
2569 adapter->link_up = false;
2570 iavf_napi_disable_all(adapter);
2571 iavf_irq_disable(adapter);
2572 iavf_free_traffic_irqs(adapter);
2573 iavf_free_all_tx_resources(adapter);
2574 iavf_free_all_rx_resources(adapter);
2575 }
2576
2577 spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Delete all of the filters */
2580 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2581 list_del(&f->list);
2582 kfree(f);
2583 }
2584
2585 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
2586 list_del(&fv->list);
2587 kfree(fv);
2588 }
2589
2590 spin_unlock_bh(&adapter->mac_vlan_list_lock);
2591
2592 spin_lock_bh(&adapter->cloud_filter_list_lock);
2593 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
2594 list_del(&cf->list);
2595 kfree(cf);
2596 adapter->num_cloud_filters--;
2597 }
2598 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2599
2600 iavf_free_misc_irq(adapter);
2601 iavf_reset_interrupt_capability(adapter);
2602 iavf_free_q_vectors(adapter);
2603 iavf_free_queues(adapter);
2604 memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
2605 iavf_shutdown_adminq(&adapter->hw);
2606 adapter->netdev->flags &= ~IFF_UP;
2607 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2608 iavf_change_state(adapter, __IAVF_DOWN);
2609 wake_up(&adapter->down_waitqueue);
2610 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
2611}
2612
/**
 * iavf_reset_task - work item to handle a VF reset
 * @work: pointer to our work_struct
 *
 * During reset we need to shut down and reinitialize the admin queue
 * before we can use it to communicate with the PF again. We also clear
 * and reinit the rings because that context is lost as well.
 **/
2621static void iavf_reset_task(struct work_struct *work)
2622{
2623 struct iavf_adapter *adapter = container_of(work,
2624 struct iavf_adapter,
2625 reset_task);
2626 struct virtchnl_vf_resource *vfres = adapter->vf_res;
2627 struct net_device *netdev = adapter->netdev;
2628 struct iavf_hw *hw = &adapter->hw;
2629 struct iavf_mac_filter *f, *ftmp;
2630 struct iavf_cloud_filter *cf;
2631 u32 reg_val;
2632 int i = 0, err;
2633 bool running;
2634
	/* When the device is being removed it doesn't make sense to run the
	 * reset task, just return in such a case.
	 */
2638 if (!mutex_trylock(&adapter->crit_lock)) {
2639 if (adapter->state != __IAVF_REMOVE)
2640 queue_work(iavf_wq, &adapter->reset_task);
2641
2642 return;
2643 }
2644
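	/* serialize with any in-flight client (RDMA) operations */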
2645 while (!mutex_trylock(&adapter->client_lock))
2646 usleep_range(500, 1000);
2647 if (CLIENT_ENABLED(adapter)) {
2648 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
2649 IAVF_FLAG_CLIENT_NEEDS_CLOSE |
2650 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
2651 IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
2652 cancel_delayed_work_sync(&adapter->client_task);
2653 iavf_notify_client_close(&adapter->vsi, true);
2654 }
2655 iavf_misc_irq_disable(adapter);
2656 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2657 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
		/* Restart the AQ here. If we have been reset but didn't
		 * detect it, or if the PF had to reinit, our AQ will be hosed.
		 */
2661 iavf_shutdown_adminq(hw);
2662 iavf_init_adminq(hw);
2663 iavf_request_reset(adapter);
2664 }
2665 adapter->flags |= IAVF_FLAG_RESET_PENDING;

	/* poll until we see the reset actually happen */
2668 for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
2669 reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
2670 IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2671 if (!reg_val)
2672 break;
2673 usleep_range(5000, 10000);
2674 }
2675 if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
2676 dev_info(&adapter->pdev->dev, "Never saw reset\n");
2677 goto continue_reset;
2678 }
2679
	/* wait until the reset is complete and the PF is responding to us */
2681 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
		/* sleep first to make sure a minimum wait time is met */
2683 msleep(IAVF_RESET_WAIT_MS);
2684
2685 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2686 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2687 if (reg_val == VIRTCHNL_VFR_VFACTIVE)
2688 break;
2689 }
2690
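	/* restore bus mastering and MSI state clobbered by the VF reset */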
2691 pci_set_master(adapter->pdev);
2692 pci_restore_msi_state(adapter->pdev);
2693
2694 if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
2695 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
2696 reg_val);
2697 iavf_disable_vf(adapter);
2698 mutex_unlock(&adapter->client_lock);
2699 mutex_unlock(&adapter->crit_lock);
2700 return;
2701 }
2702
2703continue_reset:
	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
2708 running = adapter->state == __IAVF_RUNNING;
2709
2710 if (running) {
2711 netdev->flags &= ~IFF_UP;
2712 netif_carrier_off(netdev);
2713 netif_tx_stop_all_queues(netdev);
2714 adapter->link_up = false;
2715 iavf_napi_disable_all(adapter);
2716 }
2717 iavf_irq_disable(adapter);
2718
2719 iavf_change_state(adapter, __IAVF_RESETTING);
2720 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2721
	/* free the Tx/Rx rings and descriptors, might be better to just
	 * re-use them sometime in the future
	 */
2725 iavf_free_all_rx_resources(adapter);
2726 iavf_free_all_tx_resources(adapter);
2727
2728 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
2729
2730 iavf_shutdown_adminq(hw);
2731 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2732 err = iavf_init_adminq(hw);
2733 if (err)
2734 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
2735 err);
2736 adapter->aq_required = 0;
2737
2738 if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
2739 (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
2740 err = iavf_reinit_interrupt_scheme(adapter);
2741 if (err)
2742 goto reset_err;
2743 }
2744
2745 if (RSS_AQ(adapter)) {
2746 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2747 } else {
2748 err = iavf_init_rss(adapter);
2749 if (err)
2750 goto reset_err;
2751 }
2752
2753 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;

	/* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
	 * sent/received yet, so VLAN_V2_ALLOWED() is not reliable here; the
	 * VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message won't go out until
	 * VIRTCHNL_OP_GET_VF_RESOURCES has successfully negotiated
	 * VIRTCHNL_VF_OFFLOAD_VLAN_V2
	 */
2760 adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
2761 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
2762
2763 spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Delete the filter for the current MAC address; it could have been
	 * changed by the PF via an administratively-set MAC.
	 * It will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
	 */
2769 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2770 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
2771 list_del(&f->list);
2772 kfree(f);
2773 }
2774 }

	/* re-add all MAC filters */
2776 list_for_each_entry(f, &adapter->mac_filter_list, list) {
2777 f->add = true;
2778 }
2779 spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* check if TCs are running and re-add all cloud filters */
2782 spin_lock_bh(&adapter->cloud_filter_list_lock);
2783 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
2784 adapter->num_tc) {
2785 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
2786 cf->add = true;
2787 }
2788 }
2789 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2790
2791 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
2792 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
2793 iavf_misc_irq_enable(adapter);
2794
2795 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);

	/* We were running when the reset started, so we need to restore some
	 * state here.
	 */
2800 if (running) {
		/* allocate transmit descriptors */
2802 err = iavf_setup_all_tx_resources(adapter);
2803 if (err)
2804 goto reset_err;

		/* allocate receive descriptors */
2807 err = iavf_setup_all_rx_resources(adapter);
2808 if (err)
2809 goto reset_err;
2810
2811 if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
2812 (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
2813 err = iavf_request_traffic_irqs(adapter, netdev->name);
2814 if (err)
2815 goto reset_err;
2816
2817 adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
2818 }
2819
2820 iavf_configure(adapter);
2821
		/* iavf_up_complete() will switch the device back to
		 * __IAVF_RUNNING
		 */
2825 iavf_up_complete(adapter);
2826 netdev->flags |= IFF_UP;
2827 iavf_irq_enable(adapter, true);
2828 } else {
2829 iavf_change_state(adapter, __IAVF_DOWN);
2830 wake_up(&adapter->down_waitqueue);
2831 }
2832
2833 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2834
2835 mutex_unlock(&adapter->client_lock);
2836 mutex_unlock(&adapter->crit_lock);
2837
2838 return;
2839reset_err:
2840 mutex_unlock(&adapter->client_lock);
2841 mutex_unlock(&adapter->crit_lock);
2842 if (running) {
2843 iavf_change_state(adapter, __IAVF_RUNNING);
2844 netdev->flags |= IFF_UP;
2845 }
2846 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
2847 iavf_close(netdev);
2848}
2849
/**
 * iavf_adminq_task - worker thread to clean the admin queue
 * @work: pointer to the work_struct containing our data
 **/
2854static void iavf_adminq_task(struct work_struct *work)
2855{
2856 struct iavf_adapter *adapter =
2857 container_of(work, struct iavf_adapter, adminq_task);
2858 struct iavf_hw *hw = &adapter->hw;
2859 struct iavf_arq_event_info event;
2860 enum virtchnl_ops v_op;
2861 enum iavf_status ret, v_ret;
2862 u32 val, oldval;
2863 u16 pending;
2864
2865 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
2866 goto out;
2867
2868 if (!mutex_trylock(&adapter->crit_lock)) {
2869 if (adapter->state == __IAVF_REMOVE)
2870 return;
2871
2872 queue_work(iavf_wq, &adapter->adminq_task);
2873 goto out;
2874 }
2875
2876 event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf) {
		/* don't leak crit_lock on the allocation-failure path */
		mutex_unlock(&adapter->crit_lock);
		goto out;
	}
2880
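	/* drain pending admin queue events and hand them to virtchnl */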
2881 do {
2882 ret = iavf_clean_arq_element(hw, &event, &pending);
2883 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
2884 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
2885
		if (ret || !v_op)
			break; /* No event to process or error cleaning ARQ */
2888
2889 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
2890 event.msg_len);
2891 if (pending != 0)
2892 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
2893 } while (pending);
2894 mutex_unlock(&adapter->crit_lock);
2895
2896 if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
2897 if (adapter->netdev_registered ||
2898 !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
2899 struct net_device *netdev = adapter->netdev;
2900
2901 rtnl_lock();
2902 netdev_update_features(netdev);
2903 rtnl_unlock();
2904
2905 if (VLAN_V2_ALLOWED(adapter))
2906 iavf_set_vlan_offload_features
2907 (adapter, 0, netdev->features);
2908
2909 iavf_set_queue_vlan_tag_loc(adapter);
2910 }
2911
2912 adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
2913 }
2914 if ((adapter->flags &
2915 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
2916 adapter->state == __IAVF_RESETTING)
2917 goto freedom;
2918
	/* check for error indications */
2920 val = rd32(hw, hw->aq.arq.len);
	if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
2922 goto freedom;
2923 oldval = val;
2924 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
2925 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
2926 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
2927 }
2928 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
2929 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
2930 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
2931 }
2932 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
2933 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
2934 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
2935 }
2936 if (oldval != val)
2937 wr32(hw, hw->aq.arq.len, val);
2938
2939 val = rd32(hw, hw->aq.asq.len);
2940 oldval = val;
2941 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
2942 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
2943 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
2944 }
2945 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
2946 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
2947 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
2948 }
2949 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
2950 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
2951 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
2952 }
2953 if (oldval != val)
2954 wr32(hw, hw->aq.asq.len, val);
2955
2956freedom:
2957 kfree(event.msg_buf);
2958out:
	/* re-enable admin queue interrupt cause */
2960 iavf_misc_irq_enable(adapter);
2961}
2962
/**
 * iavf_client_task - worker thread to perform client tasks
 * @work: pointer to the work_struct containing our data
 *
 * This task handles client interactions. Because client calls can be
 * reentrant, we can't handle them in the watchdog.
 **/
2970static void iavf_client_task(struct work_struct *work)
2971{
2972 struct iavf_adapter *adapter =
2973 container_of(work, struct iavf_adapter, client_task.work);
2974
	/* If we can't get the client lock, just give up. We'll be rescheduled
	 * later.
	 */
2979 if (!mutex_trylock(&adapter->client_lock))
2980 return;
2981
2982 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
2983 iavf_client_subtask(adapter);
2984 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
2985 goto out;
2986 }
2987 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
2988 iavf_notify_client_l2_params(&adapter->vsi);
2989 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
2990 goto out;
2991 }
2992 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
2993 iavf_notify_client_close(&adapter->vsi, false);
2994 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
2995 goto out;
2996 }
2997 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
2998 iavf_notify_client_open(&adapter->vsi);
2999 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
3000 }
3001out:
3002 mutex_unlock(&adapter->client_lock);
3003}
3004
/**
 * iavf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
3011void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
3012{
3013 int i;
3014
3015 if (!adapter->tx_rings)
3016 return;
3017
3018 for (i = 0; i < adapter->num_active_queues; i++)
3019 if (adapter->tx_rings[i].desc)
3020 iavf_free_tx_resources(&adapter->tx_rings[i]);
3021}
3022
/**
 * iavf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
3033static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
3034{
3035 int i, err = 0;
3036
3037 for (i = 0; i < adapter->num_active_queues; i++) {
3038 adapter->tx_rings[i].count = adapter->tx_desc_count;
3039 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
3040 if (!err)
3041 continue;
3042 dev_err(&adapter->pdev->dev,
3043 "Allocation for Tx Queue %u failed\n", i);
3044 break;
3045 }
3046
3047 return err;
3048}
3049
/**
 * iavf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
3060static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
3061{
3062 int i, err = 0;
3063
3064 for (i = 0; i < adapter->num_active_queues; i++) {
3065 adapter->rx_rings[i].count = adapter->rx_desc_count;
3066 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
3067 if (!err)
3068 continue;
3069 dev_err(&adapter->pdev->dev,
3070 "Allocation for Rx Queue %u failed\n", i);
3071 break;
3072 }
3073 return err;
3074}
3075
/**
 * iavf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
3082void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
3083{
3084 int i;
3085
3086 if (!adapter->rx_rings)
3087 return;
3088
3089 for (i = 0; i < adapter->num_active_queues; i++)
3090 if (adapter->rx_rings[i].desc)
3091 iavf_free_rx_resources(&adapter->rx_rings[i]);
3092}
3093
/**
 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
 * @adapter: board private structure
 * @max_tx_rate: max Tx bandwidth for a traffic class, in Mbps
 **/
3099static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
3100 u64 max_tx_rate)
3101{
3102 int speed = 0, ret = 0;
3103
3104 if (ADV_LINK_SUPPORT(adapter)) {
3105 if (adapter->link_speed_mbps < U32_MAX) {
3106 speed = adapter->link_speed_mbps;
3107 goto validate_bw;
3108 } else {
3109 dev_err(&adapter->pdev->dev, "Unknown link speed\n");
3110 return -EINVAL;
3111 }
3112 }
3113
3114 switch (adapter->link_speed) {
3115 case VIRTCHNL_LINK_SPEED_40GB:
3116 speed = SPEED_40000;
3117 break;
3118 case VIRTCHNL_LINK_SPEED_25GB:
3119 speed = SPEED_25000;
3120 break;
3121 case VIRTCHNL_LINK_SPEED_20GB:
3122 speed = SPEED_20000;
3123 break;
3124 case VIRTCHNL_LINK_SPEED_10GB:
3125 speed = SPEED_10000;
3126 break;
3127 case VIRTCHNL_LINK_SPEED_5GB:
3128 speed = SPEED_5000;
3129 break;
3130 case VIRTCHNL_LINK_SPEED_2_5GB:
3131 speed = SPEED_2500;
3132 break;
3133 case VIRTCHNL_LINK_SPEED_1GB:
3134 speed = SPEED_1000;
3135 break;
3136 case VIRTCHNL_LINK_SPEED_100MB:
3137 speed = SPEED_100;
3138 break;
3139 default:
3140 break;
3141 }
3142
3143validate_bw:
3144 if (max_tx_rate > speed) {
3145 dev_err(&adapter->pdev->dev,
3146 "Invalid tx rate specified\n");
3147 ret = -EINVAL;
3148 }
3149
3150 return ret;
3151}
3152
/**
 * iavf_validate_ch_config - validate queue mapping info
 * @adapter: board private structure
 * @mqprio_qopt: queue parameters
 *
 * This function validates whether the channel config provided by the
 * user to configure queue channels is valid or not.
 *
 * Returns 0 on a valid config, negative on an invalid one.
 **/
3162static int iavf_validate_ch_config(struct iavf_adapter *adapter,
3163 struct tc_mqprio_qopt_offload *mqprio_qopt)
3164{
3165 u64 total_max_rate = 0;
3166 int i, num_qps = 0;
3167 u64 tx_rate = 0;
3168 int ret = 0;
3169
3170 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
3171 mqprio_qopt->qopt.num_tc < 1)
3172 return -EINVAL;
3173
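	/* queue offsets must be contiguous and min Tx rates are unsupported */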
	for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
3175 if (!mqprio_qopt->qopt.count[i] ||
3176 mqprio_qopt->qopt.offset[i] != num_qps)
3177 return -EINVAL;
3178 if (mqprio_qopt->min_rate[i]) {
3179 dev_err(&adapter->pdev->dev,
3180 "Invalid min tx rate (greater than 0) specified\n");
3181 return -EINVAL;
3182 }
3183
3184 tx_rate = div_u64(mqprio_qopt->max_rate[i],
3185 IAVF_MBPS_DIVISOR);
3186 total_max_rate += tx_rate;
3187 num_qps += mqprio_qopt->qopt.count[i];
3188 }
3189 if (num_qps > adapter->num_active_queues) {
3190 dev_err(&adapter->pdev->dev,
3191 "Cannot support requested number of queues\n");
3192 return -EINVAL;
3193 }
3194
3195 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
3196 return ret;
3197}
3198
/**
 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
 * @adapter: board private structure
 **/
3203static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
3204{
3205 struct iavf_cloud_filter *cf, *cftmp;
3206
3207 spin_lock_bh(&adapter->cloud_filter_list_lock);
3208 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
3209 list) {
3210 list_del(&cf->list);
3211 kfree(cf);
3212 adapter->num_cloud_filters--;
3213 }
3214 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3215}
3216
/**
 * __iavf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @type_data: tc offload data
 *
 * This function processes the config information provided by the
 * user to configure traffic classes/queue channels and packages the
 * information to request the PF to set up traffic classes.
 *
 * Returns 0 on success.
 **/
3228static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
3229{
3230 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
3231 struct iavf_adapter *adapter = netdev_priv(netdev);
3232 struct virtchnl_vf_resource *vfres = adapter->vf_res;
3233 u8 num_tc = 0, total_qps = 0;
3234 int ret = 0, netdev_tc = 0;
3235 u64 max_tx_rate;
3236 u16 mode;
3237 int i;
3238
3239 num_tc = mqprio_qopt->qopt.num_tc;
3240 mode = mqprio_qopt->mode;
3241
	/* delete queue_channel */
3243 if (!mqprio_qopt->qopt.hw) {
3244 if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
			/* reset the tc configuration */
3246 netdev_reset_tc(netdev);
3247 adapter->num_tc = 0;
3248 netif_tx_stop_all_queues(netdev);
3249 netif_tx_disable(netdev);
3250 iavf_del_all_cloud_filters(adapter);
3251 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
3252 goto exit;
3253 } else {
3254 return -EINVAL;
3255 }
3256 }
3257
	/* add queue channel */
3259 if (mode == TC_MQPRIO_MODE_CHANNEL) {
3260 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3261 dev_err(&adapter->pdev->dev, "ADq not supported\n");
3262 return -EOPNOTSUPP;
3263 }
3264 if (adapter->ch_config.state != __IAVF_TC_INVALID) {
3265 dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
3266 return -EINVAL;
3267 }
3268
3269 ret = iavf_validate_ch_config(adapter, mqprio_qopt);
3270 if (ret)
3271 return ret;
3272
3273 if (adapter->num_tc == num_tc)
3274 return 0;
3275 adapter->num_tc = num_tc;
3276
3277 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3278 if (i < num_tc) {
3279 adapter->ch_config.ch_info[i].count =
3280 mqprio_qopt->qopt.count[i];
3281 adapter->ch_config.ch_info[i].offset =
3282 mqprio_qopt->qopt.offset[i];
3283 total_qps += mqprio_qopt->qopt.count[i];
3284 max_tx_rate = mqprio_qopt->max_rate[i];
				/* convert to Mbps */
3286 max_tx_rate = div_u64(max_tx_rate,
3287 IAVF_MBPS_DIVISOR);
3288 adapter->ch_config.ch_info[i].max_tx_rate =
3289 max_tx_rate;
3290 } else {
3291 adapter->ch_config.ch_info[i].count = 1;
3292 adapter->ch_config.ch_info[i].offset = 0;
3293 }
3294 }
3295 adapter->ch_config.total_qps = total_qps;
3296 netif_tx_stop_all_queues(netdev);
3297 netif_tx_disable(netdev);
3298 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
3299 netdev_reset_tc(netdev);
		/* Report the tc mapping up the stack */
3301 netdev_set_num_tc(adapter->netdev, num_tc);
3302 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3303 u16 qcount = mqprio_qopt->qopt.count[i];
3304 u16 qoffset = mqprio_qopt->qopt.offset[i];
3305
3306 if (i < num_tc)
3307 netdev_set_tc_queue(netdev, netdev_tc++, qcount,
3308 qoffset);
3309 }
3310 }
3311exit:
3312 return ret;
3313}
3314
/**
 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
 * @adapter: board private structure
 * @f: pointer to struct flow_cls_offload
 * @filter: pointer to cloud filter structure
 **/
3321static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
3322 struct flow_cls_offload *f,
3323 struct iavf_cloud_filter *filter)
3324{
3325 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
3326 struct flow_dissector *dissector = rule->match.dissector;
3327 u16 n_proto_mask = 0;
3328 u16 n_proto_key = 0;
3329 u8 field_flags = 0;
3330 u16 addr_type = 0;
3331 u16 n_proto = 0;
3332 int i = 0;
3333 struct virtchnl_filter *vf = &filter->f;
3334
3335 if (dissector->used_keys &
3336 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
3337 BIT(FLOW_DISSECTOR_KEY_BASIC) |
3338 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
3339 BIT(FLOW_DISSECTOR_KEY_VLAN) |
3340 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
3341 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
3342 BIT(FLOW_DISSECTOR_KEY_PORTS) |
3343 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
3344 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
3345 dissector->used_keys);
3346 return -EOPNOTSUPP;
3347 }
3348
3349 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
3350 struct flow_match_enc_keyid match;
3351
3352 flow_rule_match_enc_keyid(rule, &match);
3353 if (match.mask->keyid != 0)
3354 field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
3355 }
3356
3357 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
3358 struct flow_match_basic match;
3359
3360 flow_rule_match_basic(rule, &match);
3361 n_proto_key = ntohs(match.key->n_proto);
3362 n_proto_mask = ntohs(match.mask->n_proto);
3363
3364 if (n_proto_key == ETH_P_ALL) {
3365 n_proto_key = 0;
3366 n_proto_mask = 0;
3367 }
3368 n_proto = n_proto_key & n_proto_mask;
3369 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
3370 return -EINVAL;
3371 if (n_proto == ETH_P_IPV6) {
			/* specify flow type as TCP for IPv6 */
3373 vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
3374 }
3375
3376 if (match.key->ip_proto != IPPROTO_TCP) {
3377 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
3378 return -EINVAL;
3379 }
3380 }
3381
3382 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
3383 struct flow_match_eth_addrs match;
3384
3385 flow_rule_match_eth_addrs(rule, &match);

		/* use is_broadcast and is_zero to check for all 0xf or 0 */
3388 if (!is_zero_ether_addr(match.mask->dst)) {
3389 if (is_broadcast_ether_addr(match.mask->dst)) {
3390 field_flags |= IAVF_CLOUD_FIELD_OMAC;
3391 } else {
3392 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
3393 match.mask->dst);
3394 return -EINVAL;
3395 }
3396 }
3397
3398 if (!is_zero_ether_addr(match.mask->src)) {
3399 if (is_broadcast_ether_addr(match.mask->src)) {
3400 field_flags |= IAVF_CLOUD_FIELD_IMAC;
3401 } else {
3402 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
3403 match.mask->src);
3404 return -EINVAL;
3405 }
3406 }
3407
3408 if (!is_zero_ether_addr(match.key->dst))
3409 if (is_valid_ether_addr(match.key->dst) ||
3410 is_multicast_ether_addr(match.key->dst)) {
				/* set the mask if a valid dst_mac address */
3412 for (i = 0; i < ETH_ALEN; i++)
3413 vf->mask.tcp_spec.dst_mac[i] |= 0xff;
3414 ether_addr_copy(vf->data.tcp_spec.dst_mac,
3415 match.key->dst);
3416 }
3417
3418 if (!is_zero_ether_addr(match.key->src))
3419 if (is_valid_ether_addr(match.key->src) ||
3420 is_multicast_ether_addr(match.key->src)) {
				/* set the mask if a valid src_mac address */
3422 for (i = 0; i < ETH_ALEN; i++)
3423 vf->mask.tcp_spec.src_mac[i] |= 0xff;
3424 ether_addr_copy(vf->data.tcp_spec.src_mac,
3425 match.key->src);
3426 }
3427 }
3428
3429 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
3430 struct flow_match_vlan match;
3431
3432 flow_rule_match_vlan(rule, &match);
3433 if (match.mask->vlan_id) {
3434 if (match.mask->vlan_id == VLAN_VID_MASK) {
3435 field_flags |= IAVF_CLOUD_FIELD_IVLAN;
3436 } else {
3437 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
3438 match.mask->vlan_id);
3439 return -EINVAL;
3440 }
3441 }
3442 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
3443 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
3444 }
3445
3446 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
3447 struct flow_match_control match;
3448
3449 flow_rule_match_control(rule, &match);
3450 addr_type = match.key->addr_type;
3451 }
3452
3453 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
3454 struct flow_match_ipv4_addrs match;
3455
3456 flow_rule_match_ipv4_addrs(rule, &match);
3457 if (match.mask->dst) {
3458 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
3459 field_flags |= IAVF_CLOUD_FIELD_IIP;
3460 } else {
3461 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
3462 be32_to_cpu(match.mask->dst));
3463 return -EINVAL;
3464 }
3465 }
3466
3467 if (match.mask->src) {
3468 if (match.mask->src == cpu_to_be32(0xffffffff)) {
3469 field_flags |= IAVF_CLOUD_FIELD_IIP;
3470 } else {
3471 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
					be32_to_cpu(match.mask->src));
3473 return -EINVAL;
3474 }
3475 }
3476
3477 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
3478 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
3479 return -EINVAL;
3480 }
3481 if (match.key->dst) {
3482 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
3483 vf->data.tcp_spec.dst_ip[0] = match.key->dst;
3484 }
3485 if (match.key->src) {
3486 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
3487 vf->data.tcp_spec.src_ip[0] = match.key->src;
3488 }
3489 }
3490
3491 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
3492 struct flow_match_ipv6_addrs match;
3493
3494 flow_rule_match_ipv6_addrs(rule, &match);

		/* validate mask, make sure it is not IPV6_ADDR_ANY */
3497 if (ipv6_addr_any(&match.mask->dst)) {
3498 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
3499 IPV6_ADDR_ANY);
3500 return -EINVAL;
3501 }
3502
		/* src and dest IPv6 addresses should not be LOOPBACK
		 * (0:0:0:0:0:0:0:1), which can be represented as ::1
		 */
3506 if (ipv6_addr_loopback(&match.key->dst) ||
3507 ipv6_addr_loopback(&match.key->src)) {
3508 dev_err(&adapter->pdev->dev,
3509 "ipv6 addr should not be loopback\n");
3510 return -EINVAL;
3511 }
3512 if (!ipv6_addr_any(&match.mask->dst) ||
3513 !ipv6_addr_any(&match.mask->src))
3514 field_flags |= IAVF_CLOUD_FIELD_IIP;
3515
3516 for (i = 0; i < 4; i++)
3517 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
3518 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
3519 sizeof(vf->data.tcp_spec.dst_ip));
3520 for (i = 0; i < 4; i++)
3521 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
3522 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
3523 sizeof(vf->data.tcp_spec.src_ip));
3524 }
3525 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
3526 struct flow_match_ports match;
3527
3528 flow_rule_match_ports(rule, &match);
3529 if (match.mask->src) {
3530 if (match.mask->src == cpu_to_be16(0xffff)) {
3531 field_flags |= IAVF_CLOUD_FIELD_IIP;
3532 } else {
3533 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
3534 be16_to_cpu(match.mask->src));
3535 return -EINVAL;
3536 }
3537 }
3538
3539 if (match.mask->dst) {
3540 if (match.mask->dst == cpu_to_be16(0xffff)) {
3541 field_flags |= IAVF_CLOUD_FIELD_IIP;
3542 } else {
3543 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
3544 be16_to_cpu(match.mask->dst));
3545 return -EINVAL;
3546 }
3547 }
3548 if (match.key->dst) {
3549 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
3550 vf->data.tcp_spec.dst_port = match.key->dst;
3551 }
3552
3553 if (match.key->src) {
3554 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
3555 vf->data.tcp_spec.src_port = match.key->src;
3556 }
3557 }
3558 vf->field_flags = field_flags;
3559
3560 return 0;
3561}
3562
/**
 * iavf_handle_tclass - Forward to a traffic class on the device
 * @adapter: board private structure
 * @tc: traffic class index on the device
 * @filter: pointer to cloud filter structure
 **/
3569static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
3570 struct iavf_cloud_filter *filter)
3571{
3572 if (tc == 0)
3573 return 0;
3574 if (tc < adapter->num_tc) {
3575 if (!filter->f.data.tcp_spec.dst_port) {
3576 dev_err(&adapter->pdev->dev,
3577 "Specify destination port to redirect to traffic class other than TC0\n");
3578 return -EINVAL;
3579 }
3580 }
	/* redirect to a traffic class on the same device */
3582 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
3583 filter->f.action_meta = tc;
3584 return 0;
3585}
3586
/**
 * iavf_configure_clsflower - Add tc flower filters
 * @adapter: board private structure
 * @cls_flower: pointer to struct flow_cls_offload
 **/
3592static int iavf_configure_clsflower(struct iavf_adapter *adapter,
3593 struct flow_cls_offload *cls_flower)
3594{
3595 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
3596 struct iavf_cloud_filter *filter = NULL;
3597 int err = -EINVAL, count = 50;
3598
3599 if (tc < 0) {
3600 dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
3601 return -EINVAL;
3602 }
3603
3604 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
3605 if (!filter)
3606 return -ENOMEM;
3607
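	/* bounded busy-wait for the critical lock; give up after ~50us */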
3608 while (!mutex_trylock(&adapter->crit_lock)) {
3609 if (--count == 0) {
3610 kfree(filter);
3611 return err;
3612 }
3613 udelay(1);
3614 }
3615
3616 filter->cookie = cls_flower->cookie;
3617
	/* set the mask to all zeroes to begin with */
3619 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
	/* start out with flow type and eth type IPv4 to begin with */
3621 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
3622 err = iavf_parse_cls_flower(adapter, cls_flower, filter);
3623 if (err)
3624 goto err;
3625
3626 err = iavf_handle_tclass(adapter, tc, filter);
3627 if (err)
3628 goto err;
3629
	/* add filter to the list */
3631 spin_lock_bh(&adapter->cloud_filter_list_lock);
3632 list_add_tail(&filter->list, &adapter->cloud_filter_list);
3633 adapter->num_cloud_filters++;
3634 filter->add = true;
3635 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
3636 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3637err:
3638 if (err)
3639 kfree(filter);
3640
3641 mutex_unlock(&adapter->crit_lock);
3642 return err;
3643}
3644
/**
 * iavf_find_cf - Find the cloud filter in the list
 * @adapter: board private structure
 * @cookie: filter-specific cookie
 *
 * Returns a pointer to the filter or NULL; call with cloud_filter_list_lock held.
 **/
3652static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
3653 unsigned long *cookie)
3654{
3655 struct iavf_cloud_filter *filter = NULL;
3656
3657 if (!cookie)
3658 return NULL;
3659
3660 list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
3661 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
3662 return filter;
3663 }
3664 return NULL;
3665}
3666
/**
 * iavf_delete_clsflower - Remove tc flower filters
 * @adapter: board private structure
 * @cls_flower: pointer to struct flow_cls_offload
 **/
3672static int iavf_delete_clsflower(struct iavf_adapter *adapter,
3673 struct flow_cls_offload *cls_flower)
3674{
3675 struct iavf_cloud_filter *filter = NULL;
3676 int err = 0;
3677
3678 spin_lock_bh(&adapter->cloud_filter_list_lock);
3679 filter = iavf_find_cf(adapter, &cls_flower->cookie);
3680 if (filter) {
3681 filter->del = true;
3682 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
3683 } else {
3684 err = -EINVAL;
3685 }
3686 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3687
3688 return err;
3689}
3690
/**
 * iavf_setup_tc_cls_flower - flower classifier offloads
 * @adapter: board private structure
 * @cls_flower: pointer to struct flow_cls_offload
 **/
3696static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
3697 struct flow_cls_offload *cls_flower)
3698{
3699 switch (cls_flower->command) {
3700 case FLOW_CLS_REPLACE:
3701 return iavf_configure_clsflower(adapter, cls_flower);
3702 case FLOW_CLS_DESTROY:
3703 return iavf_delete_clsflower(adapter, cls_flower);
3704 case FLOW_CLS_STATS:
3705 return -EOPNOTSUPP;
3706 default:
3707 return -EOPNOTSUPP;
3708 }
3709}
3710
/**
 * iavf_setup_tc_block_cb - block callback for tc
 * @type: type of offload
 * @type_data: offload data
 * @cb_priv: private data passed at block registration (our adapter)
 *
 * This function is the block callback for traffic classes
 **/
3719static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3720 void *cb_priv)
3721{
3722 struct iavf_adapter *adapter = cb_priv;
3723
3724 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
3725 return -EOPNOTSUPP;
3726
3727 switch (type) {
3728 case TC_SETUP_CLSFLOWER:
		return iavf_setup_tc_cls_flower(adapter, type_data);
3730 default:
3731 return -EOPNOTSUPP;
3732 }
3733}
3734
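/* driver-global list used by flow_block_cb_setup_simple() to track blocks */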
3735static LIST_HEAD(iavf_block_cb_list);
3736
/**
 * iavf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @type: type of offload
 * @type_data: tc offload data
 *
 * This function is the callback for ndo_setup_tc in the netdev_ops.
 *
 * Returns 0 on success
 **/
3748static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
3749 void *type_data)
3750{
3751 struct iavf_adapter *adapter = netdev_priv(netdev);
3752
3753 switch (type) {
3754 case TC_SETUP_QDISC_MQPRIO:
3755 return __iavf_setup_tc(netdev, type_data);
3756 case TC_SETUP_BLOCK:
3757 return flow_block_cb_setup_simple(type_data,
3758 &iavf_block_cb_list,
3759 iavf_setup_tc_block_cb,
3760 adapter, adapter, true);
3761 default:
3762 return -EOPNOTSUPP;
3763 }
3764}
3765
3766
/**
 * iavf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, and the stack is notified that
 * the interface is ready.
 **/
3778static int iavf_open(struct net_device *netdev)
3779{
3780 struct iavf_adapter *adapter = netdev_priv(netdev);
3781 int err;
3782
3783 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
3784 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
3785 return -EIO;
3786 }
3787
3788 while (!mutex_trylock(&adapter->crit_lock))
3789 usleep_range(500, 1000);
3790
	/* check the already-open case first; the state != __IAVF_DOWN test
	 * below would otherwise make it unreachable
	 */
	if (adapter->state == __IAVF_RUNNING &&
	    !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
		dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
		err = 0;
		goto err_unlock;
	}

	if (adapter->state != __IAVF_DOWN) {
		err = -EBUSY;
		goto err_unlock;
	}
3802
	/* allocate transmit descriptors */
3804 err = iavf_setup_all_tx_resources(adapter);
3805 if (err)
3806 goto err_setup_tx;
3807
	/* allocate receive descriptors */
3809 err = iavf_setup_all_rx_resources(adapter);
3810 if (err)
3811 goto err_setup_rx;
3812
	/* clear any pending interrupts, may auto mask */
3814 err = iavf_request_traffic_irqs(adapter, netdev->name);
3815 if (err)
3816 goto err_req_irq;
3817
3818 spin_lock_bh(&adapter->mac_vlan_list_lock);
3819
3820 iavf_add_filter(adapter, adapter->hw.mac.addr);
3821
3822 spin_unlock_bh(&adapter->mac_vlan_list_lock);
3823
	/* Restore VLAN filters that were removed with IFF_DOWN */
3825 iavf_restore_filters(adapter);
3826
3827 iavf_configure(adapter);
3828
3829 iavf_up_complete(adapter);
3830
3831 iavf_irq_enable(adapter, true);
3832
3833 mutex_unlock(&adapter->crit_lock);
3834
3835 return 0;
3836
3837err_req_irq:
3838 iavf_down(adapter);
3839 iavf_free_traffic_irqs(adapter);
3840err_setup_rx:
3841 iavf_free_all_rx_resources(adapter);
3842err_setup_tx:
3843 iavf_free_all_tx_resources(adapter);
3844err_unlock:
3845 mutex_unlock(&adapter->crit_lock);
3846
3847 return err;
3848}
3849
3850
/**
 * iavf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. All IRQs except vector 0 (reserved for the admin
 * queue) are freed, along with all transmit and receive resources.
 **/
3861static int iavf_close(struct net_device *netdev)
3862{
3863 struct iavf_adapter *adapter = netdev_priv(netdev);
3864 int status;
3865
3866 mutex_lock(&adapter->crit_lock);
3867
3868 if (adapter->state <= __IAVF_DOWN_PENDING) {
3869 mutex_unlock(&adapter->crit_lock);
3870 return 0;
3871 }
3872
3873 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3874 if (CLIENT_ENABLED(adapter))
3875 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
3876
3877 iavf_down(adapter);
3878 iavf_change_state(adapter, __IAVF_DOWN_PENDING);
3879 iavf_free_traffic_irqs(adapter);
3880
3881 mutex_unlock(&adapter->crit_lock);
3882
	/* We explicitly don't free resources here because the hardware is
	 * still active and can DMA into memory. Resources are cleared in
	 * iavf_virtchnl_completion() after we get confirmation from the PF
	 * driver that the rings have been stopped.
	 *
	 * Also, we wait for state to transition to __IAVF_DOWN before
	 * returning. The state change occurs in iavf_virtchnl_completion()
	 * after VF resources are released (which occurs after the PF driver
	 * processes and responds to the admin queue commands).
	 */
3894 status = wait_event_timeout(adapter->down_waitqueue,
3895 adapter->state == __IAVF_DOWN,
3896 msecs_to_jiffies(500));
3897 if (!status)
3898 netdev_warn(netdev, "Device resources not yet released\n");
3899 return 0;
3900}
3901
3902
/**
 * iavf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
3909static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
3910{
3911 struct iavf_adapter *adapter = netdev_priv(netdev);
3912
3913 netdev_dbg(netdev, "changing MTU from %d to %d\n",
3914 netdev->mtu, new_mtu);
3915 netdev->mtu = new_mtu;
3916 if (CLIENT_ENABLED(adapter)) {
3917 iavf_notify_client_l2_params(&adapter->vsi);
3918 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
3919 }
3920
3921 if (netif_running(netdev)) {
3922 adapter->flags |= IAVF_FLAG_RESET_NEEDED;
3923 queue_work(iavf_wq, &adapter->reset_task);
3924 }
3925
3926 return 0;
3927}
3928
3929#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
3930 NETIF_F_HW_VLAN_CTAG_TX | \
3931 NETIF_F_HW_VLAN_STAG_RX | \
3932 NETIF_F_HW_VLAN_STAG_TX)
3933
/**
 * iavf_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 *
 * Note: expects to be called while under rtnl_lock()
 **/
3940static int iavf_set_features(struct net_device *netdev,
3941 netdev_features_t features)
3942{
3943 struct iavf_adapter *adapter = netdev_priv(netdev);
3944
	/* trigger update of VLAN offload settings if they were modified */
3946 if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^
3947 (features & NETIF_VLAN_OFFLOAD_FEATURES))
3948 iavf_set_vlan_offload_features(adapter, netdev->features,
3949 features);
3950
3951 return 0;
3952}
3953
3954
/**
 * iavf_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @dev: this port's netdev
 * @features: offload features that the stack believes apply
 **/
3960static netdev_features_t iavf_features_check(struct sk_buff *skb,
3961 struct net_device *dev,
3962 netdev_features_t features)
3963{
3964 size_t len;
3965
	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
3970 if (skb->ip_summed != CHECKSUM_PARTIAL)
3971 return features;
3972
	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
3976 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
3977 features &= ~NETIF_F_GSO_MASK;
3978
	/* MACLEN can support at most 63 words */
3980 len = skb_network_header(skb) - skb->data;
3981 if (len & ~(63 * 2))
3982 goto out_err;
3983
	/* IPLEN and EIPLEN can support at most 127 dwords */
3985 len = skb_transport_header(skb) - skb_network_header(skb);
3986 if (len & ~(127 * 4))
3987 goto out_err;
3988
3989 if (skb->encapsulation) {
		/* L4TUNLEN can support 127 words */
3991 len = skb_inner_network_header(skb) - skb_transport_header(skb);
3992 if (len & ~(127 * 2))
3993 goto out_err;
3994
		/* IPLEN can support at most 127 dwords */
3996 len = skb_inner_transport_header(skb) -
3997 skb_inner_network_header(skb);
3998 if (len & ~(127 * 4))
3999 goto out_err;
4000 }
4001
	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value and we support all possible values supported
	 * by TCP, which is at most 15 dwords
	 */
4006
4007 return features;
4008out_err:
4009 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4010}
4011
/**
 * iavf_get_netdev_vlan_hw_features - get VLAN features that can toggle on/off
 * @adapter: board private structure
 *
 * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
 * was negotiated, determine the VLAN features that can be toggled on and off.
 **/
4019static netdev_features_t
4020iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
4021{
4022 netdev_features_t hw_features = 0;
4023
4024 if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4025 return hw_features;
4026
	/* Enable VLAN features if supported */
4028 if (VLAN_ALLOWED(adapter)) {
4029 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
4030 NETIF_F_HW_VLAN_CTAG_RX);
4031 } else if (VLAN_V2_ALLOWED(adapter)) {
4032 struct virtchnl_vlan_caps *vlan_v2_caps =
4033 &adapter->vlan_v2_caps;
4034 struct virtchnl_vlan_supported_caps *stripping_support =
4035 &vlan_v2_caps->offloads.stripping_support;
4036 struct virtchnl_vlan_supported_caps *insertion_support =
4037 &vlan_v2_caps->offloads.insertion_support;
4038
4039 if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4040 stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4041 if (stripping_support->outer &
4042 VIRTCHNL_VLAN_ETHERTYPE_8100)
4043 hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4044 if (stripping_support->outer &
4045 VIRTCHNL_VLAN_ETHERTYPE_88A8)
4046 hw_features |= NETIF_F_HW_VLAN_STAG_RX;
4047 } else if (stripping_support->inner !=
4048 VIRTCHNL_VLAN_UNSUPPORTED &&
4049 stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4050 if (stripping_support->inner &
4051 VIRTCHNL_VLAN_ETHERTYPE_8100)
4052 hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4053 }
4054
4055 if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4056 insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4057 if (insertion_support->outer &
4058 VIRTCHNL_VLAN_ETHERTYPE_8100)
4059 hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4060 if (insertion_support->outer &
4061 VIRTCHNL_VLAN_ETHERTYPE_88A8)
4062 hw_features |= NETIF_F_HW_VLAN_STAG_TX;
4063 } else if (insertion_support->inner &&
4064 insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4065 if (insertion_support->inner &
4066 VIRTCHNL_VLAN_ETHERTYPE_8100)
4067 hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4068 }
4069 }
4070
4071 return hw_features;
4072}
4073
4074
/**
 * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features
 * @adapter: board private structure
 *
 * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
 * was negotiated, determine the VLAN features that are enabled by default.
 **/
4081static netdev_features_t
4082iavf_get_netdev_vlan_features(struct iavf_adapter *adapter)
4083{
4084 netdev_features_t features = 0;
4085
4086 if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4087 return features;
4088
4089 if (VLAN_ALLOWED(adapter)) {
4090 features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4091 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
4092 } else if (VLAN_V2_ALLOWED(adapter)) {
4093 struct virtchnl_vlan_caps *vlan_v2_caps =
4094 &adapter->vlan_v2_caps;
4095 struct virtchnl_vlan_supported_caps *filtering_support =
4096 &vlan_v2_caps->filtering.filtering_support;
4097 struct virtchnl_vlan_supported_caps *stripping_support =
4098 &vlan_v2_caps->offloads.stripping_support;
4099 struct virtchnl_vlan_supported_caps *insertion_support =
4100 &vlan_v2_caps->offloads.insertion_support;
4101 u32 ethertype_init;
4102
		/* VLAN stripping starts enabled for whichever ethertype(s)
		 * were negotiated in ethertype_init
		 */
4106 ethertype_init = vlan_v2_caps->offloads.ethertype_init;
4107 if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4108 if (stripping_support->outer &
4109 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4110 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4111 features |= NETIF_F_HW_VLAN_CTAG_RX;
4112 else if (stripping_support->outer &
4113 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4114 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4115 features |= NETIF_F_HW_VLAN_STAG_RX;
4116 } else if (stripping_support->inner !=
4117 VIRTCHNL_VLAN_UNSUPPORTED) {
4118 if (stripping_support->inner &
4119 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4120 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4121 features |= NETIF_F_HW_VLAN_CTAG_RX;
4122 }
4123
		/* VLAN insertion starts enabled for whichever ethertype(s)
		 * were negotiated in ethertype_init
		 */
4127 if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4128 if (insertion_support->outer &
4129 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4130 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4131 features |= NETIF_F_HW_VLAN_CTAG_TX;
4132 else if (insertion_support->outer &
4133 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4134 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4135 features |= NETIF_F_HW_VLAN_STAG_TX;
4136 } else if (insertion_support->inner !=
4137 VIRTCHNL_VLAN_UNSUPPORTED) {
4138 if (insertion_support->inner &
4139 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4140 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4141 features |= NETIF_F_HW_VLAN_CTAG_TX;
4142 }
4143
		/* VLAN filtering starts enabled for whichever ethertype(s)
		 * were negotiated in the filtering caps
		 */
4147 ethertype_init = vlan_v2_caps->filtering.ethertype_init;
4148 if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4149 if (filtering_support->outer &
4150 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4151 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4152 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4153 if (filtering_support->outer &
4154 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4155 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4156 features |= NETIF_F_HW_VLAN_STAG_FILTER;
4157 } else if (filtering_support->inner !=
4158 VIRTCHNL_VLAN_UNSUPPORTED) {
4159 if (filtering_support->inner &
4160 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4161 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4162 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4163 if (filtering_support->inner &
4164 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4165 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4166 features |= NETIF_F_HW_VLAN_STAG_FILTER;
4167 }
4168 }
4169
4170 return features;
4171}
4172
4173#define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \
4174 (!(((requested) & (feature_bit)) && \
4175 !((allowed) & (feature_bit))))
4176
/**
 * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support
 * @adapter: board private structure
 * @requested_features: stack-requested NETDEV features
 **/
4182static netdev_features_t
4183iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
4184 netdev_features_t requested_features)
4185{
4186 netdev_features_t allowed_features;
4187
4188 allowed_features = iavf_get_netdev_vlan_hw_features(adapter) |
4189 iavf_get_netdev_vlan_features(adapter);
4190
4191 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4192 allowed_features,
4193 NETIF_F_HW_VLAN_CTAG_TX))
4194 requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
4195
4196 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4197 allowed_features,
4198 NETIF_F_HW_VLAN_CTAG_RX))
4199 requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
4200
4201 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4202 allowed_features,
4203 NETIF_F_HW_VLAN_STAG_TX))
4204 requested_features &= ~NETIF_F_HW_VLAN_STAG_TX;
4205 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4206 allowed_features,
4207 NETIF_F_HW_VLAN_STAG_RX))
4208 requested_features &= ~NETIF_F_HW_VLAN_STAG_RX;
4209
4210 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4211 allowed_features,
4212 NETIF_F_HW_VLAN_CTAG_FILTER))
4213 requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4214
4215 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4216 allowed_features,
4217 NETIF_F_HW_VLAN_STAG_FILTER))
4218 requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
4219
4220 if ((requested_features &
4221 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
4222 (requested_features &
4223 (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) &&
4224 adapter->vlan_v2_caps.offloads.ethertype_match ==
4225 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) {
4226 netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
4227 requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX |
4228 NETIF_F_HW_VLAN_STAG_TX);
4229 }
4230
4231 return requested_features;
4232}
4233
4234
/**
 * iavf_fix_features - fix up the netdev feature bits
 * @netdev: our net device
 * @features: desired feature bits
 *
 * Returns fixed-up features bits
 **/
4241static netdev_features_t iavf_fix_features(struct net_device *netdev,
4242 netdev_features_t features)
4243{
4244 struct iavf_adapter *adapter = netdev_priv(netdev);
4245
4246 return iavf_fix_netdev_vlan_features(adapter, features);
4247}
4248
4249static const struct net_device_ops iavf_netdev_ops = {
4250 .ndo_open = iavf_open,
4251 .ndo_stop = iavf_close,
4252 .ndo_start_xmit = iavf_xmit_frame,
4253 .ndo_set_rx_mode = iavf_set_rx_mode,
4254 .ndo_validate_addr = eth_validate_addr,
4255 .ndo_set_mac_address = iavf_set_mac,
4256 .ndo_change_mtu = iavf_change_mtu,
4257 .ndo_tx_timeout = iavf_tx_timeout,
4258 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid,
4259 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid,
4260 .ndo_features_check = iavf_features_check,
4261 .ndo_fix_features = iavf_fix_features,
4262 .ndo_set_features = iavf_set_features,
4263 .ndo_setup_tc = iavf_setup_tc,
4264};
4265
/**
 * iavf_check_reset_complete - check that VF reset is complete
 * @hw: pointer to hw struct
 *
 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
 **/
4272static int iavf_check_reset_complete(struct iavf_hw *hw)
4273{
4274 u32 rstat;
4275 int i;
4276
4277 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
4278 rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
4279 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
4280 if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
4281 (rstat == VIRTCHNL_VFR_COMPLETED))
4282 return 0;
4283 usleep_range(10, 20);
4284 }
4285 return -EBUSY;
4286}
4287
/**
 * iavf_process_config - Process the config information we got from the PF
 * @adapter: board private structure
 *
 * Verify that we have a valid config struct, and set up our netdev features
 * and our VSI struct.
 **/
4295int iavf_process_config(struct iavf_adapter *adapter)
4296{
4297 struct virtchnl_vf_resource *vfres = adapter->vf_res;
4298 netdev_features_t hw_vlan_features, vlan_features;
4299 struct net_device *netdev = adapter->netdev;
4300 netdev_features_t hw_enc_features;
4301 netdev_features_t hw_features;
4302
4303 hw_enc_features = NETIF_F_SG |
4304 NETIF_F_IP_CSUM |
4305 NETIF_F_IPV6_CSUM |
4306 NETIF_F_HIGHDMA |
4307 NETIF_F_SOFT_FEATURES |
4308 NETIF_F_TSO |
4309 NETIF_F_TSO_ECN |
4310 NETIF_F_TSO6 |
4311 NETIF_F_SCTP_CRC |
4312 NETIF_F_RXHASH |
4313 NETIF_F_RXCSUM |
4314 0;
4315
	/* advertise to stack only if offloads for encapsulated packets are
	 * supported
	 */
4319 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
4320 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
4321 NETIF_F_GSO_GRE |
4322 NETIF_F_GSO_GRE_CSUM |
4323 NETIF_F_GSO_IPXIP4 |
4324 NETIF_F_GSO_IPXIP6 |
4325 NETIF_F_GSO_UDP_TUNNEL_CSUM |
4326 NETIF_F_GSO_PARTIAL |
4327 0;
4328
4329 if (!(vfres->vf_cap_flags &
4330 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
4331 netdev->gso_partial_features |=
4332 NETIF_F_GSO_UDP_TUNNEL_CSUM;
4333
4334 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
4335 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
4336 netdev->hw_enc_features |= hw_enc_features;
4337 }
4338
4339 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
4340
	/* Write features and hw_features separately to avoid polluting
	 * with, or dropping, features that are set when we register.
	 */
4344 hw_features = hw_enc_features;
4345
	/* get VLAN features that can be toggled on and off */
4347 hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
4348
	/* Enable cloud filter if ADQ is supported */
4350 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
4351 hw_features |= NETIF_F_HW_TC;
4352 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
4353 hw_features |= NETIF_F_GSO_UDP_L4;
4354
4355 netdev->hw_features |= hw_features | hw_vlan_features;
4356 vlan_features = iavf_get_netdev_vlan_features(adapter);
4357
4358 netdev->features |= hw_features | vlan_features;
4359
4360 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
4361 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4362
4363 netdev->priv_flags |= IFF_UNICAST_FLT;
4364
	/* Do not turn on offloads when they are requested to be turned off.
	 * TSO needs minimum 576 bytes to work correctly.
	 */
4368 if (netdev->wanted_features) {
4369 if (!(netdev->wanted_features & NETIF_F_TSO) ||
4370 netdev->mtu < 576)
4371 netdev->features &= ~NETIF_F_TSO;
4372 if (!(netdev->wanted_features & NETIF_F_TSO6) ||
4373 netdev->mtu < 576)
4374 netdev->features &= ~NETIF_F_TSO6;
4375 if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
4376 netdev->features &= ~NETIF_F_TSO_ECN;
4377 if (!(netdev->wanted_features & NETIF_F_GRO))
4378 netdev->features &= ~NETIF_F_GRO;
4379 if (!(netdev->wanted_features & NETIF_F_GSO))
4380 netdev->features &= ~NETIF_F_GSO;
4381 }
4382
4383 return 0;
4384}
4385
/**
 * iavf_shutdown - Shutdown the device in preparation for a reboot
 * @pdev: pci device structure
 **/
4390static void iavf_shutdown(struct pci_dev *pdev)
4391{
4392 struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
4393 struct net_device *netdev = adapter->netdev;
4394
4395 netif_device_detach(netdev);
4396
4397 if (netif_running(netdev))
4398 iavf_close(netdev);
4399
4400 if (iavf_lock_timeout(&adapter->crit_lock, 5000))
		dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __func__);
4402
4403 iavf_change_state(adapter, __IAVF_REMOVE);
4404 adapter->aq_required = 0;
4405 mutex_unlock(&adapter->crit_lock);
4406
4407#ifdef CONFIG_PM
4408 pci_save_state(pdev);
4409
4410#endif
4411 pci_disable_device(pdev);
4412}
4413
/**
 * iavf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in iavf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * iavf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization and configuring of the adapter private structure
 * occur here; the rest of init is driven by the watchdog task.
 **/
4425static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4426{
4427 struct net_device *netdev;
4428 struct iavf_adapter *adapter = NULL;
4429 struct iavf_hw *hw = NULL;
4430 int err;
4431
4432 err = pci_enable_device(pdev);
4433 if (err)
4434 return err;
4435
4436 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4437 if (err) {
4438 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4439 if (err) {
4440 dev_err(&pdev->dev,
4441 "DMA configuration failed: 0x%x\n", err);
4442 goto err_dma;
4443 }
4444 }
4445
4446 err = pci_request_regions(pdev, iavf_driver_name);
4447 if (err) {
4448 dev_err(&pdev->dev,
4449 "pci_request_regions failed 0x%x\n", err);
4450 goto err_pci_reg;
4451 }
4452
4453 pci_enable_pcie_error_reporting(pdev);
4454
4455 pci_set_master(pdev);
4456
4457 netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
4458 IAVF_MAX_REQ_QUEUES);
4459 if (!netdev) {
4460 err = -ENOMEM;
4461 goto err_alloc_etherdev;
4462 }
4463
4464 SET_NETDEV_DEV(netdev, &pdev->dev);
4465
4466 pci_set_drvdata(pdev, netdev);
4467 adapter = netdev_priv(netdev);
4468
4469 adapter->netdev = netdev;
4470 adapter->pdev = pdev;
4471
4472 hw = &adapter->hw;
4473 hw->back = adapter;
4474
4475 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
4476 iavf_change_state(adapter, __IAVF_STARTUP);
4477
	/* Call save state here because it relies on the adapter struct. */
4479 pci_save_state(pdev);
4480
4481 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4482 pci_resource_len(pdev, 0));
4483 if (!hw->hw_addr) {
4484 err = -EIO;
4485 goto err_ioremap;
4486 }
4487 hw->vendor_id = pdev->vendor;
4488 hw->device_id = pdev->device;
4489 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4490 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4491 hw->subsystem_device_id = pdev->subsystem_device;
4492 hw->bus.device = PCI_SLOT(pdev->devfn);
4493 hw->bus.func = PCI_FUNC(pdev->devfn);
4494 hw->bus.bus_id = pdev->bus->number;
4495
	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
4499 mutex_init(&adapter->crit_lock);
4500 mutex_init(&adapter->client_lock);
4501 mutex_init(&hw->aq.asq_mutex);
4502 mutex_init(&hw->aq.arq_mutex);
4503
4504 spin_lock_init(&adapter->mac_vlan_list_lock);
4505 spin_lock_init(&adapter->cloud_filter_list_lock);
4506 spin_lock_init(&adapter->fdir_fltr_lock);
4507 spin_lock_init(&adapter->adv_rss_lock);
4508
4509 INIT_LIST_HEAD(&adapter->mac_filter_list);
4510 INIT_LIST_HEAD(&adapter->vlan_filter_list);
4511 INIT_LIST_HEAD(&adapter->cloud_filter_list);
4512 INIT_LIST_HEAD(&adapter->fdir_list_head);
4513 INIT_LIST_HEAD(&adapter->adv_rss_list_head);
4514
4515 INIT_WORK(&adapter->reset_task, iavf_reset_task);
4516 INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
4517 INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
4518 INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
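	/* stagger the first watchdog run across VFs (by PCI function number) */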
4519 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
4520 msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
4521
	/* Setup the wait queue for indicating transition to down status */
4523 init_waitqueue_head(&adapter->down_waitqueue);
4524
4525 return 0;
4526
4527err_ioremap:
4528 free_netdev(netdev);
4529err_alloc_etherdev:
4530 pci_disable_pcie_error_reporting(pdev);
4531 pci_release_regions(pdev);
4532err_pci_reg:
4533err_dma:
4534 pci_disable_device(pdev);
4535 return err;
4536}
4537
/**
 * iavf_suspend - Power management suspend routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is entering sleep/suspend.
 **/
4544static int __maybe_unused iavf_suspend(struct device *dev_d)
4545{
4546 struct net_device *netdev = dev_get_drvdata(dev_d);
4547 struct iavf_adapter *adapter = netdev_priv(netdev);
4548
4549 netif_device_detach(netdev);
4550
4551 while (!mutex_trylock(&adapter->crit_lock))
4552 usleep_range(500, 1000);
4553
4554 if (netif_running(netdev)) {
4555 rtnl_lock();
4556 iavf_down(adapter);
4557 rtnl_unlock();
4558 }
4559 iavf_free_misc_irq(adapter);
4560 iavf_reset_interrupt_capability(adapter);
4561
4562 mutex_unlock(&adapter->crit_lock);
4563
4564 return 0;
4565}
4566
/**
 * iavf_resume - Power management resume routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is resumed from sleep/suspend.
 **/
4573static int __maybe_unused iavf_resume(struct device *dev_d)
4574{
4575 struct pci_dev *pdev = to_pci_dev(dev_d);
4576 struct iavf_adapter *adapter;
	int err;
4578
4579 adapter = iavf_pdev_to_adapter(pdev);
4580
4581 pci_set_master(pdev);
4582
4583 rtnl_lock();
4584 err = iavf_set_interrupt_capability(adapter);
4585 if (err) {
4586 rtnl_unlock();
4587 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
4588 return err;
4589 }
4590 err = iavf_request_misc_irq(adapter);
4591 rtnl_unlock();
4592 if (err) {
4593 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
4594 return err;
4595 }
4596
4597 queue_work(iavf_wq, &adapter->reset_task);
4598
4599 netif_device_attach(adapter->netdev);
4600
4601 return err;
4602}
4603
/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
4613static void iavf_remove(struct pci_dev *pdev)
4614{
4615 struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
4616 struct net_device *netdev = adapter->netdev;
4617 struct iavf_fdir_fltr *fdir, *fdirtmp;
4618 struct iavf_vlan_filter *vlf, *vlftmp;
4619 struct iavf_adv_rss *rss, *rsstmp;
4620 struct iavf_mac_filter *f, *ftmp;
4621 struct iavf_cloud_filter *cf, *cftmp;
4622 struct iavf_hw *hw = &adapter->hw;
4623 int err;
4624
	/* When reboot/shutdown is in progress no need to do anything
	 * as the adapter is already in the REMOVE state that was set
	 * during the iavf_shutdown() callback.
	 */
4629 if (adapter->state == __IAVF_REMOVE)
4630 return;
4631
4632 set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
4633
	/* Wait until port initialization is complete.
	 * There are flows where register/unregister netdev may race.
	 */
4636 while (1) {
4637 mutex_lock(&adapter->crit_lock);
4638 if (adapter->state == __IAVF_RUNNING ||
4639 adapter->state == __IAVF_DOWN ||
4640 adapter->state == __IAVF_INIT_FAILED) {
4641 mutex_unlock(&adapter->crit_lock);
4642 break;
4643 }
4644
4645 mutex_unlock(&adapter->crit_lock);
4646 usleep_range(500, 1000);
4647 }
4648 cancel_delayed_work_sync(&adapter->watchdog_task);
4649
4650 if (adapter->netdev_registered) {
4651 rtnl_lock();
4652 unregister_netdevice(netdev);
4653 adapter->netdev_registered = false;
4654 rtnl_unlock();
4655 }
4656 if (CLIENT_ALLOWED(adapter)) {
4657 err = iavf_lan_del_device(adapter);
4658 if (err)
4659 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
4660 err);
4661 }
4662
4663 mutex_lock(&adapter->crit_lock);
4664 dev_info(&adapter->pdev->dev, "Remove device\n");
4665 iavf_change_state(adapter, __IAVF_REMOVE);
4666
4667 iavf_request_reset(adapter);
4668 msleep(50);
	/* If the FW isn't responding, kick it once, but only once. */
4670 if (!iavf_asq_done(hw)) {
4671 iavf_request_reset(adapter);
4672 msleep(50);
4673 }
4674
4675 iavf_misc_irq_disable(adapter);
4676
4677 cancel_work_sync(&adapter->reset_task);
4678 cancel_delayed_work_sync(&adapter->watchdog_task);
4679 cancel_work_sync(&adapter->adminq_task);
4680 cancel_delayed_work_sync(&adapter->client_task);
4681
4682 adapter->aq_required = 0;
4683 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
4684
4685 iavf_free_all_tx_resources(adapter);
4686 iavf_free_all_rx_resources(adapter);
4687 iavf_free_misc_irq(adapter);
4688
4689 iavf_reset_interrupt_capability(adapter);
4690 iavf_free_q_vectors(adapter);
4691
4692 iavf_free_rss(adapter);
4693
4694 if (hw->aq.asq.count)
4695 iavf_shutdown_adminq(hw);
4696
	/* destroy the locks only once, here */
4698 mutex_destroy(&hw->aq.arq_mutex);
4699 mutex_destroy(&hw->aq.asq_mutex);
4700 mutex_destroy(&adapter->client_lock);
4701 mutex_unlock(&adapter->crit_lock);
4702 mutex_destroy(&adapter->crit_lock);
4703
4704 iounmap(hw->hw_addr);
4705 pci_release_regions(pdev);
4706 iavf_free_queues(adapter);
4707 kfree(adapter->vf_res);
4708 spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* If we got removed before an up/down sequence, we've got a filter
	 * hanging out there that we need to get rid of.
	 */
4712 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
4713 list_del(&f->list);
4714 kfree(f);
4715 }
4716 list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
4717 list) {
4718 list_del(&vlf->list);
4719 kfree(vlf);
4720 }
4721
4722 spin_unlock_bh(&adapter->mac_vlan_list_lock);
4723
4724 spin_lock_bh(&adapter->cloud_filter_list_lock);
4725 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
4726 list_del(&cf->list);
4727 kfree(cf);
4728 }
4729 spin_unlock_bh(&adapter->cloud_filter_list_lock);
4730
4731 spin_lock_bh(&adapter->fdir_fltr_lock);
4732 list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
4733 list_del(&fdir->list);
4734 kfree(fdir);
4735 }
4736 spin_unlock_bh(&adapter->fdir_fltr_lock);
4737
4738 spin_lock_bh(&adapter->adv_rss_lock);
4739 list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
4740 list) {
4741 list_del(&rss->list);
4742 kfree(rss);
4743 }
4744 spin_unlock_bh(&adapter->adv_rss_lock);
4745
4746 free_netdev(netdev);
4747
4748 pci_disable_pcie_error_reporting(pdev);
4749
4750 pci_disable_device(pdev);
4751}
4752
4753static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
4754
4755static struct pci_driver iavf_driver = {
4756 .name = iavf_driver_name,
4757 .id_table = iavf_pci_tbl,
4758 .probe = iavf_probe,
4759 .remove = iavf_remove,
4760 .driver.pm = &iavf_pm_ops,
4761 .shutdown = iavf_shutdown,
4762};
4763
/**
 * iavf_init_module - Driver Registration Routine
 *
 * iavf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
4770static int __init iavf_init_module(void)
4771{
4772 int ret;
4773
4774 pr_info("iavf: %s\n", iavf_driver_string);
4775
4776 pr_info("%s\n", iavf_copyright);
4777
4778 iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
4779 iavf_driver_name);
4780 if (!iavf_wq) {
4781 pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
4782 return -ENOMEM;
4783 }
	ret = pci_register_driver(&iavf_driver);
	if (ret)
		/* don't leak the workqueue if registration fails */
		destroy_workqueue(iavf_wq);
	return ret;
4786}
4787
4788module_init(iavf_init_module);
4789
4790
/**
 * iavf_exit_module - Driver Exit Cleanup Routine
 *
 * iavf_exit_module is called just before the driver is removed
 * from memory.
 **/
4796static void __exit iavf_exit_module(void)
4797{
4798 pci_unregister_driver(&iavf_driver);
4799 destroy_workqueue(iavf_wq);
4800}
4801
4802module_exit(iavf_exit_module);