#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>

#include "i40e.h"
#include "i40e_diag.h"
#include <net/udp_tunnel.h>

#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
	"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 1
#define DRV_VERSION_BUILD 14
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
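/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */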
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40e_wq;
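/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/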
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}
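/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/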
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}
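/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/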
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}
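/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/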
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}
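/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/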
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
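/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/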
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
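/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/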
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}
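/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/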
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}
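/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/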
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						 tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
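/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/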
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}
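/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to read stats from
 * @stats: rtnl_link_stats64 struct to populate
 **/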
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes += bytes;
}
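/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: data structure to store statistics
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/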
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;
		i40e_get_netdev_stats_struct_tx(tx_ring, stats);

		/* the Rx ring is laid out directly after its paired Tx ring */
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;

		/* the XDP Tx ring, in turn, follows the Rx ring */
		if (i40e_enabled_xdp_vsi(vsi))
			i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_dropped = vsi_stats->rx_dropped;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;
}
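/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/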
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}
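/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/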
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}
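/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/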
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
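/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/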
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}
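/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/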
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}
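/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/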
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}
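/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/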
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
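/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/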
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_sb_match, &nsd->fd_sb_match);
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}
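/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/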
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
}
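/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_filter_hash_lock.
 **/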
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}
	return NULL;
}
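/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/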
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}
	return NULL;
}
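/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/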
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a port VLAN, the VSI is always in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ANY. Re-checking the full table each
	 * time would be expensive, so instead i40e_add_filter maintains the
	 * has_vlan_filter flag, and i40e_sync_vsi_filters re-derives it
	 * while it is already walking the filter list for other reasons.
	 */
	return vsi->has_vlan_filter;
}
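/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update all non-VLAN filters to be in sync with the new VLAN state: when a
 * PVID is assigned, every filter must use the PVID; when VLAN filters exist,
 * VLAN=-1 filters must become VLAN=0; and when no VLAN filters remain,
 * VLAN=0 filters must revert to VLAN=-1. Replaced filters are moved onto the
 * temporary add and delete lists so that the caller can program hardware.
 *
 * Returns 0 on success or -ENOMEM if a replacement filter cannot be
 * allocated.
 **/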
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are.
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */

	/* Update the filters about to be added in place */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
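/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/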
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}
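/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/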
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		/* If we're in overflow promisc mode, set the state directly
		 * to failed, so we don't bother to try sending the filter
		 * to the hardware.
		 */
		if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))
			f->state = I40E_FILTER_FAILED;
		else
			f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place.
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}
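/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/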
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
	if (!f)
		return;

	/* If the filter was never added to firmware then we can just delete it
	 * directly and we don't want to set the status to remove or else an
	 * admin queue command will unnecessarily fire.
	 */
	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
	}

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}
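/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/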
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan);
	__i40e_del_filter(vsi, f);
}
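/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/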
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
					    const u8 *macaddr)
{
	struct i40e_mac_filter *f, *add = NULL;
	struct hlist_node *h;
	int bkt;

	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	if (!i40e_is_vsi_in_vlan(vsi))
		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}

	return add;
}
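/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/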
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
	     "Missing mac_filter_hash_lock\n");
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
}
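/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/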
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	i40e_add_mac_filter(vsi, addr->sa_data);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
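/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/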
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	qcount = vsi->alloc_queue_pairs;

	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->alloc_rss_size,
					       num_tc_qps);
				break;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					       cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
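/**
 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/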
static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (i40e_add_mac_filter(vsi, addr))
		return 0;
	else
		return -ENOMEM;
}
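/**
 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/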
static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	i40e_del_mac_filter(vsi, addr);

	return 0;
}
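/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/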
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	spin_lock_bh(&vsi->mac_filter_hash_lock);

	__dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
	__dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
}
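/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to VSI struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries needs to be undone.
 *
 * MAC filter entries from this list were slated for deletion.
 **/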
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;

	hlist_for_each_entry_safe(f, h, from, hlist) {
		u64 key = i40e_addr_to_hkey(f->macaddr);

		/* Move the element back into MAC filter list */
		hlist_del(&f->hlist);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);
	}
}
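/**
 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to vsi struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries needs to be undone.
 *
 * MAC filter entries from this list were slated for addition.
 **/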
static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;

	hlist_for_each_entry_safe(new, h, from, hlist) {
		/* We can simply free the wrapper structure */
		hlist_del(&new->hlist);
		kfree(new);
	}
}
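/**
 * i40e_next_filter - Get the next non-broadcast filter from a list
 * @next: pointer to filter in list
 *
 * Returns the next non-broadcast filter in the list. Required so that we
 * ignore broadcast filters within the list, since these are not handled via
 * the normal firmware update path.
 **/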
static
struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
{
	hlist_for_each_entry_continue(next, hlist) {
		if (!is_broadcast_ether_addr(next->f->macaddr))
			return next;
	}

	return NULL;
}
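/**
 * i40e_update_filter_state - Update filter state based on return data
 * from firmware
 * @count: Number of filters added
 * @add_list: return data from fw
 * @add_head: pointer to first filter in current batch
 *
 * MAC filter entries from list were slated to be added to device. Returns
 * number of successful filters. Note that 0 does NOT mean failure!
 **/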
static int
i40e_update_filter_state(int count,
			 struct i40e_aqc_add_macvlan_element_data *add_list,
			 struct i40e_new_mac_filter *add_head)
{
	int retval = 0;
	int i;

	for (i = 0; i < count; i++) {
		/* Always check the status of each filter. We don't need to
		 * check the firmware return status because we pre-set the
		 * filter status to I40E_AQC_MM_ERR_NO_RES when sending the
		 * filter list, and firmware overwrites the match method on
		 * each entry it was able to add.
		 */
		if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
			add_head->state = I40E_FILTER_FAILED;
		} else {
			add_head->state = I40E_FILTER_ACTIVE;
			retval++;
		}

		add_head = i40e_next_filter(add_head);
		if (!add_head)
			break;
	}

	return retval;
}
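/**
 * i40e_aqc_del_filters - Request firmware to delete a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @num_del: the number of filters to delete
 * @retval: Set to -EIO on failure to delete
 *
 * Send a request to firmware via AdminQ to delete a set of filters. Uses
 * *retval instead of a return value so that callers can batch errors and
 * report only the final one.
 **/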
static
void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_remove_macvlan_element_data *list,
			  int num_del, int *retval)
{
	struct i40e_hw *hw = &vsi->back->hw;
	i40e_status aq_ret;
	int aq_err;

	aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
	aq_err = hw->aq.asq_last_status;

	/* Explicitly ignore and do not report when firmware returns ENOENT */
	if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
		*retval = -EIO;
		dev_info(&vsi->back->pdev->dev,
			 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
			 vsi_name, i40e_stat_str(hw, aq_ret),
			 i40e_aq_str(hw, aq_err));
	}
}
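/**
 * i40e_aqc_add_filters - Request firmware to add a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @add_head: Position in the add hlist
 * @num_add: the number of filters to add
 * @promisc_changed: set to true if promiscuous mode was forced on
 *
 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out
 * of space for more filters.
 **/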
static
void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_add_macvlan_element_data *list,
			  struct i40e_new_mac_filter *add_head,
			  int num_add, bool *promisc_changed)
{
	struct i40e_hw *hw = &vsi->back->hw;
	int aq_err, fcnt;

	i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
	aq_err = hw->aq.asq_last_status;
	fcnt = i40e_update_filter_state(num_add, list, add_head);

	if (fcnt != num_add) {
		*promisc_changed = true;
		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		dev_warn(&vsi->back->pdev->dev,
			 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
			 i40e_aq_str(hw, aq_err),
			 vsi_name);
	}
}
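/**
 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
 * @vsi: pointer to the VSI
 * @vsi_name: the VSI name
 * @f: filter data
 *
 * This function sets or clears the promiscuous broadcast flags for VLAN
 * filters in order to properly receive broadcast frames. Assumes that only
 * broadcast filters are passed.
 **/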
static i40e_status
i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_mac_filter *f)
{
	bool enable = f->state == I40E_FILTER_NEW;
	struct i40e_hw *hw = &vsi->back->hw;
	i40e_status aq_ret;

	if (f->vlan == I40E_VLAN_ANY) {
		aq_ret = i40e_aq_set_vsi_broadcast(hw,
						   vsi->seid,
						   enable,
						   NULL);
	} else {
		aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
							    vsi->seid,
							    enable,
							    f->vlan,
							    NULL);
	}

	if (aq_ret)
		dev_warn(&vsi->back->pdev->dev,
			 "Error %s setting broadcast promiscuous mode on %s\n",
			 i40e_aq_str(hw, hw->aq.asq_last_status),
			 vsi_name);

	return aq_ret;
}
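/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/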
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct hlist_head tmp_add_list, tmp_del_list;
	struct i40e_mac_filter *f;
	struct i40e_new_mac_filter *new, *add_head = NULL;
	struct i40e_hw *hw = &vsi->back->hw;
	unsigned int failed_filters = 0;
	unsigned int vlan_filters = 0;
	bool promisc_changed = false;
	char vsi_name[16] = "PF";
	int filter_list_len = 0;
	i40e_status aq_ret = 0;
	u32 changed_flags = 0;
	struct hlist_node *h;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	int retval = 0;
	u16 cmd_flags;
	int list_size;
	int bkt;

	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	INIT_HLIST_HEAD(&tmp_add_list);
	INIT_HLIST_HEAD(&tmp_del_list);

	if (vsi->type == I40E_VSI_SRIOV)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
	else if (vsi->type != I40E_VSI_MAIN)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		/* Create a list of filters to delete. */
		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
			if (f->state == I40E_FILTER_REMOVE) {
				/* Move the element into temporary del_list */
				hash_del(&f->hlist);
				hlist_add_head(&f->hlist, &tmp_del_list);

				/* Avoid counting removed filters */
				continue;
			}
			if (f->state == I40E_FILTER_NEW) {
				/* Create a temporary i40e_new_mac_filter */
				new = kzalloc(sizeof(*new), GFP_ATOMIC);
				if (!new)
					goto err_no_memory_locked;

				/* Store pointer to the real filter */
				new->f = f;
				new->state = f->state;

				/* Add it to the hash list */
				hlist_add_head(&new->hlist, &tmp_add_list);
			}

			/* Count the number of active (current and new) VLAN
			 * filters we have now. Does not count filters which
			 * are marked for deletion.
			 */
			if (f->vlan > 0)
				vlan_filters++;
		}

		retval = i40e_correct_mac_vlan_filters(vsi,
						       &tmp_add_list,
						       &tmp_del_list,
						       vlan_filters);
		if (retval)
			goto err_no_memory_locked;

		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}

	/* Now process 'del_list' outside the lock */
	if (!hlist_empty(&tmp_del_list)) {
		filter_list_len = hw->aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		list_size = filter_list_len *
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kzalloc(list_size, GFP_ATOMIC);
		if (!del_list)
			goto err_no_memory;

		hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
			cmd_flags = 0;

			/* handle broadcast filters by updating the broadcast
			 * promiscuous flag instead of deleting a MAC filter
			 */
			if (is_broadcast_ether_addr(f->macaddr)) {
				i40e_aqc_broadcast_filter(vsi, vsi_name, f);

				hlist_del(&f->hlist);
				kfree(f);
				continue;
			}

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			if (f->vlan == I40E_VLAN_ANY) {
				del_list[num_del].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			} else {
				del_list[num_del].vlan_tag =
					cpu_to_le16((u16)(f->vlan));
			}

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				i40e_aqc_del_filters(vsi, vsi_name, del_list,
						     num_del, &retval);
				memset(del_list, 0, list_size);
				num_del = 0;
			}

			/* Release memory for MAC filter entries which were
			 * synced up with HW.
			 */
			hlist_del(&f->hlist);
			kfree(f);
		}

		if (num_del) {
			i40e_aqc_del_filters(vsi, vsi_name, del_list,
					     num_del, &retval);
		}

		kfree(del_list);
		del_list = NULL;
	}

	if (!hlist_empty(&tmp_add_list)) {
		/* Do all the adds now. */
		filter_list_len = hw->aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		list_size = filter_list_len *
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kzalloc(list_size, GFP_ATOMIC);
		if (!add_list)
			goto err_no_memory;

		num_add = 0;
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			if (test_bit(__I40E_VSI_OVERFLOW_PROMISC,
				     vsi->state)) {
				new->state = I40E_FILTER_FAILED;
				continue;
			}

			/* handle broadcast filters by updating the broadcast
			 * promiscuous flag instead of adding a MAC filter
			 */
			if (is_broadcast_ether_addr(new->f->macaddr)) {
				if (i40e_aqc_broadcast_filter(vsi, vsi_name,
							      new->f))
					new->state = I40E_FILTER_FAILED;
				else
					new->state = I40E_FILTER_ACTIVE;
				continue;
			}

			/* add to add array */
			if (num_add == 0)
				add_head = new;
			cmd_flags = 0;
			ether_addr_copy(add_list[num_add].mac_addr,
					new->f->macaddr);
			if (new->f->vlan == I40E_VLAN_ANY) {
				add_list[num_add].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			} else {
				add_list[num_add].vlan_tag =
					cpu_to_le16((u16)(new->f->vlan));
			}
			add_list[num_add].queue_number = 0;
			/* set invalid match method for later detection */
			add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				i40e_aqc_add_filters(vsi, vsi_name, add_list,
						     add_head, num_add,
						     &promisc_changed);
				memset(add_list, 0, list_size);
				num_add = 0;
			}
		}
		if (num_add) {
			i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
					     num_add, &promisc_changed);
		}
		/* Now move all of the filters from the temp add list back to
		 * the VSI's list.
		 */
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			/* Only update the state if we're still NEW */
			if (new->f->state == I40E_FILTER_NEW)
				new->f->state = new->state;
			hlist_del(&new->hlist);
			kfree(new);
		}
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		kfree(add_list);
		add_list = NULL;
	}

	/* Determine the number of active and failed filters. */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	vsi->active_filters = 0;
	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->state == I40E_FILTER_ACTIVE)
			vsi->active_filters++;
		else if (f->state == I40E_FILTER_FAILED)
			failed_filters++;
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* If promiscuous mode has changed, we need to calculate a new
	 * threshold for when we are safe to exit
	 */
	if (promisc_changed)
		vsi->promisc_threshold = (vsi->active_filters * 3) / 4;

	/* Check if we are able to exit overflow promiscuous mode. We can
	 * safely exit if we didn't just enter, we no longer have any failed
	 * filters, and we have reduced filters below the threshold value.
	 */
	if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) &&
	    !promisc_changed && !failed_filters &&
	    (vsi->active_filters < vsi->promisc_threshold)) {
		dev_info(&pf->pdev->dev,
			 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
			 vsi_name);
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		promisc_changed = true;
		vsi->promisc_threshold = 0;
	}

	/* if the VF is not trusted do not do promisc */
	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		goto out;
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;

		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed on %s, err %s aq_err %s\n",
				 vsi_name,
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}

	if ((changed_flags & IFF_PROMISC) || promisc_changed) {
		bool cur_promisc;

		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_VSI_OVERFLOW_PROMISC,
					vsi->state));
		if ((vsi->type == I40E_VSI_MAIN) &&
		    (pf->lan_veb != I40E_NO_VEB) &&
		    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
			/* set defport ON for Main VSI instead of true promisc
			 * this way we will get all unicast/multicast and VLAN
			 * promisc behavior but will not get VF or VMDq traffic
			 * replicated on the Main VSI.
			 */
			if (pf->cur_promisc != cur_promisc) {
				pf->cur_promisc = cur_promisc;
				if (cur_promisc)
					aq_ret =
					      i40e_aq_set_default_vsi(hw,
								      vsi->seid,
								      NULL);
				else
					aq_ret =
					    i40e_aq_clear_default_vsi(hw,
								      vsi->seid,
								      NULL);
				if (aq_ret) {
					retval = i40e_aq_rc_to_posix(aq_ret,
							hw->aq.asq_last_status);
					dev_info(&pf->pdev->dev,
						 "Set default VSI failed on %s, err %s, aq_err %s\n",
						 vsi_name,
						 i40e_stat_str(hw, aq_ret),
						 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
				}
			}
		} else {
			aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
							  hw,
							  vsi->seid,
							  cur_promisc, NULL,
							  true);
			if (aq_ret) {
				retval =
				i40e_aq_rc_to_posix(aq_ret,
						    hw->aq.asq_last_status);
				dev_info(&pf->pdev->dev,
					 "set unicast promisc failed on %s, err %s, aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
			}
			aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
							  hw,
							  vsi->seid,
							  cur_promisc, NULL);
			if (aq_ret) {
				retval =
				i40e_aq_rc_to_posix(aq_ret,
						    hw->aq.asq_last_status);
				dev_info(&pf->pdev->dev,
					 "set multicast promisc failed on %s, err %s, aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
			}
		}
		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
						   vsi->seid,
						   cur_promisc, NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     pf->hw.aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set brdcast promisc failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw,
					     hw->aq.asq_last_status));
		}
	}
out:
	/* if something went wrong then set the changed flag so we try again */
	if (retval)
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;

	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return retval;

err_no_memory:
	/* Restore elements on the temporary add and delete lists */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
err_no_memory_locked:
	i40e_undo_del_filter_entries(vsi, &tmp_del_list);
	i40e_undo_add_filter_entries(vsi, &tmp_add_list);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return -ENOMEM;
}
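/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/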
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
		return;
	pf->flags &= ~I40E_FLAG_FILTER_SYNC;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
			int ret = i40e_sync_vsi_filters(pf->vsi[v]);

			if (ret) {
				/* come back and try again later */
				pf->flags |= I40E_FLAG_FILTER_SYNC;
				break;
			}
		}
	}
}
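/**
 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
 * @vsi: the vsi
 **/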
static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
{
	if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
		return I40E_RXBUFFER_2048;
	else
		return I40E_RXBUFFER_3072;
}

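/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/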
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	if (i40e_enabled_xdp_vsi(vsi)) {
		int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

		if (frame_size > i40e_max_xdp_frame_size(vsi))
			return -EINVAL;
	}

	netdev_info(netdev, "changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		i40e_vsi_reinit_locked(vsi);
	pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
		      I40E_FLAG_CLIENT_L2_CHANGE);
	return 0;
}

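/**
 * i40e_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/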
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return i40e_ptp_get_ts_config(pf, ifr);
	case SIOCSHWTSTAMP:
		return i40e_ptp_set_ts_config(pf, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

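/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/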
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
		return;  /* already enabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}

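/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/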
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
		return;  /* already disabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}

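/**
 * i40e_vlan_rx_register - Setup or shutdown vlan offload
 * @netdev: network interface to be adjusted
 * @features: netdev features to test if VLAN offload is enabled or not
 **/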
static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);
}

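/**
 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only, -1 = any)
 *
 * Adds a MAC/VLAN filter with the specified VLAN for each existing MAC
 * address already in the hash table; it does not perform any accounting to
 * update filters based on VLAN mode.
 *
 * NOTE: this function expects to be called while under the
 * mac_filter_hash_lock
 **/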
int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *add_f;
	struct hlist_node *h;
	int bkt;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add_f = i40e_add_filter(vsi, f->macaddr, vid);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			return -ENOMEM;
		}
	}

	return 0;
}

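/**
 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be added
 **/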
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
{
	int err;

	if (vsi->info.pvid)
		return -EINVAL;

	/* The network stack will attempt to add VID=0, with the intention to
	 * receive priority tagged packets with a VLAN of 0. Our HW receives
	 * these packets by default when configured to receive untagged
	 * packets, so we don't need to add a filter for this case.
	 */
	if (!vid)
		return 0;

	/* Locked once because all functions invoked below iterate the list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	err = i40e_add_vlan_all_mac(vsi, vid);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	if (err)
		return err;

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}

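/**
 * i40e_rm_vlan_all_mac - Remove MAC/VLAN filter for each existing MAC address
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
 *
 * NOTE: this function expects to be called while under the
 * mac_filter_hash_lock
 **/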
void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->vlan == vid)
			__i40e_del_filter(vsi, f);
	}
}

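/**
 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be removed
 **/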
void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
{
	if (!vid || vsi->info.pvid)
		return;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_rm_vlan_all_mac(vsi, vid);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
}

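/**
 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol value
 * @vid: vlan id to be added
 *
 * net_device_ops implementation for adding vlan ids
 **/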
static int i40e_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	int ret = 0;

	if (vid >= VLAN_N_VID)
		return -EINVAL;

	ret = i40e_vsi_add_vlan(vsi, vid);
	if (!ret)
		set_bit(vid, vsi->active_vlans);

	return ret;
}

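/**
 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol value
 * @vid: vlan id to be removed
 *
 * net_device_ops implementation for removing vlan ids
 **/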
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	/* return code is ignored as there is nothing a user
	 * can do about failure to remove and a log message was
	 * already printed from the other function
	 */
	i40e_vsi_kill_vlan(vsi, vid);

	clear_bit(vid, vsi->active_vlans);

	return 0;
}

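/**
 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
 * @vsi: the vsi being brought back up
 **/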
static void i40e_restore_vlan(struct i40e_vsi *vsi)
{
	u16 vid;

	if (!vsi->netdev)
		return;

	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);

	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
		i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
				     vid);
}

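/**
 * i40e_vsi_add_pvid - Add pvid for the VSI
 * @vsi: the vsi being adjusted
 * @vid: the vlan id to set as a PVID
 **/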
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.pvid = cpu_to_le16(vid);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
				    I40E_AQ_VSI_PVLAN_EMOD_STR;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "add pvid failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		return -ENOENT;
	}

	return 0;
}

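/**
 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
 * @vsi: the vsi being adjusted
 *
 * Just use the vlan_rx_register() service to put it back to normal
 **/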
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
	i40e_vlan_stripping_disable(vsi);

	vsi->info.pvid = 0;
}

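/**
 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/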
static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);

	if (!i40e_enabled_xdp_vsi(vsi))
		return err;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);

	return err;
}

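/**
 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free all transmit software resources
 **/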
static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
				i40e_free_tx_resources(vsi->tx_rings[i]);
	}

	if (vsi->xdp_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
				i40e_free_tx_resources(vsi->xdp_rings[i]);
	}
}

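/**
 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/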
static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
	return err;
}

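/**
 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free all receive software resources
 **/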
static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			i40e_free_rx_resources(vsi->rx_rings[i]);
}

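/**
 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 **/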
static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;

	if (!ring->q_vector || !ring->netdev)
		return;

	if ((vsi->tc_config.numtc <= 1) &&
	    !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
		netif_set_xps_queue(ring->netdev,
				    get_cpu_mask(ring->q_vector->v_idx),
				    ring->queue_index);
	}
}

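/**
 * i40e_configure_tx_ring - Configure a transmit ring context and rest
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
 **/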
static int i40e_configure_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	i40e_status err = 0;
	u32 qtx_ctl = 0;

	/* some ATR related tx ring init */
	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
		ring->atr_sample_rate = vsi->back->atr_sample_rate;
		ring->atr_count = 0;
	} else {
		ring->atr_sample_rate = 0;
	}

	/* configure XPS */
	i40e_config_xps_tx_ring(ring);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));

	tx_ctx.new_context = 1;
	tx_ctx.base = (ring->dma / 128);
	tx_ctx.qlen = ring->count;
	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
					       I40E_FLAG_FD_ATR_ENABLED));
	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
	/* FDIR VSI tx ring can still use RS bit and writebacks */
	if (vsi->type != I40E_VSI_FDIR)
		tx_ctx.head_wb_ena = 1;
	tx_ctx.head_wb_addr = ring->dma +
			      (ring->count * sizeof(struct i40e_tx_desc));

	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* Now associate this queue with this PCI function */
	if (vsi->type == I40E_VSI_VMDQ2) {
		qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
		qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
			   I40E_QTX_CTL_VFVM_INDX_MASK;
	} else {
		qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
	}

	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
	i40e_flush(hw);

	/* cache tail off for easier writes later */
	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);

	return 0;
}

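/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
 **/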
static int i40e_configure_rx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	i40e_status err = 0;

	ring->state = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(rx_ctx));

	ring->rx_buf_len = vsi->rx_buf_len;

	rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
				    BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));

	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;

	/* use 32 byte descriptors */
	rx_ctx.dsize = 1;

	/* descriptor type is always zero
	 * rx_ctx.dtype = 0;
	 */
	rx_ctx.hsplit_0 = 0;

	rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
	if (hw->revision_id == 0)
		rx_ctx.lrxqthresh = 0;
	else
		rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;
	rx_ctx.l2tsel = 1;
	/* this controls whether VLAN is stripped from inner headers */
	rx_ctx.showiv = 0;
	/* set the prefena field to 1 because the manual says to */
	rx_ctx.prefena = 1;

	/* clear the context in the HMC */
	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* configure Rx buffer alignment */
	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
		clear_ring_build_skb_enabled(ring);
	else
		set_ring_build_skb_enabled(ring);

	/* cache tail for quicker writes, and clear the reg because of a HW bug */
	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
	writel(0, ring->tail);

	i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));

	return 0;
}

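/**
 * i40e_vsi_configure_tx - Configure the VSI for Tx
 * @vsi: VSI structure describing this set of rings and resources
 *
 * Configure the Tx VSI for operation.
 **/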
static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
		err = i40e_configure_tx_ring(vsi->tx_rings[i]);

	if (!i40e_enabled_xdp_vsi(vsi))
		return err;

	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
		err = i40e_configure_tx_ring(vsi->xdp_rings[i]);

	return err;
}

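/**
 * i40e_vsi_configure_rx - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Configure the Rx VSI for operation.
 **/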
static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
		vsi->max_frame = I40E_MAX_RXBUFFER;
		vsi->rx_buf_len = I40E_RXBUFFER_2048;
#if (PAGE_SIZE < 8192)
	} else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
		vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
		vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
#endif
	} else {
		vsi->max_frame = I40E_MAX_RXBUFFER;
		vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
						       I40E_RXBUFFER_2048;
	}

	/* set up individual rings */
	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_configure_rx_ring(vsi->rx_rings[i]);

	return err;
}

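/**
 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
 * @vsi: ptr to the VSI
 **/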
static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring, *rx_ring;
	u16 qoffset, qcount;
	int i, n;

	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Reset the TC information */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			rx_ring = vsi->rx_rings[i];
			tx_ring = vsi->tx_rings[i];
			rx_ring->dcb_tc = 0;
			tx_ring->dcb_tc = 0;
		}
		return;
	}

	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
		if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
			continue;

		qoffset = vsi->tc_config.tc_info[n].qoffset;
		qcount = vsi->tc_config.tc_info[n].qcount;
		for (i = qoffset; i < (qoffset + qcount); i++) {
			rx_ring = vsi->rx_rings[i];
			tx_ring = vsi->tx_rings[i];
			rx_ring->dcb_tc = n;
			tx_ring->dcb_tc = n;
		}
	}
}

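/**
 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
 * @vsi: ptr to the VSI
 **/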
static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
{
	if (vsi->netdev)
		i40e_set_rx_mode(vsi->netdev);
}

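/**
 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
 * @vsi: Pointer to the targeted VSI
 *
 * This function replays the hlist on the hw where all the SB Flow Director
 * filters were saved.
 **/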
static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
{
	struct i40e_fdir_filter *filter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	/* Reset FDir counters as we're replaying all existing filters */
	pf->fd_tcp4_filter_cnt = 0;
	pf->fd_udp4_filter_cnt = 0;
	pf->fd_sctp4_filter_cnt = 0;
	pf->fd_ip4_filter_cnt = 0;

	hlist_for_each_entry_safe(filter, node,
				  &pf->fdir_filter_list, fdir_node) {
		i40e_add_del_fdir(vsi, filter, true);
	}
}

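/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 **/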
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int err;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);
	err = i40e_vsi_configure_tx(vsi);
	if (!err)
		err = i40e_vsi_configure_rx(vsi);

	return err;
}

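/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 **/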
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	bool has_xdp = i40e_enabled_xdp_vsi(vsi);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g. PFINT_ITRn[0..n-1] gets
	 * msix_entries[0..n-1] with vector[1..n] from the HW perspective.
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[i];

		q_vector->itr_countdown = ITR_COUNTDOWN_START;
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
		q_vector->rx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
		q_vector->tx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.itr);
		wr32(hw, I40E_PFINT_RATEN(vector - 1),
		     i40e_intrl_usec_to_reg(vsi->int_rate_limit));

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
			u32 val;

			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_TX <<
			       I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			if (has_xdp) {
				val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
				      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
				      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
				      (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
				      (I40E_QUEUE_TYPE_TX <<
				       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

				wr32(hw, I40E_QINT_TQCTL(nextqp), val);
			}

			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_RX <<
			       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST <<
					I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}

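/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: pointer to private device data structure
 **/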
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	      I40E_PFINT_ICR0_ENA_GRST_MASK |
	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
	      I40E_PFINT_ICR0_ENA_GPIO_MASK |
	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	      I40E_PFINT_ICR0_ENA_VFLR_MASK |
	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;

	if (pf->flags & I40E_FLAG_PTP)
		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;

	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	     I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	/* OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}

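/**
 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
 * @vsi: the VSI being configured
 **/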
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
	u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* set the ITR configuration */
	q_vector->itr_countdown = ITR_COUNTDOWN_START;
	q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
	q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
	q_vector->tx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);

	i40e_enable_misc_int_causes(pf);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the queue int */
	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
	      (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

	wr32(hw, I40E_QINT_RQCTL(0), val);

	if (i40e_enabled_xdp_vsi(vsi)) {
		val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		      (I40E_QUEUE_TYPE_TX
		       << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

		wr32(hw, I40E_QINT_TQCTL(nextqp), val);
	}

	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

	wr32(hw, I40E_QINT_TQCTL(0), val);
	i40e_flush(hw);
}

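/**
 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
 * @pf: board private structure
 **/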
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;

	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	i40e_flush(hw);
}

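/**
 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
 * @pf: board private structure
 * @clearpba: true when all pending interrupt events should be cleared
 **/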
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
	      (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);

	wr32(hw, I40E_PFINT_DYN_CTL0, val);
	i40e_flush(hw);
}

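/**
 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/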
static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

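/**
 * i40e_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/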
static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct i40e_q_vector *q_vector =
		container_of(notify, struct i40e_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

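/**
 * i40e_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/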
static void i40e_irq_affinity_release(struct kref *ref) {}

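/**
 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
 * @vsi: the VSI being configured
 * @basename: name for the vector
 *
 * Allocates MSI-X vectors and requests interrupts from the kernel.
 **/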
static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct i40e_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;
	int irq_num;

	for (vector = 0; vector < q_vectors; vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];

		irq_num = pf->msix_entries[base + vector].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  vsi->irq_handler,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSIX request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}

		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
		q_vector->affinity_notify.release = i40e_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* get_cpu_mask returns a static constant mask with
		 * a permanent lifetime so it's ok to use here.
		 */
		irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx));
	}

	vsi->irqs_ready = true;
	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = pf->msix_entries[base + vector].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, vsi->q_vectors[vector]);
	}
	return err;
}

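/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 **/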
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	/* disable interrupt causation from each queue */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u32 val;

		val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
		val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);

		val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
		val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);

		if (!i40e_enabled_xdp_vsi(vsi))
			continue;
		wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
	}

	/* disable each interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}

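/**
 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 **/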
static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_irq_dynamic_enable(vsi, i);
	} else {
		i40e_irq_dynamic_enable_icr0(pf, true);
	}

	i40e_flush(&pf->hw);
	return 0;
}

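/**
 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
 * @pf: board private structure
 **/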
static void i40e_stop_misc_vector(struct i40e_pf *pf)
{
	/* Disable ICR 0 */
	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
	i40e_flush(&pf->hw);
}

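/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts.  This is also used in
 * MSIX mode to handle the non-queue interrupts.
 **/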
static irqreturn_t i40e_intr(int irq, void *data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;
	struct i40e_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 icr0, icr0_remaining;
	u32 val, ena_mask;

	icr0 = rd32(hw, I40E_PFINT_ICR0);
	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		goto enable_intr;

	/* if interrupt but no bits showing, must be SWINT */
	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
		pf->sw_int_count++;

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
	}

	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
		struct i40e_q_vector *q_vector = vsi->q_vectors[0];

		/* schedule the napi to receive all the packets */
		if (!test_bit(__I40E_DOWN, pf->state))
			napi_schedule_irqoff(&q_vector->napi);
	}

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
		i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
	}

	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
			set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		val = rd32(hw, I40E_GLGEN_RSTAT);
		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		if (val == I40E_RESET_CORER) {
			pf->corer_count++;
		} else if (val == I40E_RESET_GLOBR) {
			pf->globr_count++;
		} else if (val == I40E_RESET_EMPR) {
			pf->empr_count++;
			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
		}
	}

	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
		dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
			 rd32(hw, I40E_PFHMC_ERRORINFO),
			 rd32(hw, I40E_PFHMC_ERRORDATA));
	}

	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
			i40e_ptp_tx_hwtstamp(pf);
		}
	}

	/* If a critical error is pending we have no choice but to reset the
	 * device.
	 * Report and mask out any remaining unexpected interrupts.
	 */
	icr0_remaining = icr0 & ena_mask;
	if (icr0_remaining) {
		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
			 icr0_remaining);
		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
			dev_info(&pf->pdev->dev, "device will be reset\n");
			set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
			i40e_service_event_schedule(pf);
		}
		ena_mask &= ~icr0_remaining;
	}
	ret = IRQ_HANDLED;

enable_intr:
	/* re-enable interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
	if (!test_bit(__I40E_DOWN, pf->state)) {
		i40e_service_event_schedule(pf);
		i40e_irq_dynamic_enable_icr0(pf, false);
	}

	return ret;
}

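/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: tx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/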
static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	struct i40e_vsi *vsi = tx_ring->vsi;
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_desc;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;
		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}
		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buf->raw_buf);

		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move us past the eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);

	return budget > 0;
}

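/**
 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/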
static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;
	struct i40e_vsi *vsi;

	if (!q_vector->tx.ring)
		return IRQ_HANDLED;

	vsi = q_vector->tx.ring->vsi;
	i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);

	return IRQ_HANDLED;
}

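/**
 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
 * @vsi: the VSI being configured
 * @v_idx: vector index
 * @qp_idx: queue pair index
 **/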
static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;

	/* Place XDP Tx ring in the same q_vector ring list as regular Tx */
	if (i40e_enabled_xdp_vsi(vsi)) {
		struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];

		xdp_ring->q_vector = q_vector;
		xdp_ring->next = q_vector->tx.ring;
		q_vector->tx.ring = xdp_ring;
		q_vector->tx.count++;
	}

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
}

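/**
 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per queue pair, but on a constrained vector budget, we
 * group the queue pairs as "efficiently" as possible.
 **/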
static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
{
	int qp_remaining = vsi->num_queue_pairs;
	int q_vectors = vsi->num_q_vectors;
	int num_ringpairs;
	int v_start = 0;
	int qp_idx = 0;

	for (; v_start < q_vectors; v_start++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];

		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);

		q_vector->num_ringpairs = num_ringpairs;

		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;

		while (num_ringpairs--) {
			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
			qp_idx++;
			qp_remaining--;
		}
	}
}

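/**
 * i40e_vsi_request_irq - Request IRQ from the OS
 * @vsi: the VSI being configured
 * @basename: name for the vector
 **/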
static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		err = i40e_vsi_request_irq_msix(vsi, basename);
	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
		err = request_irq(pf->pdev->irq, i40e_intr, 0,
				  pf->int_name, pf);
	else
		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
				  pf->int_name, pf);

	if (err)
		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
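/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts.  It's not called while the normal interrupt routine is executing.
 **/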
static void i40e_netpoll(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
	} else {
		i40e_intr(pf->pdev->irq, netdev);
	}
}
#endif

#define I40E_QTX_ENA_WAIT_COUNT 50

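/**
 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Tx queue of the PF to reach the
 * enabled or disabled state.  Returns -ETIMEDOUT in case of failing to reach
 * the requested state within multiple retries; else will return 0 in case of
 * success.
 **/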
static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 tx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}

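/**
 * i40e_control_tx_q - Start or stop a particular Tx queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @enable: start or stop the queue
 *
 * This function enables or disables a single queue.  Note that any delay
 * required after the operation is expected to be handled by the caller of
 * this function.
 **/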
static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	u32 tx_reg;
	int i;

	/* warn the TX unit of coming changes */
	i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
	if (!enable)
		usleep_range(10, 20);

	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
		tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
		if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
		    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
			break;
		usleep_range(1000, 2000);
	}

	/* Skip if the queue is already in the requested state */
	if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
		return;

	/* turn on/off the queue */
	if (enable) {
		wr32(hw, I40E_QTX_HEAD(pf_q), 0);
		tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
	} else {
		tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	}

	wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
}

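/**
 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
 * @seid: VSI SEID
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @is_xdp: true if the queue is used for XDP
 * @enable: start or stop the queue
 **/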
static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
				  bool is_xdp, bool enable)
{
	int ret;

	i40e_control_tx_q(pf, pf_q, enable);

	/* wait for the change to finish */
	ret = i40e_pf_txq_wait(pf, pf_q, enable);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VSI seid %d %sTx ring %d %sable timeout\n",
			 seid, (is_xdp ? "XDP " : ""), pf_q,
			 (enable ? "en" : "dis"));
	}

	return ret;
}

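/**
 * i40e_vsi_control_tx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/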
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret = 0;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     pf_q,
					     false /*is xdp*/, enable);
		if (ret)
			break;

		if (!i40e_enabled_xdp_vsi(vsi))
			continue;

		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     pf_q + vsi->alloc_queue_pairs,
					     true /*is xdp*/, enable);
		if (ret)
			break;
	}

	return ret;
}

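/**
 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.  Returns -ETIMEDOUT in case of failing to reach
 * the requested state within multiple retries; else will return 0 in case of
 * success.
 **/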
static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 rx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}

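/**
 * i40e_control_rx_q - Start or stop a particular Rx queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @enable: start or stop the queue
 *
 * This function enables or disables a single queue.  Note that any delay
 * required after the operation is expected to be handled by the caller of
 * this function.
 **/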
static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	u32 rx_reg;
	int i;

	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
		rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
		if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
		    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
			break;
		usleep_range(1000, 2000);
	}

	/* Skip if the queue is already in the requested state */
	if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
		return;

	/* turn on/off the queue */
	if (enable)
		rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
	else
		rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;

	wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
}

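/**
 * i40e_vsi_control_rx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/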
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret = 0;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		i40e_control_rx_q(pf, pf_q, enable);

		/* wait for the change to finish */
		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d %sable timeout\n",
				 vsi->seid, pf_q, (enable ? "en" : "dis"));
			break;
		}
	}

	/* Due to HW errata, on Rx disable only, the register can indicate done
	 * before it really is. Needs 50ms to be sure
	 */
	if (!enable)
		mdelay(50);

	return ret;
}

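/**
 * i40e_vsi_start_rings - Start a VSI's rings
 * @vsi: the VSI being configured
 **/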
int i40e_vsi_start_rings(struct i40e_vsi *vsi)
{
	int ret = 0;

	/* do rx first for enable and last for disable */
	ret = i40e_vsi_control_rx(vsi, true);
	if (ret)
		return ret;
	ret = i40e_vsi_control_tx(vsi, true);

	return ret;
}

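/**
 * i40e_vsi_stop_rings - Stop a VSI's rings
 * @vsi: the VSI being configured
 **/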
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
	/* When port TX is suspended, don't wait */
	if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
		return i40e_vsi_stop_rings_no_wait(vsi);

	/* do rx first for enable and last for disable
	 * Ignore return value, we need to shutdown whatever we can
	 */
	i40e_vsi_control_tx(vsi, false);
	i40e_vsi_control_rx(vsi, false);
}

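/**
 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
 * @vsi: the VSI being shutdown
 *
 * This function stops all the rings for a VSI but does not delay to verify
 * that rings have been disabled.  It is expected to be used in situations
 * where a reset will shortly follow, making the extra delay unnecessary.
 **/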
void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		i40e_control_tx_q(pf, pf_q, false);
		i40e_control_rx_q(pf, pf_q, false);
	}
}

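/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 **/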
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		if (!vsi->q_vectors)
			return;

		if (!vsi->irqs_ready)
			return;

		vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			int irq_num;
			u16 vector;

			vector = i + base;
			irq_num = pf->msix_entries[vector].vector;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !vsi->q_vectors[i]->num_ringpairs)
				continue;

			/* clear the affinity notifier in the IRQ descriptor */
			irq_set_affinity_notifier(irq_num, NULL);
			/* remove our suggested affinity mask for this IRQ */
			irq_set_affinity_hint(irq_num, NULL);
			synchronize_irq(irq_num);
			free_irq(irq_num, vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * We know that they come in pairs and always
			 * the Rx first, then the Tx.  To clear the
			 * link list, stick the EOL in the Rx head and
			 * then follow the Tx list until we hit EOL.
			 */
			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
			      >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			val |= I40E_QUEUE_END_OF_LIST
				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);

			while (qp != I40E_QUEUE_END_OF_LIST) {
				u32 next;

				val = rd32(hw, I40E_QINT_RQCTL(qp));

				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
					 I40E_QINT_RQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
					I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_RQCTL(qp), val);

				val = rd32(hw, I40E_QINT_TQCTL(qp));

				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;

				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
					 I40E_QINT_TQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
					I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_TQCTL(qp), val);
				qp = next;
			}
		}
	} else {
		free_irq(pf->pdev->irq, pf);

		val = rd32(hw, I40E_PFINT_LNKLST0);
		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
		      >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
		val |= I40E_QUEUE_END_OF_LIST
			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
		wr32(hw, I40E_PFINT_LNKLST0, val);

		val = rd32(hw, I40E_QINT_RQCTL(qp));
		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			 I40E_QINT_RQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_RQCTL(qp), val);

		val = rd32(hw, I40E_QINT_TQCTL(qp));

		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			 I40E_QINT_TQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_TQCTL(qp), val);
	}
}

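/**
 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
 * @vsi: VSI structure
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/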
static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *ring;

	if (!q_vector)
		return;

	/* disassociate q_vector from rings */
	i40e_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;

	i40e_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	/* only VSI w/ an associated netdev is set up w/ NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	vsi->q_vectors[v_idx] = NULL;

	kfree_rcu(q_vector, rcu);
}

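/**
 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI being un-configured
 *
 * This frees the memory allocated to the q_vectors and
 * deletes references to the NAPI struct.
 **/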
static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
{
	int v_idx;

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
		i40e_free_q_vector(vsi, v_idx);
}

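/**
 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
 * @pf: board private structure
 **/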
static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
{
	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		pci_disable_msix(pf->pdev);
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		kfree(pf->irq_pile);
		pf->irq_pile = NULL;
	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
		pci_disable_msi(pf->pdev);
	}
	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
}

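/**
 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @pf: board private structure
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/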
static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
	int i;

	i40e_stop_misc_vector(pf);
	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
		synchronize_irq(pf->msix_entries[0].vector);
		free_irq(pf->msix_entries[0].vector, pf);
	}

	i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
		      I40E_IWARP_IRQ_PILE_ID);

	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT - 1);
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	i40e_reset_interrupt_capability(pf);
}

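/**
 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 **/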
static void i40e_napi_enable_all(struct i40e_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring || q_vector->tx.ring)
			napi_enable(&q_vector->napi);
	}
}

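/**
 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being un-configured
 **/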
static void i40e_napi_disable_all(struct i40e_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring || q_vector->tx.ring)
			napi_disable(&q_vector->napi);
	}
}

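/**
 * i40e_vsi_close - Shut down a VSI
 * @vsi: the vsi to be quelled
 **/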
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
		i40e_down(vsi);
	i40e_vsi_free_irq(vsi);
	i40e_vsi_free_tx_resources(vsi);
	i40e_vsi_free_rx_resources(vsi);
	vsi->current_netdev_flags = 0;
	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		pf->flags |= I40E_FLAG_CLIENT_RESET;
}

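/**
 * i40e_quiesce_vsi - Pause a given VSI
 * @vsi: the VSI being paused
 **/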
static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
{
	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
	else
		i40e_vsi_close(vsi);
}

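/**
 * i40e_unquiesce_vsi - Resume a given VSI
 * @vsi: the VSI being resumed
 **/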
static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
{
	if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
		return;

	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
	else
		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
}

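/**
 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 **/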
static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_quiesce_vsi(pf->vsi[v]);
	}
}

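/**
 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
 * @pf: the PF
 **/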
static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_unquiesce_vsi(pf->vsi[v]);
	}
}

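/**
 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
 * @vsi: the VSI being configured
 *
 * Wait until all queues on a given VSI have been disabled.
 **/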
int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* Check and wait for the Tx queue */
		ret = i40e_pf_txq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Tx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}

		if (!i40e_enabled_xdp_vsi(vsi))
			goto wait_rx;

		/* Check and wait for the XDP Tx queue */
		ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
				       false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d XDP Tx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
wait_rx:
		/* Check and wait for the Rx queue */
		ret = i40e_pf_rxq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
	}

	return 0;
}

#ifdef CONFIG_I40E_DCB
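/**
 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
 * @pf: the PF
 *
 * This function waits for the queues to be in disabled state for all the
 * VSIs that are managed by this PF.
 **/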
static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
{
	int v, ret = 0;

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (pf->vsi[v]) {
			ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
			if (ret)
				break;
		}
	}

	return ret;
}

#endif
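/**
 * i40e_detect_recover_hung_queue - Detect and recover hung_queue for a Tx queue
 * @q_idx: TX queue number
 * @vsi: Pointer to VSI struct
 *
 * This function checks the specified queue for a hung condition: pending
 * descriptors with interrupts disabled.  If it appears hung, attempt to
 * recover by triggering a SW interrupt.
 **/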
static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct i40e_pf *pf;
	u32 val, tx_pending;
	int i;

	pf = vsi->back;

	/* now that we have an index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (q_idx == vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (!tx_ring)
		return;

	/* Read interrupt register */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		val = rd32(&pf->hw,
			   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
					       tx_ring->vsi->base_vector - 1));
	else
		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

	tx_pending = i40e_get_tx_pending(tx_ring);

	/* Interrupts are disabled and TX pending is non-zero,
	 * trigger the SW interrupt (don't wait). Worst case
	 * there will be one extra interrupt which may result
	 * into not cleaning any queues because queues are cleaned.
	 */
	if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
		i40e_force_wb(vsi, tx_ring->q_vector);
}

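/**
 * i40e_detect_recover_hung - Detect and recover hung queues
 * @pf: pointer to PF struct
 *
 * The LAN VSI has a netdev and its netdev has TX queues.  This function
 * checks each of those TX queues and, if hung, triggers recovery by issuing
 * a SW interrupt.
 **/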
static void i40e_detect_recover_hung(struct i40e_pf *pf)
{
	struct net_device *netdev;
	struct i40e_vsi *vsi;
	unsigned int i;

	/* Only for LAN VSI */
	vsi = pf->vsi[pf->lan_vsi];

	if (!vsi)
		return;

	/* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
		return;

	/* Make sure type is MAIN VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	/* Bail out if netif_carrier is not OK */
	if (!netif_carrier_ok(netdev))
		return;

	/* Go thru' TX queues for netdev */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;

		q = netdev_get_tx_queue(netdev, i);
		if (q)
			i40e_detect_recover_hung_queue(i, vsi);
	}
}

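/**
 * i40e_get_iscsi_tc_map - Get the iSCSI TC map
 * @pf: pointer to PF
 *
 * Get TC map for ISCSI PF type that will include iSCSI TC
 * and LAN TC.
 **/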
static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
{
	struct i40e_dcb_app_priority_table app;
	struct i40e_hw *hw = &pf->hw;
	u8 enabled_tc = 1; /* TC0 is always enabled */
	u8 tc, i;
	/* Get the iSCSI APP TLV */
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	for (i = 0; i < dcbcfg->numapps; i++) {
		app = dcbcfg->app[i];
		if (app.selector == I40E_APP_SEL_TCPIP &&
		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
			tc = dcbcfg->etscfg.prioritytable[app.priority];
			enabled_tc |= BIT(tc);
			break;
		}
	}

	return enabled_tc;
}

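/**
 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Return the number of TCs from given DCBx configuration
 **/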
static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
{
	int i, tc_unused = 0;
	u8 num_tc = 0;
	u8 ret = 0;

	/* Scan the ETS Config Priority Table to find
	 * traffic class enabled for a given priority
	 * and create a bitmask of enabled TCs
	 */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
		num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);

	/* Now scan the bitmask to check for
	 * contiguous TCs starting with TC0
	 */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (num_tc & BIT(i)) {
			if (!tc_unused) {
				ret++;
			} else {
				pr_err("Non-contiguous TC - Disabling DCB\n");
				return 1;
			}
		} else {
			tc_unused = 1;
		}
	}

	/* There is always at least TC0 */
	if (!ret)
		ret = 1;

	return ret;
}

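/**
 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Query the current DCB configuration and return the number of
 * traffic classes enabled from the given DCBX config
 **/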
static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
{
	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
	u8 enabled_tc = 1;
	u8 i;

	for (i = 0; i < num_tc; i++)
		enabled_tc |= BIT(i);

	return enabled_tc;
}

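/**
 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
 * @pf: PF being queried
 *
 * Return number of traffic classes enabled for the given PF
 **/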
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u8 i, enabled_tc = 1;
	u8 num_tc = 0;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	/* If DCB is not enabled then always in single TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return 1;

	/* SFP mode will be enabled for all TCs on port */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_num_tc(dcbcfg);

	/* MFP mode return count of enabled TCs for this PF */
	if (pf->hw.func_caps.iscsi)
		enabled_tc = i40e_get_iscsi_tc_map(pf);
	else
		return 1; /* Only TC0 */

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			num_tc++;
	}
	return num_tc;
}

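/**
 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
 * @pf: PF being queried
 *
 * Return a bitmap for enabled traffic classes for this PF.
 **/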
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
	/* If DCB is not enabled for this PF then just return default TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return I40E_DEFAULT_TRAFFIC_CLASS;

	/* SFP mode we want PF to be enabled for all TCs */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);

	/* MFP enabled and iSCSI PF type */
	if (pf->hw.func_caps.iscsi)
		return i40e_get_iscsi_tc_map(pf);
	else
		return I40E_DEFAULT_TRAFFIC_CLASS;
}

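/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Returns 0 on success, negative value on failure
 **/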
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
					       NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
					le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
	}

	return 0;
}

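/**
 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
 * @vsi: the VSI being configured
 * @enabled_tc: TC bitmap
 * @bw_share: BW shared credits per TC
 *
 * Returns 0 on success, negative value on failure
 **/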
static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
				       u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status ret;
	int i;

	bw_data.tc_valid_bits = enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
				       NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "AQ command Config VSI BW allocation per TC failed = %d\n",
			 vsi->back->hw.aq.asq_last_status);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		vsi->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}

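/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 **/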
5007static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5008{
5009 struct net_device *netdev = vsi->netdev;
5010 struct i40e_pf *pf = vsi->back;
5011 struct i40e_hw *hw = &pf->hw;
5012 u8 netdev_tc = 0;
5013 int i;
5014 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5015
5016 if (!netdev)
5017 return;
5018
5019 if (!enabled_tc) {
5020 netdev_reset_tc(netdev);
5021 return;
5022 }
5023
 /* Set up the actual number of TCs on the netdev */
5025 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5026 return;
5027
 /* Set per-TC queue ranges for the VSI */
5029 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 /* Only set TC queues for enabled tcs
  *
  * e.g. For a VSI that has TC0 and TC3 enabled the
  * enabled_tc bitmap would be 0x09 (binary 1001); the driver
  * will set the numtc for netdev as 2 that will be
  * referenced by the netdev layer as TC 0 and 1.
  */
5037 if (vsi->tc_config.enabled_tc & BIT(i))
5038 netdev_set_tc_queue(netdev,
5039 vsi->tc_config.tc_info[i].netdev_tc,
5040 vsi->tc_config.tc_info[i].qcount,
5041 vsi->tc_config.tc_info[i].qoffset);
5042 }
5043
 /* Assign UP2TC map for the VSI */
5045 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
 /* Get the actual TC# for the UP */
5047 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
 /* Get the mapped netdev TC# for the UP */
5049 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5050 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5051 }
5052}

/**
 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
 * @vsi: the VSI being updated
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 **/
5059static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5060 struct i40e_vsi_context *ctxt)
5061{
 /* copy just the sections touched not the entire info
  * since not all sections are valid as returned by
  * update vsi params
  */
5066 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5067 memcpy(&vsi->info.queue_mapping,
5068 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5069 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5070 sizeof(vsi->info.tc_mapping));
5071}

/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
 **/
5086static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5087{
5088 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5089 struct i40e_vsi_context ctxt;
5090 int ret = 0;
5091 int i;
5092
 /* Check if enabled_tc is same as existing or new TCs */
5094 if (vsi->tc_config.enabled_tc == enabled_tc)
5095 return ret;
5096
 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5098 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5099 if (enabled_tc & BIT(i))
5100 bw_share[i] = 1;
5101 }
5102
5103 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5104 if (ret) {
5105 dev_info(&vsi->back->pdev->dev,
5106 "Failed configuring TC map %d for VSI %d\n",
5107 enabled_tc, vsi->seid);
5108 goto out;
5109 }
5110
 /* Update Queue Pairs Mapping for currently enabled TCs */
5112 ctxt.seid = vsi->seid;
5113 ctxt.pf_num = vsi->back->hw.pf_id;
5114 ctxt.vf_num = 0;
5115 ctxt.uplink_seid = vsi->uplink_seid;
5116 ctxt.info = vsi->info;
5117 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5118
5119 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5120 ctxt.info.valid_sections |=
5121 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5122 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5123 }
5124
 /* Update the VSI after updating the VSI queue-mapping information */
5126 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
5127 if (ret) {
5128 dev_info(&vsi->back->pdev->dev,
5129 "Update vsi tc config failed, err %s aq_err %s\n",
5130 i40e_stat_str(&vsi->back->hw, ret),
5131 i40e_aq_str(&vsi->back->hw,
5132 vsi->back->hw.aq.asq_last_status));
5133 goto out;
5134 }
 /* update the local VSI info with the updated queue map */
5136 i40e_vsi_update_queue_map(vsi, &ctxt);
5137 vsi->info.valid_sections = 0;

 /* Update current VSI BW information */
5140 ret = i40e_vsi_get_bw_info(vsi);
5141 if (ret) {
5142 dev_info(&vsi->back->pdev->dev,
5143 "Failed updating vsi bw info, err %s aq_err %s\n",
5144 i40e_stat_str(&vsi->back->hw, ret),
5145 i40e_aq_str(&vsi->back->hw,
5146 vsi->back->hw.aq.asq_last_status));
5147 goto out;
5148 }
5149
 /* Update the netdev TC setup */
5151 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5152out:
5153 return ret;
5154}
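
/* Usage sketch (illustrative only): the expected calling pattern, as in
 * i40e_setup_tc() below, is to quiesce the VSI around the TC change:
 *
 *	i40e_quiesce_vsi(vsi);
 *	ret = i40e_vsi_config_tc(vsi, enabled_tc);
 *	i40e_unquiesce_vsi(vsi);
 */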

/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element
 **/
5163int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
5164{
5165 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
5166 struct i40e_pf *pf = veb->pf;
5167 int ret = 0;
5168 int i;
5169
 /* No TCs or already enabled TCs just return */
5171 if (!enabled_tc || veb->enabled_tc == enabled_tc)
5172 return ret;
5173
5174 bw_data.tc_valid_bits = enabled_tc;
5175

 /* Enable ETS TCs with equal BW Share for now */
5178 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5179 if (enabled_tc & BIT(i))
5180 bw_data.tc_bw_share_credits[i] = 1;
5181 }
5182
5183 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
5184 &bw_data, NULL);
5185 if (ret) {
5186 dev_info(&pf->pdev->dev,
5187 "VEB bw config failed, err %s aq_err %s\n",
5188 i40e_stat_str(&pf->hw, ret),
5189 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5190 goto out;
5191 }
5192
 /* Update the BW information */
5194 ret = i40e_veb_get_bw_info(veb);
5195 if (ret) {
5196 dev_info(&pf->pdev->dev,
5197 "Failed getting veb bw config, err %s aq_err %s\n",
5198 i40e_stat_str(&pf->hw, ret),
5199 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5200 }
5201
5202out:
5203 return ret;
5204}
5205
5206#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller has quiesced the VSIs before calling this function
 **/
5215static void i40e_dcb_reconfigure(struct i40e_pf *pf)
5216{
5217 u8 tc_map = 0;
5218 int ret;
5219 u8 v;
5220
 /* Enable the TCs available on PF to all VEBs */
5222 tc_map = i40e_pf_get_tc_map(pf);
5223 for (v = 0; v < I40E_MAX_VEB; v++) {
5224 if (!pf->veb[v])
5225 continue;
5226 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
5227 if (ret) {
5228 dev_info(&pf->pdev->dev,
5229 "Failed configuring TC for VEB seid=%d\n",
5230 pf->veb[v]->seid);
 /* Will try to configure as many components */
5232 }
5233 }
5234
 /* Update each VSI */
5236 for (v = 0; v < pf->num_alloc_vsi; v++) {
5237 if (!pf->vsi[v])
5238 continue;

 /* - Enable all TCs for the LAN VSI
  * - For all others keep them at TC0 for now
  */
5243 if (v == pf->lan_vsi)
5244 tc_map = i40e_pf_get_tc_map(pf);
5245 else
5246 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
5247
5248 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
5249 if (ret) {
5250 dev_info(&pf->pdev->dev,
5251 "Failed configuring TC for VSI seid=%d\n",
5252 pf->vsi[v]->seid);
 /* Will try to configure as many components */
5254 } else {
 /* Re-configure VSI vectors based on updated TC map */
5256 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
5257 if (pf->vsi[v]->netdev)
5258 i40e_dcbnl_set_all(pf->vsi[v]);
5259 }
5260 }
5261}

/**
 * i40e_resume_port_tx - Resume port Tx
 * @pf: PF struct
 *
 * Resume a port's Tx and issue a PF reset in case of failure
 **/
5270static int i40e_resume_port_tx(struct i40e_pf *pf)
5271{
5272 struct i40e_hw *hw = &pf->hw;
5273 int ret;
5274
5275 ret = i40e_aq_resume_port_tx(hw, NULL);
5276 if (ret) {
5277 dev_info(&pf->pdev->dev,
5278 "Resume Port Tx failed, err %s aq_err %s\n",
5279 i40e_stat_str(&pf->hw, ret),
5280 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 /* Schedule PF reset to recover */
5282 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
5283 i40e_service_event_schedule(pf);
5284 }
5285
5286 return ret;
5287}

/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 **/
5296static int i40e_init_pf_dcb(struct i40e_pf *pf)
5297{
5298 struct i40e_hw *hw = &pf->hw;
5299 int err = 0;
5300
 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
5302 if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT)
5303 goto out;
5304
 /* Get the initial DCB configuration */
5306 err = i40e_init_dcb(hw);
5307 if (!err) {
 /* Device/Function is not DCBX capable */
5309 if ((!hw->func_caps.dcb) ||
5310 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
5311 dev_info(&pf->pdev->dev,
5312 "DCBX offload is not supported or is disabled for this PF.\n");
5313 } else {
 /* When status is not DISABLED then DCBX is in FW */
5315 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
5316 DCB_CAP_DCBX_VER_IEEE;
5317
5318 pf->flags |= I40E_FLAG_DCB_CAPABLE;

 /* Enable DCB tagging only when more than one TC
  * or explicitly disable if only one TC
  */
5322 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5323 pf->flags |= I40E_FLAG_DCB_ENABLED;
5324 else
5325 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5326 dev_dbg(&pf->pdev->dev,
5327 "DCBX offload is supported for this PF.\n");
5328 }
5329 } else {
5330 dev_info(&pf->pdev->dev,
5331 "Query for DCB configuration failed, err %s aq_err %s\n",
5332 i40e_stat_str(&pf->hw, err),
5333 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5334 }
5335
5336out:
5337 return err;
5338}
5339#endif
5340#define SPEED_SIZE 14
5341#define FC_SIZE 8
/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 * @isup: true for link up, false for link down
 */
5346void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
5347{
5348 enum i40e_aq_link_speed new_speed;
5349 char *speed = "Unknown";
5350 char *fc = "Unknown";
5351 char *fec = "";
5352 char *req_fec = "";
5353 char *an = "";
5354
5355 new_speed = vsi->back->hw.phy.link_info.link_speed;
5356
5357 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
5358 return;
5359 vsi->current_isup = isup;
5360 vsi->current_speed = new_speed;
5361 if (!isup) {
5362 netdev_info(vsi->netdev, "NIC Link is Down\n");
5363 return;
5364 }

 /* Warn user if link speed on NPAR enabled partition is not at
  * least 10GB
  */
5369 if (vsi->back->hw.func_caps.npar_enable &&
5370 (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
5371 vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
5372 netdev_warn(vsi->netdev,
5373 "The partition detected link speed that is less than 10Gbps\n");
5374
5375 switch (vsi->back->hw.phy.link_info.link_speed) {
5376 case I40E_LINK_SPEED_40GB:
5377 speed = "40 G";
5378 break;
5379 case I40E_LINK_SPEED_20GB:
5380 speed = "20 G";
5381 break;
5382 case I40E_LINK_SPEED_25GB:
5383 speed = "25 G";
5384 break;
5385 case I40E_LINK_SPEED_10GB:
5386 speed = "10 G";
5387 break;
5388 case I40E_LINK_SPEED_1GB:
5389 speed = "1000 M";
5390 break;
5391 case I40E_LINK_SPEED_100MB:
5392 speed = "100 M";
5393 break;
5394 default:
5395 break;
5396 }
5397
5398 switch (vsi->back->hw.fc.current_mode) {
5399 case I40E_FC_FULL:
5400 fc = "RX/TX";
5401 break;
5402 case I40E_FC_TX_PAUSE:
5403 fc = "TX";
5404 break;
5405 case I40E_FC_RX_PAUSE:
5406 fc = "RX";
5407 break;
5408 default:
5409 fc = "None";
5410 break;
5411 }
5412
5413 if (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
5414 req_fec = ", Requested FEC: None";
5415 fec = ", FEC: None";
5416 an = ", Autoneg: False";
5417
5418 if (vsi->back->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
5419 an = ", Autoneg: True";
5420
5421 if (vsi->back->hw.phy.link_info.fec_info &
5422 I40E_AQ_CONFIG_FEC_KR_ENA)
5423 fec = ", FEC: CL74 FC-FEC/BASE-R";
5424 else if (vsi->back->hw.phy.link_info.fec_info &
5425 I40E_AQ_CONFIG_FEC_RS_ENA)
5426 fec = ", FEC: CL108 RS-FEC";

 /* 'CL108 RS-FEC' should be displayed when RS is requested, or
  * both RS and FC are requested
  */
5431 if (vsi->back->hw.phy.link_info.req_fec_info &
5432 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
5433 if (vsi->back->hw.phy.link_info.req_fec_info &
5434 I40E_AQ_REQUEST_FEC_RS)
5435 req_fec = ", Requested FEC: CL108 RS-FEC";
5436 else
5437 req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
5438 }
5439 }
5440
5441 netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
5442 speed, req_fec, fec, an, fc);
5443}
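
/* Illustrative example of the resulting log line (values are made up):
 *
 *	NIC Link is Up, 25 Gbps Full Duplex, Requested FEC: CL74
 *	FC-FEC/BASE-R, FEC: CL74 FC-FEC/BASE-R, Autoneg: True,
 *	Flow Control: None
 */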

/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 **/
5449static int i40e_up_complete(struct i40e_vsi *vsi)
5450{
5451 struct i40e_pf *pf = vsi->back;
5452 int err;
5453
5454 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5455 i40e_vsi_configure_msix(vsi);
5456 else
5457 i40e_configure_msi_and_legacy(vsi);
5458
 /* start rings */
5460 err = i40e_vsi_start_rings(vsi);
5461 if (err)
5462 return err;
5463
5464 clear_bit(__I40E_VSI_DOWN, vsi->state);
5465 i40e_napi_enable_all(vsi);
5466 i40e_vsi_enable_irq(vsi);
5467
5468 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
5469 (vsi->netdev)) {
5470 i40e_print_link_message(vsi, true);
5471 netif_tx_start_all_queues(vsi->netdev);
5472 netif_carrier_on(vsi->netdev);
5473 } else if (vsi->netdev) {
5474 i40e_print_link_message(vsi, false);
 /* need to check for qualified module here */
5476 if ((pf->hw.phy.link_info.link_info &
5477 I40E_AQ_MEDIA_AVAILABLE) &&
5478 (!(pf->hw.phy.link_info.an_info &
5479 I40E_AQ_QUALIFIED_MODULE)))
5480 netdev_err(vsi->netdev,
5481 "the driver failed to link because an unqualified module was detected.");
5482 }
5483
 /* replay FDIR SB filters */
5485 if (vsi->type == I40E_VSI_FDIR) {
 /* reset fd counters */
5487 pf->fd_add_err = 0;
5488 pf->fd_atr_cnt = 0;
5489 i40e_fdir_filter_restore(vsi);
5490 }

 /* On the next run of the service_task, notify any clients of the new
  * opened netdev
  */
5495 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
5496 i40e_service_event_schedule(pf);
5497
5498 return 0;
5499}

/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.
 **/
5508static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
5509{
5510 struct i40e_pf *pf = vsi->back;
5511
5512 WARN_ON(in_interrupt());
5513 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
5514 usleep_range(1000, 2000);
5515 i40e_down(vsi);
5516
5517 i40e_up(vsi);
5518 clear_bit(__I40E_CONFIG_BUSY, pf->state);
5519}
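
/* Usage sketch (illustrative only): configuration changes such as an MTU
 * update typically rebuild a running VSI through this helper:
 *
 *	netdev->mtu = new_mtu;
 *	if (netif_running(netdev))
 *		i40e_vsi_reinit_locked(vsi);
 */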

/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 **/
5525int i40e_up(struct i40e_vsi *vsi)
5526{
5527 int err;
5528
5529 err = i40e_vsi_configure(vsi);
5530 if (!err)
5531 err = i40e_up_complete(vsi);
5532
5533 return err;
5534}

/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 **/
5540void i40e_down(struct i40e_vsi *vsi)
5541{
5542 int i;

 /* It is assumed that the caller of this function
  * sets the vsi->state __I40E_VSI_DOWN bit.
  */
5547 if (vsi->netdev) {
5548 netif_carrier_off(vsi->netdev);
5549 netif_tx_disable(vsi->netdev);
5550 }
5551 i40e_vsi_disable_irq(vsi);
5552 i40e_vsi_stop_rings(vsi);
5553 i40e_napi_disable_all(vsi);
5554
5555 for (i = 0; i < vsi->num_queue_pairs; i++) {
5556 i40e_clean_tx_ring(vsi->tx_rings[i]);
5557 if (i40e_enabled_xdp_vsi(vsi))
5558 i40e_clean_tx_ring(vsi->xdp_rings[i]);
5559 i40e_clean_rx_ring(vsi->rx_rings[i]);
5560 }
5561
5562}

/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 **/
5569static int i40e_setup_tc(struct net_device *netdev, u8 tc)
5570{
5571 struct i40e_netdev_priv *np = netdev_priv(netdev);
5572 struct i40e_vsi *vsi = np->vsi;
5573 struct i40e_pf *pf = vsi->back;
5574 u8 enabled_tc = 0;
5575 int ret = -EINVAL;
5576 int i;
5577
 /* Check if DCB enabled to continue */
5579 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5580 netdev_info(netdev, "DCB is not enabled for adapter\n");
5581 goto exit;
5582 }
5583
 /* Check if MFP enabled */
5585 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
5586 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
5587 goto exit;
5588 }
5589
 /* Check whether tc count is within enabled limit */
5591 if (tc > i40e_pf_get_num_tc(pf)) {
5592 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
5593 goto exit;
5594 }
5595
 /* Generate TC map for number of tc requested */
5597 for (i = 0; i < tc; i++)
5598 enabled_tc |= BIT(i);
5599
 /* Requesting same TC configuration as already enabled */
5601 if (enabled_tc == vsi->tc_config.enabled_tc)
5602 return 0;
5603
 /* Quiesce VSI queues */
5605 i40e_quiesce_vsi(vsi);
5606
 /* Configure VSI for enabled TCs */
5608 ret = i40e_vsi_config_tc(vsi, enabled_tc);
5609 if (ret) {
5610 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
5611 vsi->seid);
5612 goto exit;
5613 }
5614
 /* Unquiesce VSI */
5616 i40e_unquiesce_vsi(vsi);
5617
5618exit:
5619 return ret;
5620}
5621
5622static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
5623 void *type_data)
5624{
5625 struct tc_mqprio_qopt *mqprio = type_data;
5626
5627 if (type != TC_SETUP_MQPRIO)
5628 return -EOPNOTSUPP;
5629
5630 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
5631
5632 return i40e_setup_tc(netdev, mqprio->num_tc);
5633}
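
/* Illustrative note (not from the original source): mqprio hands the
 * driver a TC count, which i40e_setup_tc() converts into a contiguous
 * bitmap; num_tc = 3 becomes enabled_tc = 0x7 (TC0..TC2).
 */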

/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
5647int i40e_open(struct net_device *netdev)
5648{
5649 struct i40e_netdev_priv *np = netdev_priv(netdev);
5650 struct i40e_vsi *vsi = np->vsi;
5651 struct i40e_pf *pf = vsi->back;
5652 int err;
5653
 /* disallow open during test */
5655 if (test_bit(__I40E_TESTING, pf->state) ||
5656 test_bit(__I40E_BAD_EEPROM, pf->state))
5657 return -EBUSY;
5658
5659 netif_carrier_off(netdev);
5660
5661 err = i40e_vsi_open(vsi);
5662 if (err)
5663 return err;
5664
 /* configure global TSO hardware offload settings */
5666 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
5667 TCP_FLAG_FIN) >> 16);
5668 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
5669 TCP_FLAG_FIN |
5670 TCP_FLAG_CWR) >> 16);
5671 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
5672
5673 udp_tunnel_get_rx_info(netdev);
5674
5675 return 0;
5676}

/**
 * i40e_vsi_open - bring up a VSI and allocate its resources
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI.
 *
 * Returns 0 on success, negative value on failure
 *
 * Note: expects to be called while under rtnl_lock()
 **/
5688int i40e_vsi_open(struct i40e_vsi *vsi)
5689{
5690 struct i40e_pf *pf = vsi->back;
5691 char int_name[I40E_INT_NAME_STR_LEN];
5692 int err;
5693
 /* allocate descriptors */
5695 err = i40e_vsi_setup_tx_resources(vsi);
5696 if (err)
5697 goto err_setup_tx;
5698 err = i40e_vsi_setup_rx_resources(vsi);
5699 if (err)
5700 goto err_setup_rx;
5701
5702 err = i40e_vsi_configure(vsi);
5703 if (err)
5704 goto err_setup_rx;
5705
5706 if (vsi->netdev) {
5707 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5708 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
5709 err = i40e_vsi_request_irq(vsi, int_name);
5710 if (err)
5711 goto err_setup_rx;

 /* Notify the stack of the actual queue counts. */
5714 err = netif_set_real_num_tx_queues(vsi->netdev,
5715 vsi->num_queue_pairs);
5716 if (err)
5717 goto err_set_queues;
5718
5719 err = netif_set_real_num_rx_queues(vsi->netdev,
5720 vsi->num_queue_pairs);
5721 if (err)
5722 goto err_set_queues;
5723
5724 } else if (vsi->type == I40E_VSI_FDIR) {
5725 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
5726 dev_driver_string(&pf->pdev->dev),
5727 dev_name(&pf->pdev->dev));
5728 err = i40e_vsi_request_irq(vsi, int_name);
5729
5730 } else {
5731 err = -EINVAL;
5732 goto err_setup_rx;
5733 }
5734
5735 err = i40e_up_complete(vsi);
5736 if (err)
5737 goto err_up_complete;
5738
5739 return 0;
5740
5741err_up_complete:
5742 i40e_down(vsi);
5743err_set_queues:
5744 i40e_vsi_free_irq(vsi);
5745err_setup_rx:
5746 i40e_vsi_free_rx_resources(vsi);
5747err_setup_tx:
5748 i40e_vsi_free_tx_resources(vsi);
5749 if (vsi == pf->vsi[pf->lan_vsi])
5750 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
5751
5752 return err;
5753}

/**
 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
 * @pf: Pointer to PF
 *
 * This function destroys the hlist where all the Flow Director
 * filters were saved.
 **/
5762static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5763{
5764 struct i40e_fdir_filter *filter;
5765 struct i40e_flex_pit *pit_entry, *tmp;
5766 struct hlist_node *node2;
5767
5768 hlist_for_each_entry_safe(filter, node2,
5769 &pf->fdir_filter_list, fdir_node) {
5770 hlist_del(&filter->fdir_node);
5771 kfree(filter);
5772 }
5773
5774 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
5775 list_del(&pit_entry->list);
5776 kfree(pit_entry);
5777 }
5778 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
5779
5780 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
5781 list_del(&pit_entry->list);
5782 kfree(pit_entry);
5783 }
5784 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
5785
5786 pf->fdir_pf_active_filters = 0;
5787 pf->fd_tcp4_filter_cnt = 0;
5788 pf->fd_udp4_filter_cnt = 0;
5789 pf->fd_sctp4_filter_cnt = 0;
5790 pf->fd_ip4_filter_cnt = 0;
5791
 /* Reprogram the default input set for TCP/IPv4 */
5793 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
5794 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
5795 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
5796
 /* Reprogram the default input set for UDP/IPv4 */
5798 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
5799 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
5800 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
5801
 /* Reprogram the default input set for SCTP/IPv4 */
5803 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
5804 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
5805 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
5806
 /* Reprogram the default input set for Other/IPv4 */
5808 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
5809 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
5810}

/**
 * i40e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 *
 * Returns 0, this is not allowed to fail
 **/
5822int i40e_close(struct net_device *netdev)
5823{
5824 struct i40e_netdev_priv *np = netdev_priv(netdev);
5825 struct i40e_vsi *vsi = np->vsi;
5826
5827 i40e_vsi_close(vsi);
5828
5829 return 0;
5830}

/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 **/
5843void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
5844{
5845 u32 val;
5846
5847 WARN_ON(in_interrupt());
5848
5849
5850
5851 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {

 /* Request a Global Reset
  *
  * This will start the chip's countdown to the actual full
  * chip reset event, and a warning interrupt to be sent
  * to all PFs, including the requestor.  Our handler
  * for the warning interrupt will deal with the shutdown
  * and recovery of the switch setup.
  */
5861 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
5862 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5863 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
5864 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5865
5866 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {

 /* Request a Core Reset
  *
  * Same as Global Reset, except does *not* include the MAC/PHY
  */
5872 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
5873 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5874 val |= I40E_GLGEN_RTRIG_CORER_MASK;
5875 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5876 i40e_flush(&pf->hw);
5877
5878 } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {

 /* Request a PF Reset
  *
  * Resets only the PF-specific registers
  *
  * This goes directly to the tear-down and rebuild of
  * the switch, since we need to do all the recovery as
  * for the Core Reset.
  */
5888 dev_dbg(&pf->pdev->dev, "PFR requested\n");
5889 i40e_handle_reset_warning(pf, lock_acquired);
5890
5891 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
5892 int v;

 /* Find the VSI(s) that requested a re-init */
5895 dev_info(&pf->pdev->dev,
5896 "VSI reinit requested\n");
5897 for (v = 0; v < pf->num_alloc_vsi; v++) {
5898 struct i40e_vsi *vsi = pf->vsi[v];
5899
5900 if (vsi != NULL &&
5901 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
5902 vsi->state))
5903 i40e_vsi_reinit_locked(pf->vsi[v]);
5904 }
5905 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
5906 int v;

 /* Find the VSI(s) that need to be brought down */
5909 dev_info(&pf->pdev->dev, "VSI down requested\n");
5910 for (v = 0; v < pf->num_alloc_vsi; v++) {
5911 struct i40e_vsi *vsi = pf->vsi[v];
5912
5913 if (vsi != NULL &&
5914 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
5915 vsi->state)) {
5916 set_bit(__I40E_VSI_DOWN, vsi->state);
5917 i40e_down(vsi);
5918 }
5919 }
5920 } else {
5921 dev_info(&pf->pdev->dev,
5922 "bad reset request 0x%08x\n", reset_flags);
5923 }
5924}
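
/* Usage sketch (illustrative only): most callers in this file do not call
 * i40e_do_reset() directly from hot paths; they request a reset and let
 * the service task perform it:
 *
 *	set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
 *	i40e_service_event_schedule(pf);
 */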
5925
5926#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
 * @pf: board private structure
 * @old_cfg: current DCB config
 * @new_cfg: new DCB config
 **/
5933bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5934 struct i40e_dcbx_config *old_cfg,
5935 struct i40e_dcbx_config *new_cfg)
5936{
5937 bool need_reconfig = false;

 /* Check if ETS configuration has changed */
5940 if (memcmp(&new_cfg->etscfg,
5941 &old_cfg->etscfg,
5942 sizeof(new_cfg->etscfg))) {
 /* If Priority Table has changed reconfig is needed */
5944 if (memcmp(&new_cfg->etscfg.prioritytable,
5945 &old_cfg->etscfg.prioritytable,
5946 sizeof(new_cfg->etscfg.prioritytable))) {
5947 need_reconfig = true;
5948 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5949 }
5950
5951 if (memcmp(&new_cfg->etscfg.tcbwtable,
5952 &old_cfg->etscfg.tcbwtable,
5953 sizeof(new_cfg->etscfg.tcbwtable)))
5954 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5955
5956 if (memcmp(&new_cfg->etscfg.tsatable,
5957 &old_cfg->etscfg.tsatable,
5958 sizeof(new_cfg->etscfg.tsatable)))
5959 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5960 }
5961
 /* Check if PFC configuration has changed */
5963 if (memcmp(&new_cfg->pfc,
5964 &old_cfg->pfc,
5965 sizeof(new_cfg->pfc))) {
5966 need_reconfig = true;
5967 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5968 }
5969
 /* Check if APP Table has changed */
5971 if (memcmp(&new_cfg->app,
5972 &old_cfg->app,
5973 sizeof(new_cfg->app))) {
5974 need_reconfig = true;
5975 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5976 }
5977
5978 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
5979 return need_reconfig;
5980}

/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
5987static int i40e_handle_lldp_event(struct i40e_pf *pf,
5988 struct i40e_arq_event_info *e)
5989{
5990 struct i40e_aqc_lldp_get_mib *mib =
5991 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
5992 struct i40e_hw *hw = &pf->hw;
5993 struct i40e_dcbx_config tmp_dcbx_cfg;
5994 bool need_reconfig = false;
5995 int ret = 0;
5996 u8 type;

 /* Not DCB capable or capability disabled */
5999 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
6000 return ret;
6001
 /* Ignore if event is not for Nearest Bridge */
6003 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
6004 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
6005 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
6006 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
6007 return ret;
6008
 /* Check MIB Type and return if event for Remote MIB update */
6010 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
6011 dev_dbg(&pf->pdev->dev,
6012 "LLDP event mib type %s\n", type ? "remote" : "local");
6013 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
 /* Update the remote cached instance and return */
6015 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
6016 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
6017 &hw->remote_dcbx_config);
6018 goto exit;
6019 }
6020
 /* Store the old configuration */
6022 tmp_dcbx_cfg = hw->local_dcbx_config;

 /* Reset the old DCBX configuration data */
6025 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
 /* Get updated DCBX data from firmware */
6027 ret = i40e_get_dcb_config(&pf->hw);
6028 if (ret) {
6029 dev_info(&pf->pdev->dev,
6030 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
6031 i40e_stat_str(&pf->hw, ret),
6032 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6033 goto exit;
6034 }
6035
 /* No change detected in DCBX configs */
6037 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
6038 sizeof(tmp_dcbx_cfg))) {
6039 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
6040 goto exit;
6041 }
6042
6043 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
6044 &hw->local_dcbx_config);
6045
6046 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
6047
6048 if (!need_reconfig)
6049 goto exit;

 /* Enable DCB tagging only when more than one TC */
6052 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6053 pf->flags |= I40E_FLAG_DCB_ENABLED;
6054 else
6055 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6056
6057 set_bit(__I40E_PORT_SUSPENDED, pf->state);
 /* Reconfiguration needed quiesce all VSIs */
6059 i40e_pf_quiesce_all_vsi(pf);

 /* Changes in configuration update VEB/VSI */
6062 i40e_dcb_reconfigure(pf);
6063
6064 ret = i40e_resume_port_tx(pf);
6065
6066 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
6067
6068 if (ret)
6069 goto exit;

 /* Wait for the PF's queues to be disabled */
6072 ret = i40e_pf_wait_queues_disabled(pf);
6073 if (ret) {
 /* Schedule PF reset to recover */
6075 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6076 i40e_service_event_schedule(pf);
6077 } else {
6078 i40e_pf_unquiesce_all_vsi(pf);
6079 pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
6080 I40E_FLAG_CLIENT_L2_CHANGE);
6081 }
6082
6083exit:
6084 return ret;
6085}
6086#endif

/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 **/
6094void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
6095{
6096 rtnl_lock();
6097 i40e_do_reset(pf, reset_flags, true);
6098 rtnl_unlock();
6099}

/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VF queues
 **/
6109static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
6110 struct i40e_arq_event_info *e)
6111{
6112 struct i40e_aqc_lan_overflow *data =
6113 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
6114 u32 queue = le32_to_cpu(data->prtdcb_rupto);
6115 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
6116 struct i40e_hw *hw = &pf->hw;
6117 struct i40e_vf *vf;
6118 u16 vf_id;
6119
6120 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
6121 queue, qtx_ctl);

 /* Queue belongs to VF, find the VF and issue VF reset */
6124 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
6125 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
6126 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
6127 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
6128 vf_id -= hw->func_caps.vf_base_id;
6129 vf = &pf->vf[vf_id];
6130 i40e_vc_notify_vf_reset(vf);
 /* Allow VF to process pending reset notification */
6132 msleep(20);
6133 i40e_reset_vf(vf, false);
6134 }
6135}

/**
 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
 * @pf: board private structure
 **/
6141u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
6142{
6143 u32 val, fcnt_prog;
6144
6145 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
6146 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
6147 return fcnt_prog;
6148}

/**
 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
 * @pf: board private structure
 **/
6154u32 i40e_get_current_fd_count(struct i40e_pf *pf)
6155{
6156 u32 val, fcnt_prog;
6157
6158 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
6159 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
6160 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
6161 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
6162 return fcnt_prog;
6163}
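
/* Illustrative note (not from the original source): the FD counters are
 * packed into one register, so each field is recovered by masking and
 * shifting, e.g. for the best-effort count:
 *
 *	best = (val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
 *	       I40E_PFQF_FDSTAT_BEST_CNT_SHIFT;
 */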

/**
 * i40e_get_global_fd_count - Get total FD filters programmed on device
 * @pf: board private structure
 **/
6169u32 i40e_get_global_fd_count(struct i40e_pf *pf)
6170{
6171 u32 val, fcnt_prog;
6172
6173 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
6174 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
6175 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
6176 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
6177 return fcnt_prog;
6178}

/**
 * i40e_fdir_check_and_reenable - Function to reenable FD ATR or SB if disabled
 * @pf: board private structure
 **/
6184void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
6185{
6186 struct i40e_fdir_filter *filter;
6187 u32 fcnt_prog, fcnt_avail;
6188 struct hlist_node *node;
6189
6190 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
6191 return;

 /* Check if, FD SB or ATR was auto disabled and if there is enough room
  * to re-enable
  */
6194 fcnt_prog = i40e_get_global_fd_count(pf);
6195 fcnt_avail = pf->fdir_pf_filter_count;
6196 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
6197 (pf->fd_add_err == 0) ||
6198 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
6199 if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
6200 pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
6201 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
6202 (I40E_DEBUG_FD & pf->hw.debug_mask))
6203 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
6204 }
6205 }

 /* We should wait for even more space before re-enabling ATR.
  * Additionally, we cannot enable ATR as long as we still have TCP SB
  * rules active.
  */
6211 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
6212 (pf->fd_tcp4_filter_cnt == 0)) {
6213 if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
6214 pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
6215 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
6216 (I40E_DEBUG_FD & pf->hw.debug_mask))
6217 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
6218 }
6219 }

 /* if hw had a problem adding a filter, delete it */
6222 if (pf->fd_inv > 0) {
6223 hlist_for_each_entry_safe(filter, node,
6224 &pf->fdir_filter_list, fdir_node) {
6225 if (filter->fd_id == pf->fd_inv) {
6226 hlist_del(&filter->fdir_node);
6227 kfree(filter);
6228 pf->fdir_pf_active_filters--;
6229 }
6230 }
6231 }
6232}
6233
6234#define I40E_MIN_FD_FLUSH_INTERVAL 10
6235#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 **/
6240static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
6241{
6242 unsigned long min_flush_time;
6243 int flush_wait_retry = 50;
6244 bool disable_atr = false;
6245 int fd_room;
6246 int reg;
6247
6248 if (!time_after(jiffies, pf->fd_flush_timestamp +
6249 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
6250 return;

 /* If the flush is happening too quick and we have mostly SB rules we
  * should not re-enable ATR for some time.
  */
6255 min_flush_time = pf->fd_flush_timestamp +
6256 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
6257 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
6258
6259 if (!(time_after(jiffies, min_flush_time)) &&
6260 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
6261 if (I40E_DEBUG_FD & pf->hw.debug_mask)
6262 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
6263 disable_atr = true;
6264 }
6265
6266 pf->fd_flush_timestamp = jiffies;
6267 pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
 /* flush all filters */
6269 wr32(&pf->hw, I40E_PFQF_CTL_1,
6270 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
6271 i40e_flush(&pf->hw);
6272 pf->fd_flush_cnt++;
6273 pf->fd_add_err = 0;
6274 do {
 /* Check FD flush status every 5-6msec */
6276 usleep_range(5000, 6000);
6277 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
6278 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
6279 break;
6280 } while (flush_wait_retry--);
6281 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
6282 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
6283 } else {
 /* replay sideband filters */
6285 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
6286 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
6287 pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
6288 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
6289 if (I40E_DEBUG_FD & pf->hw.debug_mask)
6290 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
6291 }
6292}

/**
 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
 * @pf: board private structure
 **/
6298u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
6299{
6300 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
6301}
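
/* Worked example (illustrative only): with 120 filters programmed in
 * total and 100 of them sideband (ntuple) rules, the ATR count reported
 * here is 120 - 100 = 20.
 */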

/* We can see up to 256 filter programming descriptors in transit if the
 * filters are being applied really fast; before we see the first
 * filter miss error on Rx queue 0. Accumulating enough errors before
 * reacting will make sure we don't cause a flush too often.
 */
6308#define I40E_MAX_FD_PROGRAM_ERROR 256

/**
 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
 * @pf: board private structure
 **/
6314static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
6315{
 /* if interface is down do nothing */
6318 if (test_bit(__I40E_DOWN, pf->state))
6319 return;
6320
6321 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
6322 i40e_fdir_flush_and_replay(pf);
6323
6324 i40e_fdir_check_and_reenable(pf);
6325
6326}

/**
 * i40e_vsi_link_event - notify VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 **/
6333static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
6334{
6335 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
6336 return;
6337
6338 switch (vsi->type) {
6339 case I40E_VSI_MAIN:
6340 if (!vsi->netdev || !vsi->netdev_registered)
6341 break;
6342
6343 if (link_up) {
6344 netif_carrier_on(vsi->netdev);
6345 netif_tx_wake_all_queues(vsi->netdev);
6346 } else {
6347 netif_carrier_off(vsi->netdev);
6348 netif_tx_stop_all_queues(vsi->netdev);
6349 }
6350 break;
6351
6352 case I40E_VSI_SRIOV:
6353 case I40E_VSI_VMDQ2:
6354 case I40E_VSI_CTRL:
6355 case I40E_VSI_IWARP:
6356 case I40E_VSI_MIRROR:
6357 default:
 /* there is no notification for other VSIs */
6359 break;
6360 }
6361}

/**
 * i40e_veb_link_event - notify elements on the veb of a link event
 * @veb: veb to be notified
 * @link_up: link up or down
 **/
6368static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
6369{
6370 struct i40e_pf *pf;
6371 int i;
6372
6373 if (!veb || !veb->pf)
6374 return;
6375 pf = veb->pf;

 /* depth first... */
6378 for (i = 0; i < I40E_MAX_VEB; i++)
6379 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
6380 i40e_veb_link_event(pf->veb[i], link_up);

 /* ... now the local VSIs */
6383 for (i = 0; i < pf->num_alloc_vsi; i++)
6384 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
6385 i40e_vsi_link_event(pf->vsi[i], link_up);
6386}

/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 **/
6392static void i40e_link_event(struct i40e_pf *pf)
6393{
6394 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6395 u8 new_link_speed, old_link_speed;
6396 i40e_status status;
6397 bool new_link, old_link;

 /* save off old link status information */
6400 pf->hw.phy.link_info_old = pf->hw.phy.link_info;

 /* set this to force the get_link_status call to refresh state */
6403 pf->hw.phy.get_link_info = true;
6404
6405 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
6406
6407 status = i40e_get_link_status(&pf->hw, &new_link);

 /* On success, disable temp link polling */
6410 if (status == I40E_SUCCESS) {
6411 if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)
6412 pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING;
6413 } else {
 /* Enable link polling temporarily until i40e_get_link_status
  * returns I40E_SUCCESS
  */
6417 pf->flags |= I40E_FLAG_TEMP_LINK_POLLING;
6418 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
6419 status);
6420 return;
6421 }
6422
6423 old_link_speed = pf->hw.phy.link_info_old.link_speed;
6424 new_link_speed = pf->hw.phy.link_info.link_speed;
6425
6426 if (new_link == old_link &&
6427 new_link_speed == old_link_speed &&
6428 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
6429 new_link == netif_carrier_ok(vsi->netdev)))
6430 return;
6431
6432 if (!test_bit(__I40E_VSI_DOWN, vsi->state))
6433 i40e_print_link_message(vsi, new_link);

 /* Notify the base of the switch tree connected to
  * the link.  Floating VEBs are not notified.
  */
6438 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
6439 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
6440 else
6441 i40e_vsi_link_event(vsi, new_link);
6442
6443 if (pf->vf)
6444 i40e_vc_notify_link_state(pf);
6445
6446 if (pf->flags & I40E_FLAG_PTP)
6447 i40e_ptp_set_increment(pf);
6448}

/**
 * i40e_watchdog_subtask - periodic checks not using event driven response
 * @pf: board private structure
 **/
6454static void i40e_watchdog_subtask(struct i40e_pf *pf)
6455{
6456 int i;
 /* if interface is down do nothing */
6459 if (test_bit(__I40E_DOWN, pf->state) ||
6460 test_bit(__I40E_CONFIG_BUSY, pf->state))
6461 return;

 /* make sure we don't do these things too often */
6464 if (time_before(jiffies, (pf->service_timer_previous +
6465 pf->service_timer_period)))
6466 return;
6467 pf->service_timer_previous = jiffies;
6468
6469 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
6470 (pf->flags & I40E_FLAG_TEMP_LINK_POLLING))
6471 i40e_link_event(pf);

 /* Update the stats for active netdevs so the network stack
  * can look at updated numbers whenever it cares to
  */
6476 for (i = 0; i < pf->num_alloc_vsi; i++)
6477 if (pf->vsi[i] && pf->vsi[i]->netdev)
6478 i40e_update_stats(pf->vsi[i]);
6479
6480 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
 /* Update the stats for the active switching components */
6482 for (i = 0; i < I40E_MAX_VEB; i++)
6483 if (pf->veb[i])
6484 i40e_update_veb_stats(pf->veb[i]);
6485 }
6486
6487 i40e_ptp_rx_hang(pf);
6488 i40e_ptp_tx_hang(pf);
6489}

/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 **/
6495static void i40e_reset_subtask(struct i40e_pf *pf)
6496{
6497 u32 reset_flags = 0;
6498
6499 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
6500 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
6501 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
6502 }
6503 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
6504 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
6505 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6506 }
6507 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
6508 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
6509 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
6510 }
6511 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
6512 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
6513 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
6514 }
6515 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
6516 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
6517 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
6518 }

 /* If there's a recovery already waiting, it takes
  * precedence before starting a new reset sequence.
  */
6523 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
6524 i40e_prep_for_reset(pf, false);
6525 i40e_reset(pf);
6526 i40e_rebuild(pf, false, false);
6527 }

 /* If we're already down or resetting, just bail */
6530 if (reset_flags &&
6531 !test_bit(__I40E_DOWN, pf->state) &&
6532 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
6533 i40e_do_reset(pf, reset_flags, false);
6534 }
6535}

/**
 * i40e_handle_link_event - Handle link event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
6542static void i40e_handle_link_event(struct i40e_pf *pf,
6543 struct i40e_arq_event_info *e)
6544{
6545 struct i40e_aqc_get_link_status *status =
6546 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;

 /* Do a new status request to re-enable LSE reporting
  * and load new status information into the hw struct
  * This completely ignores any state information
  * in the ARQ event info, instead choosing to always
  * issue the AQ update link status command.
  */
6554 i40e_link_event(pf);

 /* check for unqualified module, if link is down */
6557 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
6558 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
6559 (!(status->link_info & I40E_AQ_LINK_UP)))
6560 dev_err(&pf->pdev->dev,
6561 "The driver failed to link because an unqualified module was detected.\n");
6562}

/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 **/
6568static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
6569{
6570 struct i40e_arq_event_info event;
6571 struct i40e_hw *hw = &pf->hw;
6572 u16 pending, i = 0;
6573 i40e_status ret;
6574 u16 opcode;
6575 u32 oldval;
6576 u32 val;

 /* Do not run clean AQ when PF reset fails */
6579 if (test_bit(__I40E_RESET_FAILED, pf->state))
6580 return;

 /* check for error indications */
6583 val = rd32(&pf->hw, pf->hw.aq.arq.len);
6584 oldval = val;
6585 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
6586 if (hw->debug_mask & I40E_DEBUG_AQ)
6587 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
6588 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
6589 }
6590 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
6591 if (hw->debug_mask & I40E_DEBUG_AQ)
6592 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
6593 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
6594 pf->arq_overflows++;
6595 }
6596 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
6597 if (hw->debug_mask & I40E_DEBUG_AQ)
6598 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
6599 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
6600 }
6601 if (oldval != val)
6602 wr32(&pf->hw, pf->hw.aq.arq.len, val);
6603
6604 val = rd32(&pf->hw, pf->hw.aq.asq.len);
6605 oldval = val;
6606 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
6607 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6608 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
6609 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
6610 }
6611 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
6612 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6613 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
6614 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
6615 }
6616 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
6617 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6618 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
6619 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
6620 }
6621 if (oldval != val)
6622 wr32(&pf->hw, pf->hw.aq.asq.len, val);
6623
6624 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
6625 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
6626 if (!event.msg_buf)
6627 return;
6628
6629 do {
6630 ret = i40e_clean_arq_element(hw, &event, &pending);
6631 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
6632 break;
6633 else if (ret) {
6634 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
6635 break;
6636 }
6637
6638 opcode = le16_to_cpu(event.desc.opcode);
6639 switch (opcode) {
6640
6641 case i40e_aqc_opc_get_link_status:
6642 i40e_handle_link_event(pf, &event);
6643 break;
6644 case i40e_aqc_opc_send_msg_to_pf:
6645 ret = i40e_vc_process_vf_msg(pf,
6646 le16_to_cpu(event.desc.retval),
6647 le32_to_cpu(event.desc.cookie_high),
6648 le32_to_cpu(event.desc.cookie_low),
6649 event.msg_buf,
6650 event.msg_len);
6651 break;
6652 case i40e_aqc_opc_lldp_update_mib:
6653 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
6654#ifdef CONFIG_I40E_DCB
6655 rtnl_lock();
6656 ret = i40e_handle_lldp_event(pf, &event);
6657 rtnl_unlock();
6658#endif
6659 break;
6660 case i40e_aqc_opc_event_lan_overflow:
6661 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
6662 i40e_handle_lan_overflow_event(pf, &event);
6663 break;
6664 case i40e_aqc_opc_send_msg_to_peer:
6665 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
6666 break;
6667 case i40e_aqc_opc_nvm_erase:
6668 case i40e_aqc_opc_nvm_update:
6669 case i40e_aqc_opc_oem_post_update:
6670 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
6671 "ARQ NVM operation 0x%04x completed\n",
6672 opcode);
6673 break;
6674 default:
6675 dev_info(&pf->pdev->dev,
6676 "ARQ: Unknown event 0x%04x ignored\n",
6677 opcode);
6678 break;
6679 }
6680 } while (i++ < pf->adminq_work_limit);
6681
6682 if (i < pf->adminq_work_limit)
6683 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);

 /* re-enable Admin queue interrupt cause */
6686 val = rd32(hw, I40E_PFINT_ICR0_ENA);
6687 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
6688 wr32(hw, I40E_PFINT_ICR0_ENA, val);
6689 i40e_flush(hw);
6690
6691 kfree(event.msg_buf);
6692}

/**
 * i40e_verify_eeprom - make sure eeprom is good to use
 * @pf: board private structure
 **/
6698static void i40e_verify_eeprom(struct i40e_pf *pf)
6699{
6700 int err;
6701
6702 err = i40e_diag_eeprom_test(&pf->hw);
6703 if (err) {
 /* retry in case of garbage read */
6705 err = i40e_diag_eeprom_test(&pf->hw);
6706 if (err) {
6707 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6708 err);
6709 set_bit(__I40E_BAD_EEPROM, pf->state);
6710 }
6711 }
6712
6713 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
6714 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6715 clear_bit(__I40E_BAD_EEPROM, pf->state);
6716 }
6717}

/**
 * i40e_enable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * enable switch loop back or die - no point in a return value
 **/
6725static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6726{
6727 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6728 struct i40e_vsi_context ctxt;
6729 int ret;
6730
6731 ctxt.seid = pf->main_vsi_seid;
6732 ctxt.pf_num = pf->hw.pf_id;
6733 ctxt.vf_num = 0;
6734 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6735 if (ret) {
6736 dev_info(&pf->pdev->dev,
6737 "couldn't get PF vsi config, err %s aq_err %s\n",
6738 i40e_stat_str(&pf->hw, ret),
6739 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6740 return;
6741 }
6742 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6743 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6744 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6745
6746 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6747 if (ret) {
6748 dev_info(&pf->pdev->dev,
6749 "update vsi switch failed, err %s aq_err %s\n",
6750 i40e_stat_str(&pf->hw, ret),
6751 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6752 }
6753}

/**
 * i40e_disable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * disable switch loop back or die - no point in a return value
 **/
6761static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6762{
6763 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6764 struct i40e_vsi_context ctxt;
6765 int ret;
6766
6767 ctxt.seid = pf->main_vsi_seid;
6768 ctxt.pf_num = pf->hw.pf_id;
6769 ctxt.vf_num = 0;
6770 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6771 if (ret) {
6772 dev_info(&pf->pdev->dev,
6773 "couldn't get PF vsi config, err %s aq_err %s\n",
6774 i40e_stat_str(&pf->hw, ret),
6775 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6776 return;
6777 }
6778 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6779 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6780 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6781
6782 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6783 if (ret) {
6784 dev_info(&pf->pdev->dev,
6785 "update vsi switch failed, err %s aq_err %s\n",
6786 i40e_stat_str(&pf->hw, ret),
6787 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6788 }
6789}

/**
 * i40e_config_bridge_mode - Configure the HW bridge mode
 * @veb: pointer to the bridge instance
 *
 * Configure the loop back mode for the LAN VSI that is downlink to the
 * specified HW bridge instance. It is expected this function is called
 * when a new HW bridge is instantiated.
 **/
6799static void i40e_config_bridge_mode(struct i40e_veb *veb)
6800{
6801 struct i40e_pf *pf = veb->pf;
6802
6803 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
6804 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6805 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
6806 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6807 i40e_disable_pf_switch_lb(pf);
6808 else
6809 i40e_enable_pf_switch_lb(pf);
6810}

/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * with our own index numbers because the SEIDs from the HW could change
 * across the reset.
 **/
6821static int i40e_reconstitute_veb(struct i40e_veb *veb)
6822{
6823 struct i40e_vsi *ctl_vsi = NULL;
6824 struct i40e_pf *pf = veb->pf;
6825 int v, veb_idx;
6826 int ret;

 /* find the VSI that owns this VEB */
6829 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
6830 if (pf->vsi[v] &&
6831 pf->vsi[v]->veb_idx == veb->idx &&
6832 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
6833 ctl_vsi = pf->vsi[v];
6834 break;
6835 }
6836 }
6837 if (!ctl_vsi) {
6838 dev_info(&pf->pdev->dev,
6839 "missing owner VSI for veb_idx %d\n", veb->idx);
6840 ret = -ENOENT;
6841 goto end_reconstitute;
6842 }
6843 if (ctl_vsi != pf->vsi[pf->lan_vsi])
6844 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6845 ret = i40e_add_vsi(ctl_vsi);
6846 if (ret) {
6847 dev_info(&pf->pdev->dev,
6848 "rebuild of veb_idx %d owner VSI failed: %d\n",
6849 veb->idx, ret);
6850 goto end_reconstitute;
6851 }
6852 i40e_vsi_reset_stats(ctl_vsi);

 /* add the VEB in the switch */
6855 ret = i40e_add_veb(veb, ctl_vsi);
6856 if (ret)
6857 goto end_reconstitute;
6858
6859 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
6860 veb->bridge_mode = BRIDGE_MODE_VEB;
6861 else
6862 veb->bridge_mode = BRIDGE_MODE_VEPA;
6863 i40e_config_bridge_mode(veb);

 /* create the remaining VSIs attached to this VEB */
6866 for (v = 0; v < pf->num_alloc_vsi; v++) {
6867 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
6868 continue;
6869
6870 if (pf->vsi[v]->veb_idx == veb->idx) {
6871 struct i40e_vsi *vsi = pf->vsi[v];
6872
6873 vsi->uplink_seid = veb->seid;
6874 ret = i40e_add_vsi(vsi);
6875 if (ret) {
6876 dev_info(&pf->pdev->dev,
6877 "rebuild of vsi_idx %d failed: %d\n",
6878 v, ret);
6879 goto end_reconstitute;
6880 }
6881 i40e_vsi_reset_stats(vsi);
6882 }
6883 }

 /* create any VEBs attached to this VEB - RECURSION */
6886 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6887 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
6888 pf->veb[veb_idx]->uplink_seid = veb->seid;
6889 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
6890 if (ret)
6891 break;
6892 }
6893 }
6894
6895end_reconstitute:
6896 return ret;
6897}

/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 **/
6903static int i40e_get_capabilities(struct i40e_pf *pf)
6904{
6905 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
6906 u16 data_size;
6907 int buf_len;
6908 int err;
6909
6910 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
6911 do {
6912 cap_buf = kzalloc(buf_len, GFP_KERNEL);
6913 if (!cap_buf)
6914 return -ENOMEM;

 /* this loads the data into the hw struct for us */
6917 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
6918 &data_size,
6919 i40e_aqc_opc_list_func_capabilities,
6920 NULL);
6921
6922 kfree(cap_buf);
6923
6924 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
 /* retry with a larger buffer */
6926 buf_len = data_size;
6927 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
6928 dev_info(&pf->pdev->dev,
6929 "capability discovery failed, err %s aq_err %s\n",
6930 i40e_stat_str(&pf->hw, err),
6931 i40e_aq_str(&pf->hw,
6932 pf->hw.aq.asq_last_status));
6933 return -ENODEV;
6934 }
6935 } while (err);
6936
6937 if (pf->hw.debug_mask & I40E_DEBUG_USER)
6938 dev_info(&pf->pdev->dev,
6939 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
6940 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
6941 pf->hw.func_caps.num_msix_vectors,
6942 pf->hw.func_caps.num_msix_vectors_vf,
6943 pf->hw.func_caps.fd_filters_guaranteed,
6944 pf->hw.func_caps.fd_filters_best_effort,
6945 pf->hw.func_caps.num_tx_qp,
6946 pf->hw.func_caps.num_vsis);
6947
6948#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
6949 + pf->hw.func_caps.num_vfs)
6950 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
6951 dev_info(&pf->pdev->dev,
6952 "got num_vsis %d, setting num_vsis to %d\n",
6953 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
6954 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
6955 }
6956
6957 return 0;
6958}
6959
6960static int i40e_vsi_clear(struct i40e_vsi *vsi);

/**
 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
 * @pf: board private structure
 **/
6966static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6967{
6968 struct i40e_vsi *vsi;

 /* quick workaround for an NVM issue that leaves a critical register
  * uninitialized
  */
6973 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6974 static const u32 hkey[] = {
6975 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6976 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6977 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6978 0x95b3a76d};
6979 int i;
6980
6981 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6982 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6983 }
6984
6985 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6986 return;
6987
 /* find existing VSI and see if it needs configuring */
6989 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);

 /* create a new VSI if none exists */
6992 if (!vsi) {
6993 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6994 pf->vsi[pf->lan_vsi]->seid, 0);
6995 if (!vsi) {
6996 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
6997 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6998 return;
6999 }
7000 }
7001
7002 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
7003}

/**
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 **/
7009static void i40e_fdir_teardown(struct i40e_pf *pf)
7010{
7011 struct i40e_vsi *vsi;
7012
7013 i40e_fdir_filter_exit(pf);
7014 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
7015 if (vsi)
7016 i40e_vsi_release(vsi);
7017}

/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for PF Reset.
 **/
7027static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
7028{
7029 struct i40e_hw *hw = &pf->hw;
7030 i40e_status ret = 0;
7031 u32 v;
7032
7033 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
7034 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
7035 return;
7036 if (i40e_check_asq_alive(&pf->hw))
7037 i40e_vc_notify_reset(pf);
7038
7039 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

 /* quiesce the VSIs and their queues that are not already DOWN */
 /* pf_quiesce_all_vsi modifies netdev structures - rtnl_lock needed */
7043 if (!lock_acquired)
7044 rtnl_lock();
7045 i40e_pf_quiesce_all_vsi(pf);
7046 if (!lock_acquired)
7047 rtnl_unlock();
7048
7049 for (v = 0; v < pf->num_alloc_vsi; v++) {
7050 if (pf->vsi[v])
7051 pf->vsi[v]->seid = 0;
7052 }
7053
7054 i40e_shutdown_adminq(&pf->hw);

 /* call shutdown HMC */
7057 if (hw->hmc.hmc_obj) {
7058 ret = i40e_shutdown_lan_hmc(hw);
7059 if (ret)
7060 dev_warn(&pf->pdev->dev,
7061 "shutdown_lan_hmc failed: %d\n", ret);
7062 }
7063}

/**
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
 */
7069static void i40e_send_version(struct i40e_pf *pf)
7070{
7071 struct i40e_driver_version dv;
7072
7073 dv.major_version = DRV_VERSION_MAJOR;
7074 dv.minor_version = DRV_VERSION_MINOR;
7075 dv.build_version = DRV_VERSION_BUILD;
7076 dv.subbuild_version = 0;
7077 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
7078 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
7079}

/**
 * i40e_get_oem_version - get OEM specific version information
 * @hw: pointer to the hardware structure
 **/
7085static void i40e_get_oem_version(struct i40e_hw *hw)
7086{
7087 u16 block_offset = 0xffff;
7088 u16 block_length = 0;
7089 u16 capabilities = 0;
7090 u16 gen_snap = 0;
7091 u16 release = 0;
7092
7093#define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
7094#define I40E_NVM_OEM_LENGTH_OFFSET 0x00
7095#define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
7096#define I40E_NVM_OEM_GEN_OFFSET 0x02
7097#define I40E_NVM_OEM_RELEASE_OFFSET 0x03
7098#define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
7099#define I40E_NVM_OEM_LENGTH 3
7100
 /* Check if pointer to OEM version block is valid. */
7102 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
7103 if (block_offset == 0xffff)
7104 return;
7105
 /* Check if OEM version block has correct length. */
7107 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
7108 &block_length);
7109 if (block_length < I40E_NVM_OEM_LENGTH)
7110 return;
7111
 /* Check if OEM version format is as expected. */
7113 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
7114 &capabilities);
7115 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
7116 return;
7117
7118 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
7119 &gen_snap);
7120 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
7121 &release);
7122 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
7123 hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
7124}
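
/* Worked example (illustrative only): with gen_snap = 0x0001 and
 * release = 0x0203, the packed OEM version stored above becomes
 * (0x0001 << I40E_OEM_SNAP_SHIFT) | 0x0203.
 */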

/**
 * i40e_reset - issue the PF reset and track the result
 * @pf: board private structure
 *
 * Returns 0 on success, otherwise the i40e_pf_reset() error code.
 **/
7130static int i40e_reset(struct i40e_pf *pf)
7131{
7132 struct i40e_hw *hw = &pf->hw;
7133 i40e_status ret;
7134
7135 ret = i40e_pf_reset(hw);
7136 if (ret) {
7137 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
7138 set_bit(__I40E_RESET_FAILED, pf->state);
7139 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
7140 } else {
7141 pf->pfr_count++;
7142 }
7143 return ret;
7144}

/**
 * i40e_rebuild - rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
7153static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
7154{
7155 struct i40e_hw *hw = &pf->hw;
7156 u8 set_fc_aq_fail = 0;
7157 i40e_status ret;
7158 u32 val;
7159 int v;
7160
7161 if (test_bit(__I40E_DOWN, pf->state))
7162 goto clear_recovery;
7163 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
7166 ret = i40e_init_adminq(&pf->hw);
7167 if (ret) {
7168 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
7169 i40e_stat_str(&pf->hw, ret),
7170 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7171 goto clear_recovery;
7172 }
7173 i40e_get_oem_version(&pf->hw);

 /* re-verify the eeprom if we just had an EMP reset */
7176 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
7177 i40e_verify_eeprom(pf);
7178
7179 i40e_clear_pxe_mode(hw);
7180 ret = i40e_get_capabilities(pf);
7181 if (ret)
7182 goto end_core_reset;
7183
7184 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
7185 hw->func_caps.num_rx_qp, 0, 0);
7186 if (ret) {
7187 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
7188 goto end_core_reset;
7189 }
7190 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
7191 if (ret) {
7192 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
7193 goto end_core_reset;
7194 }
7195
7196#ifdef CONFIG_I40E_DCB
7197 ret = i40e_init_pf_dcb(pf);
7198 if (ret) {
7199 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
7200 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
7201
7202 }
7203#endif
7204
7205 if (!lock_acquired)
7206 rtnl_lock();
7207 ret = i40e_setup_pf_switch(pf, reinit);
7208 if (ret)
7209 goto end_unlock;

 /* The driver only wants link up/down and module qualification
  * reports from firmware.  Note the negative logic.
  */
7214 ret = i40e_aq_set_phy_int_mask(&pf->hw,
7215 ~(I40E_AQ_EVENT_LINK_UPDOWN |
7216 I40E_AQ_EVENT_MEDIA_NA |
7217 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
7218 if (ret)
7219 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
7220 i40e_stat_str(&pf->hw, ret),
7221 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7222
 /* make sure our flow control settings are restored */
7224 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
7225 if (ret)
7226 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
7227 i40e_stat_str(&pf->hw, ret),
7228 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

 /* Rebuild the VSIs and VEBs that existed before reset.
  * They are still in our local switch element arrays, so only
  * need to rebuild the switch model in the HW.
  *
  * If there were VEBs but the reconstitution failed, we'll try
  * to recover minimal use by getting the basic PF VSI working.
  */
7237 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
7238 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
 /* find the one VEB connected to the MAC, and find orphans */
7240 for (v = 0; v < I40E_MAX_VEB; v++) {
7241 if (!pf->veb[v])
7242 continue;
7243
7244 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
7245 pf->veb[v]->uplink_seid == 0) {
7246 ret = i40e_reconstitute_veb(pf->veb[v]);
7247
7248 if (!ret)
7249 continue;

 /* If the VEB hanging directly off the MAC could not be
  * rebuilt, fall back to a simple PF connection below;
  * a failed orphan VEB is only logged.
  */
7257 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
7258 dev_info(&pf->pdev->dev,
7259 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
7260 ret);
7261 pf->vsi[pf->lan_vsi]->uplink_seid
7262 = pf->mac_seid;
7263 break;
7264 } else if (pf->veb[v]->uplink_seid == 0) {
7265 dev_info(&pf->pdev->dev,
7266 "rebuild of orphan VEB failed: %d\n",
7267 ret);
7268 }
7269 }
7270 }
7271 }
7272
7273 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
7274 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
7275
7276 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
7277 if (ret) {
7278 dev_info(&pf->pdev->dev,
7279 "rebuild of Main VSI failed: %d\n", ret);
7280 goto end_unlock;
7281 }
7282 }

 /* Reconfigure hardware for allowing smaller MSS in the case
  * of TSO, so that we avoid the MDD being fired and causing
  * a reset in the case of small MSS+TSO.
  */
7288#define I40E_REG_MSS 0x000E64DC
7289#define I40E_REG_MSS_MIN_MASK 0x3FF0000
7290#define I40E_64BYTE_MSS 0x400000
7291 val = rd32(hw, I40E_REG_MSS);
7292 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
7293 val &= ~I40E_REG_MSS_MIN_MASK;
7294 val |= I40E_64BYTE_MSS;
7295 wr32(hw, I40E_REG_MSS, val);
7296 }
7297
7298 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
7299 msleep(75);
7300 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
7301 if (ret)
7302 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
7303 i40e_stat_str(&pf->hw, ret),
7304 i40e_aq_str(&pf->hw,
7305 pf->hw.aq.asq_last_status));
7306 }
7307
7308 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7309 ret = i40e_setup_misc_vector(pf);

 /* Add a filter to drop all Flow control frames from any VSI from being
  * transmitted. By doing so we stop a malicious VF from sending out
  * PAUSE or PFC frames and potentially controlling traffic for other
  * VF/VM sessions.
  */
7317 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
7318 pf->main_vsi_seid);
7319
 /* restart the VSIs that were rebuilt and running before the reset */
7321 i40e_pf_unquiesce_all_vsi(pf);
7322
 /* Release the RTNL lock before we start resetting VFs */
7324 if (!lock_acquired)
7325 rtnl_unlock();
7326
7327 i40e_reset_all_vfs(pf, true);
7328
 /* tell the firmware that we're starting */
7330 i40e_send_version(pf);
7331
 /* We've already released the lock, so don't double lock */
7333 goto end_core_reset;
7334
7335end_unlock:
7336 if (!lock_acquired)
7337 rtnl_unlock();
7338end_core_reset:
7339 clear_bit(__I40E_RESET_FAILED, pf->state);
7340clear_recovery:
7341 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
7342}

/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
7351static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
7352 bool lock_acquired)
7353{
7354 int ret;

 /* Run the PF reset first; the rebuild is only attempted
  * when the reset itself succeeds.
  */
7359 ret = i40e_reset(pf);
7360 if (!ret)
7361 i40e_rebuild(pf, reinit, lock_acquired);
7362}

/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
7373static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
7374{
7375 i40e_prep_for_reset(pf, lock_acquired);
7376 i40e_reset_and_rebuild(pf, false, lock_acquired);
7377}

/**
 * i40e_handle_mdd_event
 * @pf: pointer to the PF structure
 *
 * Called from the MDD irq handler to identify possibly malicious vfs
 **/
7385static void i40e_handle_mdd_event(struct i40e_pf *pf)
7386{
7387 struct i40e_hw *hw = &pf->hw;
7388 bool mdd_detected = false;
7389 bool pf_mdd_detected = false;
7390 struct i40e_vf *vf;
7391 u32 reg;
7392 int i;
7393
7394 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
7395 return;

 /* find what triggered the MDD event */
7398 reg = rd32(hw, I40E_GL_MDET_TX);
7399 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
7400 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
7401 I40E_GL_MDET_TX_PF_NUM_SHIFT;
7402 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
7403 I40E_GL_MDET_TX_VF_NUM_SHIFT;
7404 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
7405 I40E_GL_MDET_TX_EVENT_SHIFT;
7406 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
7407 I40E_GL_MDET_TX_QUEUE_SHIFT) -
7408 pf->hw.func_caps.base_queue;
7409 if (netif_msg_tx_err(pf))
7410 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
7411 event, queue, pf_num, vf_num);
7412 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
7413 mdd_detected = true;
7414 }
7415 reg = rd32(hw, I40E_GL_MDET_RX);
7416 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
7417 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
7418 I40E_GL_MDET_RX_FUNCTION_SHIFT;
7419 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
7420 I40E_GL_MDET_RX_EVENT_SHIFT;
7421 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
7422 I40E_GL_MDET_RX_QUEUE_SHIFT) -
7423 pf->hw.func_caps.base_queue;
7424 if (netif_msg_rx_err(pf))
7425 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
7426 event, queue, func);
7427 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
7428 mdd_detected = true;
7429 }
7430
7431 if (mdd_detected) {
7432 reg = rd32(hw, I40E_PF_MDET_TX);
7433 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
7434 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
7435 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
7436 pf_mdd_detected = true;
7437 }
7438 reg = rd32(hw, I40E_PF_MDET_RX);
7439 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
7440 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
7441 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
7442 pf_mdd_detected = true;
7443 }
7444
7445 if (pf_mdd_detected) {
7446 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
7447 i40e_service_event_schedule(pf);
7448 }
7449 }
7450
/* see if one of the VFs needs its hand slapped */
7452 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
7453 vf = &(pf->vf[i]);
7454 reg = rd32(hw, I40E_VP_MDET_TX(i));
7455 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
7456 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
7457 vf->num_mdd_events++;
7458 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
7459 i);
7460 }
7461
7462 reg = rd32(hw, I40E_VP_MDET_RX(i));
7463 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
7464 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
7465 vf->num_mdd_events++;
7466 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
7467 i);
7468 }
7469
7470 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
7471 dev_info(&pf->pdev->dev,
7472 "Too many MDD events on VF %d, disabled\n", i);
7473 dev_info(&pf->pdev->dev,
7474 "Use PF Control I/F to re-enable the VF\n");
7475 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
7476 }
7477 }
7478
/* re-enable mdd interrupt cause */
7480 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
7481 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
7482 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
7483 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
7484 i40e_flush(hw);
7485}
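
/* Illustrative decode of one GL_MDET_TX latch (not driver code): with
 * base_queue == 0, a raw value whose EVENT field is 2, PF_NUM field is
 * 0, VF_NUM field is 3 and QUEUE field is 40 is reported above as
 * "event 0x02 on TX queue 40 PF number 0x00 VF number 0x03", and the
 * 0xffffffff write re-arms the register for the next event.
 */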

/**
 * i40e_tunnel_name - return a printable name for a UDP tunnel type
 * @port: the udp_port_config entry being looked at
 **/
7487static const char *i40e_tunnel_name(struct i40e_udp_port_config *port)
7488{
7489 switch (port->type) {
7490 case UDP_TUNNEL_TYPE_VXLAN:
7491 return "vxlan";
7492 case UDP_TUNNEL_TYPE_GENEVE:
7493 return "geneve";
7494 default:
7495 return "unknown";
7496 }
7497}
7498
/**
 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
 * @pf: board private structure
 **/
7503static void i40e_sync_udp_filters(struct i40e_pf *pf)
7504{
7505 int i;
7506
/* loop through and set pending bit for all active UDP filters */
7508 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7509 if (pf->udp_ports[i].port)
7510 pf->pending_udp_bitmap |= BIT_ULL(i);
7511 }
7512
7513 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
7514}
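
/* Sketch of the handoff to the subtask below (assumed values only):
 * each set bit in pf->pending_udp_bitmap names a slot in
 * pf->udp_ports[]; a non-zero port in that slot means "add to
 * firmware", a zero port means "delete".  Slots 0 and 2 pending would
 * read pending_udp_bitmap == (BIT_ULL(0) | BIT_ULL(2)).
 */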
7515
/**
 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
7520static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
7521{
7522 struct i40e_hw *hw = &pf->hw;
7523 i40e_status ret;
7524 u16 port;
7525 int i;
7526
7527 if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
7528 return;
7529
7530 pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
7531
7532 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7533 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
7534 pf->pending_udp_bitmap &= ~BIT_ULL(i);
7535 port = pf->udp_ports[i].port;
7536 if (port)
7537 ret = i40e_aq_add_udp_tunnel(hw, port,
7538 pf->udp_ports[i].type,
7539 NULL, NULL);
7540 else
7541 ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
7542
7543 if (ret) {
7544 dev_info(&pf->pdev->dev,
7545 "%s %s port %d, index %d failed, err %s aq_err %s\n",
7546 i40e_tunnel_name(&pf->udp_ports[i]),
7547 port ? "add" : "delete",
7548 port, i,
7549 i40e_stat_str(&pf->hw, ret),
7550 i40e_aq_str(&pf->hw,
7551 pf->hw.aq.asq_last_status));
7552 pf->udp_ports[i].port = 0;
7553 }
7554 }
7555 }
7556}
7557
/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/
7562static void i40e_service_task(struct work_struct *work)
7563{
7564 struct i40e_pf *pf = container_of(work,
7565 struct i40e_pf,
7566 service_task);
7567 unsigned long start_time = jiffies;
7568
/* don't bother with service tasks if a reset is in progress */
7570 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
7571 return;
7572
7573 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
7574 return;
7575
7576 i40e_detect_recover_hung(pf);
7577 i40e_sync_filters_subtask(pf);
7578 i40e_reset_subtask(pf);
7579 i40e_handle_mdd_event(pf);
7580 i40e_vc_process_vflr_event(pf);
7581 i40e_watchdog_subtask(pf);
7582 i40e_fdir_reinit_subtask(pf);
7583 if (pf->flags & I40E_FLAG_CLIENT_RESET) {
/* Client subtask will reopen next time through. */
7585 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
7586 pf->flags &= ~I40E_FLAG_CLIENT_RESET;
7587 } else {
7588 i40e_client_subtask(pf);
7589 if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
7590 i40e_notify_client_of_l2_param_changes(
7591 pf->vsi[pf->lan_vsi]);
7592 pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
7593 }
7594 }
7595 i40e_sync_filters_subtask(pf);
7596 i40e_sync_udp_filters_subtask(pf);
7597 i40e_clean_adminq_subtask(pf);
7598
/* flush memory to make sure state is correct before next watchdog */
7600 smp_mb__before_atomic();
7601 clear_bit(__I40E_SERVICE_SCHED, pf->state);
7602
/* If the tasks have taken longer than one timer cycle or there
 * is more work to be done, reschedule the service task now
 * rather than wait for the timer to tick again.
 */
7607 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
7608 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
7609 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
7610 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
7611 i40e_service_event_schedule(pf);
7612}
7613
/**
 * i40e_service_timer - timer callback
 * @data: pointer to PF struct
 **/
7618static void i40e_service_timer(unsigned long data)
7619{
7620 struct i40e_pf *pf = (struct i40e_pf *)data;
7621
7622 mod_timer(&pf->service_timer,
7623 round_jiffies(jiffies + pf->service_timer_period));
7624 i40e_service_event_schedule(pf);
7625}
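
/* Illustrative timing, assuming the usual service_timer_period of HZ
 * (one second): round_jiffies() aligns expiry to a whole second so
 * periodic work across devices batches together, and the timer only
 * queues i40e_service_task() on the workqueue; no subtask runs in
 * timer (atomic) context.
 */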
7626
/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/
7631static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
7632{
7633 struct i40e_pf *pf = vsi->back;
7634
7635 switch (vsi->type) {
7636 case I40E_VSI_MAIN:
7637 vsi->alloc_queue_pairs = pf->num_lan_qps;
7638 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7639 I40E_REQ_DESCRIPTOR_MULTIPLE);
7640 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7641 vsi->num_q_vectors = pf->num_lan_msix;
7642 else
7643 vsi->num_q_vectors = 1;
7644
7645 break;
7646
7647 case I40E_VSI_FDIR:
7648 vsi->alloc_queue_pairs = 1;
7649 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
7650 I40E_REQ_DESCRIPTOR_MULTIPLE);
7651 vsi->num_q_vectors = pf->num_fdsb_msix;
7652 break;
7653
7654 case I40E_VSI_VMDQ2:
7655 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
7656 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7657 I40E_REQ_DESCRIPTOR_MULTIPLE);
7658 vsi->num_q_vectors = pf->num_vmdq_msix;
7659 break;
7660
7661 case I40E_VSI_SRIOV:
7662 vsi->alloc_queue_pairs = pf->num_vf_qps;
7663 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7664 I40E_REQ_DESCRIPTOR_MULTIPLE);
7665 break;
7666
7667 default:
7668 WARN_ON(1);
7669 return -ENODATA;
7670 }
7671
7672 return 0;
7673}
7674
/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: VSI pointer
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
7683static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
7684{
7685 struct i40e_ring **next_rings;
7686 int size;
7687 int ret = 0;
7688
/* allocate memory for both Tx, XDP Tx and Rx ring pointers */
7690 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
7691 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
7692 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
7693 if (!vsi->tx_rings)
7694 return -ENOMEM;
7695 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
7696 if (i40e_enabled_xdp_vsi(vsi)) {
7697 vsi->xdp_rings = next_rings;
7698 next_rings += vsi->alloc_queue_pairs;
7699 }
7700 vsi->rx_rings = next_rings;
7701
7702 if (alloc_qvectors) {
/* allocate memory for q_vector pointers */
7704 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
7705 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
7706 if (!vsi->q_vectors) {
7707 ret = -ENOMEM;
7708 goto err_vectors;
7709 }
7710 }
7711 return ret;
7712
7713err_vectors:
7714 kfree(vsi->tx_rings);
7715 return ret;
7716}
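
/* Layout of the single allocation made above for an XDP-enabled VSI
 * with N == alloc_queue_pairs (illustrative sketch):
 *
 *   vsi->tx_rings --> [ Tx 0..N-1 | XDP Tx 0..N-1 | Rx 0..N-1 ]
 *
 * xdp_rings and rx_rings alias into the same block, which is why
 * i40e_vsi_free_arrays() kfree()s only tx_rings and just NULLs the
 * other two pointers.
 */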
7717
/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
7726static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
7727{
7728 int ret = -ENODEV;
7729 struct i40e_vsi *vsi;
7730 int vsi_idx;
7731 int i;
7732
/* Need to protect the allocation of the VSIs at the PF level */
7734 mutex_lock(&pf->switch_mutex);
7735
/* VSI list may be fragmented if VSI creation/destruction has
 * been happening.  We can afford to do a quick scan to look
 * for any free VSIs in the list.
 *
 * find next empty vsi slot, looping back around if necessary
 */
7742 i = pf->next_vsi;
7743 while (i < pf->num_alloc_vsi && pf->vsi[i])
7744 i++;
7745 if (i >= pf->num_alloc_vsi) {
7746 i = 0;
7747 while (i < pf->next_vsi && pf->vsi[i])
7748 i++;
7749 }
7750
7751 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
7752 vsi_idx = i;
7753 } else {
7754 ret = -ENODEV;
7755 goto unlock_pf;
7756 }
7757 pf->next_vsi = ++i;
7758
7759 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
7760 if (!vsi) {
7761 ret = -ENOMEM;
7762 goto unlock_pf;
7763 }
7764 vsi->type = type;
7765 vsi->back = pf;
7766 set_bit(__I40E_VSI_DOWN, vsi->state);
7767 vsi->flags = 0;
7768 vsi->idx = vsi_idx;
7769 vsi->int_rate_limit = 0;
7770 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
7771 pf->rss_table_size : 64;
7772 vsi->netdev_registered = false;
7773 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
7774 hash_init(vsi->mac_filter_hash);
7775 vsi->irqs_ready = false;
7776
7777 ret = i40e_set_num_rings_in_vsi(vsi);
7778 if (ret)
7779 goto err_rings;
7780
7781 ret = i40e_vsi_alloc_arrays(vsi, true);
7782 if (ret)
7783 goto err_rings;
7784
/* Setup default MSIX irq handler for VSI */
7786 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
7787
7788
7789 spin_lock_init(&vsi->mac_filter_hash_lock);
7790 pf->vsi[vsi_idx] = vsi;
7791 ret = vsi_idx;
7792 goto unlock_pf;
7793
7794err_rings:
7795 pf->next_vsi = i - 1;
7796 kfree(vsi);
7797unlock_pf:
7798 mutex_unlock(&pf->switch_mutex);
7799 return ret;
7800}
7801
/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
7810static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
7811{
/* free the ring and vector containers */
7813 if (free_qvectors) {
7814 kfree(vsi->q_vectors);
7815 vsi->q_vectors = NULL;
7816 }
7817 kfree(vsi->tx_rings);
7818 vsi->tx_rings = NULL;
7819 vsi->rx_rings = NULL;
7820 vsi->xdp_rings = NULL;
7821}
7822
/**
 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
 * and lookup table
 * @vsi: Pointer to VSI structure
 */
7828static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
7829{
7830 if (!vsi)
7831 return;
7832
7833 kfree(vsi->rss_hkey_user);
7834 vsi->rss_hkey_user = NULL;
7835
7836 kfree(vsi->rss_lut_user);
7837 vsi->rss_lut_user = NULL;
7838}
7839
/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 **/
7844static int i40e_vsi_clear(struct i40e_vsi *vsi)
7845{
7846 struct i40e_pf *pf;
7847
7848 if (!vsi)
7849 return 0;
7850
7851 if (!vsi->back)
7852 goto free_vsi;
7853 pf = vsi->back;
7854
7855 mutex_lock(&pf->switch_mutex);
7856 if (!pf->vsi[vsi->idx]) {
7857 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
7858 vsi->idx, vsi->idx, vsi, vsi->type);
7859 goto unlock_vsi;
7860 }
7861
7862 if (pf->vsi[vsi->idx] != vsi) {
7863 dev_err(&pf->pdev->dev,
7864 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
7865 pf->vsi[vsi->idx]->idx,
7866 pf->vsi[vsi->idx],
7867 pf->vsi[vsi->idx]->type,
7868 vsi->idx, vsi, vsi->type);
7869 goto unlock_vsi;
7870 }
7871
/* updates the PF for this cleared vsi */
7873 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
7874 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
7875
7876 i40e_vsi_free_arrays(vsi, true);
7877 i40e_clear_rss_config_user(vsi);
7878
7879 pf->vsi[vsi->idx] = NULL;
7880 if (vsi->idx < pf->next_vsi)
7881 pf->next_vsi = vsi->idx;
7882
7883unlock_vsi:
7884 mutex_unlock(&pf->switch_mutex);
7885free_vsi:
7886 kfree(vsi);
7887
7888 return 0;
7889}
7890
/**
 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being cleaned
 **/
7895static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
7896{
7897 int i;
7898
7899 if (vsi->tx_rings && vsi->tx_rings[0]) {
7900 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7901 kfree_rcu(vsi->tx_rings[i], rcu);
7902 vsi->tx_rings[i] = NULL;
7903 vsi->rx_rings[i] = NULL;
7904 if (vsi->xdp_rings)
7905 vsi->xdp_rings[i] = NULL;
7906 }
7907 }
7908}
7909
/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 **/
7914static int i40e_alloc_rings(struct i40e_vsi *vsi)
7915{
7916 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
7917 struct i40e_pf *pf = vsi->back;
7918 struct i40e_ring *ring;
7919
/* Set basic values in the rings to be used later during open() */
7921 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
/* allocate space for both Tx and Rx in one shot */
7923 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
7924 if (!ring)
7925 goto err_out;
7926
7927 ring->queue_index = i;
7928 ring->reg_idx = vsi->base_queue + i;
7929 ring->ring_active = false;
7930 ring->vsi = vsi;
7931 ring->netdev = vsi->netdev;
7932 ring->dev = &pf->pdev->dev;
7933 ring->count = vsi->num_desc;
7934 ring->size = 0;
7935 ring->dcb_tc = 0;
7936 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
7937 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7938 ring->tx_itr_setting = pf->tx_itr_default;
7939 vsi->tx_rings[i] = ring++;
7940
7941 if (!i40e_enabled_xdp_vsi(vsi))
7942 goto setup_rx;
7943
7944 ring->queue_index = vsi->alloc_queue_pairs + i;
7945 ring->reg_idx = vsi->base_queue + ring->queue_index;
7946 ring->ring_active = false;
7947 ring->vsi = vsi;
7948 ring->netdev = NULL;
7949 ring->dev = &pf->pdev->dev;
7950 ring->count = vsi->num_desc;
7951 ring->size = 0;
7952 ring->dcb_tc = 0;
7953 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
7954 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7955 set_ring_xdp(ring);
7956 ring->tx_itr_setting = pf->tx_itr_default;
7957 vsi->xdp_rings[i] = ring++;
7958
7959setup_rx:
7960 ring->queue_index = i;
7961 ring->reg_idx = vsi->base_queue + i;
7962 ring->ring_active = false;
7963 ring->vsi = vsi;
7964 ring->netdev = vsi->netdev;
7965 ring->dev = &pf->pdev->dev;
7966 ring->count = vsi->num_desc;
7967 ring->size = 0;
7968 ring->dcb_tc = 0;
7969 ring->rx_itr_setting = pf->rx_itr_default;
7970 vsi->rx_rings[i] = ring;
7971 }
7972
7973 return 0;
7974
7975err_out:
7976 i40e_vsi_clear_rings(vsi);
7977 return -ENOMEM;
7978}
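
/* Each iteration above kcalloc()s one block of qpv ring structs and
 * advances `ring` through it, e.g. [ Tx | XDP Tx | Rx ] when qpv == 3.
 * Because the triplet is a single allocation anchored at tx_rings[i],
 * i40e_vsi_clear_rings() frees it with one kfree_rcu() per queue pair.
 */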
7979
/**
 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
 * @pf: board private structure
 * @vectors: the number of MSI-X vectors to request (the number of queues
 *           plus other)
 *
 * Returns the number of vectors reserved or negative on failure
 **/
7987static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7988{
7989 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
7990 I40E_MIN_MSIX, vectors);
7991 if (vectors < 0) {
7992 dev_info(&pf->pdev->dev,
7993 "MSI-X vector reservation failed: %d\n", vectors);
7994 vectors = 0;
7995 }
7996
7997 return vectors;
7998}
7999
/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns the number of vectors reserved or negative on failure
 **/
8008static int i40e_init_msix(struct i40e_pf *pf)
8009{
8010 struct i40e_hw *hw = &pf->hw;
8011 int cpus, extra_vectors;
8012 int vectors_left;
8013 int v_budget, i;
8014 int v_actual;
8015 int iwarp_requested = 0;
8016
8017 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
8018 return -ENODEV;
8019
8020
/* The number of vectors we'll request will be comprised of:
 *   - Add 1 for "other" cause for Admin Queue events, etc.
 *   - The number of LAN queue pairs
 *     - Queues being used for RSS.
 *       We don't need as many as max_rss_size vectors.
 *       use rss_size instead in the calculation since that
 *       is governed by number of cpus in the system.
 *     - assumes symmetric Tx/Rx pairing
 *   - The number of VMDq pairs
 *   - The CPU count within the NUMA node if iWARP is enabled
 * Once we count this up, try the request.
 *
 * If we can't get what we want, we'll simplify to nearly nothing
 * and try again.  If that still fails, we punt.
 */
8035 vectors_left = hw->func_caps.num_msix_vectors;
8036 v_budget = 0;
8037
/* reserve one vector for miscellaneous handler */
8039 if (vectors_left) {
8040 v_budget++;
8041 vectors_left--;
8042 }
8043
/* reserve vectors for the main PF traffic queues. Initially we
 * only reserve at most 50% of the available vectors, in the case
 * that there is not enough memory for other features. Any vectors
 * left over after the other features have claimed theirs are added
 * back to the LAN queues further down.
 */
8051 cpus = num_online_cpus();
8052 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
8053 vectors_left -= pf->num_lan_msix;
8054
/* reserve one vector for sideband flow director */
8056 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8057 if (vectors_left) {
8058 pf->num_fdsb_msix = 1;
8059 v_budget++;
8060 vectors_left--;
8061 } else {
8062 pf->num_fdsb_msix = 0;
8063 }
8064 }
8065
/* can we reserve enough for iWARP? */
8067 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
8068 iwarp_requested = pf->num_iwarp_msix;
8069
8070 if (!vectors_left)
8071 pf->num_iwarp_msix = 0;
8072 else if (vectors_left < pf->num_iwarp_msix)
8073 pf->num_iwarp_msix = 1;
8074 v_budget += pf->num_iwarp_msix;
8075 vectors_left -= pf->num_iwarp_msix;
8076 }
8077
/* any vectors left over go for VMDq support */
8079 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
8080 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
8081 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
8082
8083 if (!vectors_left) {
8084 pf->num_vmdq_msix = 0;
8085 pf->num_vmdq_qps = 0;
8086 } else {
/* if we're short on vectors for what's desired, we limit
 * the queues per vmdq.  If this is still more than are
 * available, the user will need to change the number of
 * queues/vectors used by the PF later with the ethtool
 * channels command
 */
8093 if (vmdq_vecs < vmdq_vecs_wanted)
8094 pf->num_vmdq_qps = 1;
8095 pf->num_vmdq_msix = pf->num_vmdq_qps;
8096
8097 v_budget += vmdq_vecs;
8098 vectors_left -= vmdq_vecs;
8099 }
8100 }
8101
8102
/* On systems with a large number of SMP cores, we previously limited
 * the number of vectors for num_lan_msix to be at most 50% of the
 * available vectors, to allow for other features. Now, we add back
 * the remaining vectors. However, we ensure the total num_lan_msix
 * will not exceed num_online_cpus(). To do this, we calculate the
 * number of vectors which we can steal from the other features and
 * add them back here.
 */
8111 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
8112 pf->num_lan_msix += extra_vectors;
8113 vectors_left -= extra_vectors;
8114
8115 WARN(vectors_left < 0,
8116 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
8117
8118 v_budget += pf->num_lan_msix;
8119 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
8120 GFP_KERNEL);
8121 if (!pf->msix_entries)
8122 return -ENOMEM;
8123
8124 for (i = 0; i < v_budget; i++)
8125 pf->msix_entries[i].entry = i;
8126 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
8127
8128 if (v_actual < I40E_MIN_MSIX) {
8129 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
8130 kfree(pf->msix_entries);
8131 pf->msix_entries = NULL;
8132 pci_disable_msix(pf->pdev);
8133 return -ENODEV;
8134
8135 } else if (v_actual == I40E_MIN_MSIX) {
/* Adjust for minimal MSIX use */
8137 pf->num_vmdq_vsis = 0;
8138 pf->num_vmdq_qps = 0;
8139 pf->num_lan_qps = 1;
8140 pf->num_lan_msix = 1;
8141
8142 } else if (!vectors_left) {
/* If we have limited resources, we will start with no vectors
 * for the special features and then allocate vectors to some
 * of these features based on the policy and at the end disable
 * the features that did not get any vectors.
 */
8148 int vec;
8149
8150 dev_info(&pf->pdev->dev,
8151 "MSI-X vector limit reached, attempting to redistribute vectors\n");
8152
8153 vec = v_actual - 1;
8154
/* Scale vector usage down */
8156 pf->num_vmdq_msix = 1;
8157 pf->num_vmdq_vsis = 1;
8158 pf->num_vmdq_qps = 1;
8159
/* partition out the remaining vectors */
8161 switch (vec) {
8162 case 2:
8163 pf->num_lan_msix = 1;
8164 break;
8165 case 3:
8166 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
8167 pf->num_lan_msix = 1;
8168 pf->num_iwarp_msix = 1;
8169 } else {
8170 pf->num_lan_msix = 2;
8171 }
8172 break;
8173 default:
8174 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
8175 pf->num_iwarp_msix = min_t(int, (vec / 3),
8176 iwarp_requested);
8177 pf->num_vmdq_vsis = min_t(int, (vec / 3),
8178 I40E_DEFAULT_NUM_VMDQ_VSI);
8179 } else {
8180 pf->num_vmdq_vsis = min_t(int, (vec / 2),
8181 I40E_DEFAULT_NUM_VMDQ_VSI);
8182 }
8183 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8184 pf->num_fdsb_msix = 1;
8185 vec--;
8186 }
8187 pf->num_lan_msix = min_t(int,
8188 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
8189 pf->num_lan_msix);
8190 pf->num_lan_qps = pf->num_lan_msix;
8191 break;
8192 }
8193 }
8194
8195 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
8196 (pf->num_fdsb_msix == 0)) {
8197 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
8198 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8199 }
8200 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
8201 (pf->num_vmdq_msix == 0)) {
8202 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
8203 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
8204 }
8205
8206 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
8207 (pf->num_iwarp_msix == 0)) {
8208 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
8209 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
8210 }
8211 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
8212 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
8213 pf->num_lan_msix,
8214 pf->num_vmdq_msix * pf->num_vmdq_vsis,
8215 pf->num_fdsb_msix,
8216 pf->num_iwarp_msix);
8217
8218 return v_actual;
8219}
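
/* Worked example of the vector budget above (illustrative numbers
 * only): with 16 online CPUs, 64 vectors in func_caps and only
 * flow-director sideband enabled, the walk is 1 vector for misc
 * (63 left), num_lan_msix = min(16, 63 / 2) = 16 (47 left), 1 for
 * FD-SB (46 left); cpus - num_lan_msix == 0 so nothing is added back,
 * and v_budget == 1 + 1 + 16 == 18 vectors are requested.
 */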
8220
/**
 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the vsi struct
 * @cpu: cpu to be used on affinity_mask
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
8229static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
8230{
8231 struct i40e_q_vector *q_vector;
8232
/* allocate q_vector */
8234 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
8235 if (!q_vector)
8236 return -ENOMEM;
8237
8238 q_vector->vsi = vsi;
8239 q_vector->v_idx = v_idx;
8240 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
8241
8242 if (vsi->netdev)
8243 netif_napi_add(vsi->netdev, &q_vector->napi,
8244 i40e_napi_poll, NAPI_POLL_WEIGHT);
8245
8246 q_vector->rx.latency_range = I40E_LOW_LATENCY;
8247 q_vector->tx.latency_range = I40E_LOW_LATENCY;
8248
/* tie q_vector and vsi together */
8250 vsi->q_vectors[v_idx] = q_vector;
8251
8252 return 0;
8253}
8254
/**
 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
8262static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
8263{
8264 struct i40e_pf *pf = vsi->back;
8265 int err, v_idx, num_q_vectors, current_cpu;
8266
/* if not MSIX, give the one vector only to the LAN VSI */
8268 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
8269 num_q_vectors = vsi->num_q_vectors;
8270 else if (vsi == pf->vsi[pf->lan_vsi])
8271 num_q_vectors = 1;
8272 else
8273 return -EINVAL;
8274
8275 current_cpu = cpumask_first(cpu_online_mask);
8276
8277 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
8278 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
8279 if (err)
8280 goto err_out;
8281 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
8282 if (unlikely(current_cpu >= nr_cpu_ids))
8283 current_cpu = cpumask_first(cpu_online_mask);
8284 }
8285
8286 return 0;
8287
8288err_out:
8289 while (v_idx--)
8290 i40e_free_q_vector(vsi, v_idx);
8291
8292 return err;
8293}
8294
/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 **/
8299static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
8300{
8301 int vectors = 0;
8302 ssize_t size;
8303
8304 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
8305 vectors = i40e_init_msix(pf);
8306 if (vectors < 0) {
8307 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
8308 I40E_FLAG_IWARP_ENABLED |
8309 I40E_FLAG_RSS_ENABLED |
8310 I40E_FLAG_DCB_CAPABLE |
8311 I40E_FLAG_DCB_ENABLED |
8312 I40E_FLAG_SRIOV_ENABLED |
8313 I40E_FLAG_FD_SB_ENABLED |
8314 I40E_FLAG_FD_ATR_ENABLED |
8315 I40E_FLAG_VMDQ_ENABLED);
8316
/* rework the queue expectations without MSIX */
8318 i40e_determine_queue_usage(pf);
8319 }
8320 }
8321
8322 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
8323 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
8324 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
8325 vectors = pci_enable_msi(pf->pdev);
8326 if (vectors < 0) {
8327 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
8328 vectors);
8329 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
8330 }
vectors = 1; /* one vector for LAN traffic */
8332 }
8333
8334 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
8335 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
8336
/* set up vector assignment tracking */
8338 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
8339 pf->irq_pile = kzalloc(size, GFP_KERNEL);
8340 if (!pf->irq_pile) {
8341 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
8342 return -ENOMEM;
8343 }
8344 pf->irq_pile->num_entries = vectors;
8345 pf->irq_pile->search_hint = 0;
8346
/* track first vector for misc interrupts, ignore return */
8348 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
8349
8350 return 0;
8351}
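
/* Fallback order sketch (descriptive only): MSI-X first, then MSI,
 * then legacy INTx.  Whichever mode wins, pf->irq_pile tracks exactly
 * `vectors` entries, and the i40e_get_lump() call above permanently
 * reserves vector 0 for the miscellaneous/AdminQ handler.
 */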
8352
/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors.  This is not used
 * when in MSI or Legacy interrupt mode.
 **/
8361static int i40e_setup_misc_vector(struct i40e_pf *pf)
8362{
8363 struct i40e_hw *hw = &pf->hw;
8364 int err = 0;
8365
/* Only request the irq if this is the first time through, and
 * not when we're rebuilding after a Reset
 */
8369 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
8370 err = request_irq(pf->msix_entries[0].vector,
8371 i40e_intr, 0, pf->int_name, pf);
8372 if (err) {
8373 dev_info(&pf->pdev->dev,
8374 "request_irq for %s failed: %d\n",
8375 pf->int_name, err);
8376 return -EFAULT;
8377 }
8378 }
8379
8380 i40e_enable_misc_int_causes(pf);
8381
/* associate no queues to the misc vector */
8383 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
8384 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
8385
8386 i40e_flush(hw);
8387
8388 i40e_irq_dynamic_enable_icr0(pf, true);
8389
8390 return err;
8391}
8392
/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 **/
8398static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
8399 u8 *lut, u16 lut_size)
8400{
8401 struct i40e_pf *pf = vsi->back;
8402 struct i40e_hw *hw = &pf->hw;
8403 int ret = 0;
8404
8405 if (seed) {
8406 struct i40e_aqc_get_set_rss_key_data *seed_dw =
8407 (struct i40e_aqc_get_set_rss_key_data *)seed;
8408 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
8409 if (ret) {
8410 dev_info(&pf->pdev->dev,
8411 "Cannot set RSS key, err %s aq_err %s\n",
8412 i40e_stat_str(hw, ret),
8413 i40e_aq_str(hw, hw->aq.asq_last_status));
8414 return ret;
8415 }
8416 }
8417 if (lut) {
bool pf_lut = (vsi->type == I40E_VSI_MAIN);
8419
8420 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
8421 if (ret) {
8422 dev_info(&pf->pdev->dev,
8423 "Cannot set RSS lut, err %s aq_err %s\n",
8424 i40e_stat_str(hw, ret),
8425 i40e_aq_str(hw, hw->aq.asq_last_status));
8426 return ret;
8427 }
8428 }
8429 return ret;
8430}
8431
/**
 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
 * @vsi: Pointer to vsi structure
 * @seed: Buffer to store the hash keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Return 0 on success, negative on failure
 */
8441static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
8442 u8 *lut, u16 lut_size)
8443{
8444 struct i40e_pf *pf = vsi->back;
8445 struct i40e_hw *hw = &pf->hw;
8446 int ret = 0;
8447
8448 if (seed) {
8449 ret = i40e_aq_get_rss_key(hw, vsi->id,
8450 (struct i40e_aqc_get_set_rss_key_data *)seed);
8451 if (ret) {
8452 dev_info(&pf->pdev->dev,
8453 "Cannot get RSS key, err %s aq_err %s\n",
8454 i40e_stat_str(&pf->hw, ret),
8455 i40e_aq_str(&pf->hw,
8456 pf->hw.aq.asq_last_status));
8457 return ret;
8458 }
8459 }
8460
8461 if (lut) {
bool pf_lut = (vsi->type == I40E_VSI_MAIN);
8463
8464 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
8465 if (ret) {
8466 dev_info(&pf->pdev->dev,
8467 "Cannot get RSS lut, err %s aq_err %s\n",
8468 i40e_stat_str(&pf->hw, ret),
8469 i40e_aq_str(&pf->hw,
8470 pf->hw.aq.asq_last_status));
8471 return ret;
8472 }
8473 }
8474
8475 return ret;
8476}
8477
/**
 * i40e_vsi_config_rss - Prepare for VSI (VMDq) RSS if used
 * @vsi: VSI structure
 **/
8482static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
8483{
8484 u8 seed[I40E_HKEY_ARRAY_SIZE];
8485 struct i40e_pf *pf = vsi->back;
8486 u8 *lut;
8487 int ret;
8488
8489 if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
8490 return 0;
8491
8492 if (!vsi->rss_size)
8493 vsi->rss_size = min_t(int, pf->alloc_rss_size,
8494 vsi->num_queue_pairs);
8495 if (!vsi->rss_size)
8496 return -EINVAL;
8497
8498 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8499 if (!lut)
8500 return -ENOMEM;
8501
/* Use the user configured hash keys and lookup table if there is one,
 * otherwise use default
 */
8504 if (vsi->rss_lut_user)
8505 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8506 else
8507 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8508 if (vsi->rss_hkey_user)
8509 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8510 else
8511 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8512 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
8513 kfree(lut);
8514
8515 return ret;
8516}
8517
/**
 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
 * @vsi: Pointer to vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/
8527static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
8528 const u8 *lut, u16 lut_size)
8529{
8530 struct i40e_pf *pf = vsi->back;
8531 struct i40e_hw *hw = &pf->hw;
8532 u16 vf_id = vsi->vf_id;
8533 u8 i;
8534
/* Fill out hash function seed */
8536 if (seed) {
8537 u32 *seed_dw = (u32 *)seed;
8538
8539 if (vsi->type == I40E_VSI_MAIN) {
8540 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8541 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
8542 } else if (vsi->type == I40E_VSI_SRIOV) {
8543 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
8544 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
8545 } else {
8546 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
8547 }
8548 }
8549
8550 if (lut) {
8551 u32 *lut_dw = (u32 *)lut;
8552
8553 if (vsi->type == I40E_VSI_MAIN) {
8554 if (lut_size != I40E_HLUT_ARRAY_SIZE)
8555 return -EINVAL;
8556 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8557 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
8558 } else if (vsi->type == I40E_VSI_SRIOV) {
8559 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
8560 return -EINVAL;
8561 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
8562 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
8563 } else {
8564 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
8565 }
8566 }
8567 i40e_flush(hw);
8568
8569 return 0;
8570}
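
/* The u32 casts above match how the hardware consumes these tables,
 * one 32-bit register at a time: a 52-byte hash key fills
 * I40E_PFQF_HKEY(0..12) and a 512-byte LUT fills I40E_PFQF_HLUT(0..127)
 * with four 8-bit queue indexes per register (PF sizes shown; the VF
 * register arrays are smaller).
 */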
8571
/**
 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
8581static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
8582 u8 *lut, u16 lut_size)
8583{
8584 struct i40e_pf *pf = vsi->back;
8585 struct i40e_hw *hw = &pf->hw;
8586 u16 i;
8587
8588 if (seed) {
8589 u32 *seed_dw = (u32 *)seed;
8590
8591 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8592 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
8593 }
8594 if (lut) {
8595 u32 *lut_dw = (u32 *)lut;
8596
8597 if (lut_size != I40E_HLUT_ARRAY_SIZE)
8598 return -EINVAL;
8599 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8600 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
8601 }
8602
8603 return 0;
8604}
8605
/**
 * i40e_config_rss - Configure RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 */
8615int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8616{
8617 struct i40e_pf *pf = vsi->back;
8618
8619 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
8620 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
8621 else
8622 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
8623}
8624
/**
 * i40e_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
8634int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8635{
8636 struct i40e_pf *pf = vsi->back;
8637
8638 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
8639 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
8640 else
8641 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
8642}
8643
/**
 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
 * @pf: Pointer to board private structure
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 */
8651void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
8652 u16 rss_table_size, u16 rss_size)
8653{
8654 u16 i;
8655
8656 for (i = 0; i < rss_table_size; i++)
8657 lut[i] = i % rss_size;
8658}
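
/* Example of the default spread (illustrative values): with
 * rss_table_size == 8 and rss_size == 3 the LUT becomes
 * { 0, 1, 2, 0, 1, 2, 0, 1 }, i.e. hash buckets are assigned to the
 * active queues round-robin.
 */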
8659
/**
 * i40e_pf_config_rss - Prepare for RSS if used
 * @pf: board private structure
 **/
8664static int i40e_pf_config_rss(struct i40e_pf *pf)
8665{
8666 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8667 u8 seed[I40E_HKEY_ARRAY_SIZE];
8668 u8 *lut;
8669 struct i40e_hw *hw = &pf->hw;
8670 u32 reg_val;
8671 u64 hena;
8672 int ret;
8673
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
8675 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
8676 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
8677 hena |= i40e_pf_get_default_rss_hena(pf);
8678
8679 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
8680 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
8681
8682
8683 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
8684 reg_val = (pf->rss_table_size == 512) ?
8685 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
8686 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
8687 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
8688
/* Determine the RSS size of the VSI */
8690 if (!vsi->rss_size) {
8691 u16 qcount;
8692
8693 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
8694 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
8695 }
8696 if (!vsi->rss_size)
8697 return -EINVAL;
8698
8699 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8700 if (!lut)
8701 return -ENOMEM;
8702
/* Use user configured lut if there is one, otherwise use default */
8704 if (vsi->rss_lut_user)
8705 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8706 else
8707 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8708
8709
/* Use user configured hash key if there is one, otherwise
 * use default.
 */
8712 if (vsi->rss_hkey_user)
8713 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8714 else
8715 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8716 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
8717 kfree(lut);
8718
8719 return ret;
8720}
8721
/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for rss.
 *
 * returns 0 if rss is not enabled, if enabled returns the final rss queue
 * count which may be different from the requested queue count.
 * Note: expects to be called while under rtnl_lock()
 **/
8731int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
8732{
8733 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8734 int new_rss_size;
8735
8736 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
8737 return 0;
8738
8739 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
8740
8741 if (queue_count != vsi->num_queue_pairs) {
8742 u16 qcount;
8743
8744 vsi->req_queue_pairs = queue_count;
8745 i40e_prep_for_reset(pf, true);
8746
8747 pf->alloc_rss_size = new_rss_size;
8748
8749 i40e_reset_and_rebuild(pf, true, true);
8750
/* Discard the user configured hash keys and lut, if less
 * queue pairs are used
 */
8754 if (queue_count < vsi->rss_size) {
8755 i40e_clear_rss_config_user(vsi);
8756 dev_dbg(&pf->pdev->dev,
8757 "discard user configured hash keys and lut\n");
8758 }
8759
/* Reset vsi->rss_size, as number of enabled queues changed */
8761 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
8762 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
8763
8764 i40e_pf_config_rss(pf);
8765 }
8766 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
8767 vsi->req_queue_pairs, pf->rss_size_max);
8768 return pf->alloc_rss_size;
8769}
8770
/**
 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
 * @pf: board private structure
 **/
8775i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
8776{
8777 i40e_status status;
8778 bool min_valid, max_valid;
8779 u32 max_bw, min_bw;
8780
8781 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
8782 &min_valid, &max_valid);
8783
8784 if (!status) {
8785 if (min_valid)
8786 pf->min_bw = min_bw;
8787 if (max_valid)
8788 pf->max_bw = max_bw;
8789 }
8790
8791 return status;
8792}
8793
/**
 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
 * @pf: board private structure
 **/
8798i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
8799{
8800 struct i40e_aqc_configure_partition_bw_data bw_data;
8801 i40e_status status;
8802
8803
8804 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
8805 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
8806 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
8807
8808
8809 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
8810
8811 return status;
8812}
8813
/**
 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
 * @pf: board private structure
 **/
8818i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
8819{
/* Commit temporary BW setting to permanent NVM image */
8821 enum i40e_admin_queue_err last_aq_status;
8822 i40e_status ret;
8823 u16 nvm_word;
8824
8825 if (pf->hw.partition_id != 1) {
8826 dev_info(&pf->pdev->dev,
8827 "Commit BW only works on partition 1! This is partition %d",
8828 pf->hw.partition_id);
8829 ret = I40E_NOT_SUPPORTED;
8830 goto bw_commit_out;
8831 }
8832
/* Acquire NVM for read access */
8834 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
8835 last_aq_status = pf->hw.aq.asq_last_status;
8836 if (ret) {
8837 dev_info(&pf->pdev->dev,
8838 "Cannot acquire NVM for read access, err %s aq_err %s\n",
8839 i40e_stat_str(&pf->hw, ret),
8840 i40e_aq_str(&pf->hw, last_aq_status));
8841 goto bw_commit_out;
8842 }
8843
/* Read word 0x10 of NVM - SW compatibility word 1 */
8845 ret = i40e_aq_read_nvm(&pf->hw,
8846 I40E_SR_NVM_CONTROL_WORD,
8847 0x10, sizeof(nvm_word), &nvm_word,
8848 false, NULL);
8849
/* Save off last admin queue command status before releasing
 * the NVM
 */
8852 last_aq_status = pf->hw.aq.asq_last_status;
8853 i40e_release_nvm(&pf->hw);
8854 if (ret) {
8855 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
8856 i40e_stat_str(&pf->hw, ret),
8857 i40e_aq_str(&pf->hw, last_aq_status));
8858 goto bw_commit_out;
8859 }
8860
/* Wait a bit for NVM release to complete */
8862 msleep(50);
8863
/* Acquire NVM for write access */
8865 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
8866 last_aq_status = pf->hw.aq.asq_last_status;
8867 if (ret) {
8868 dev_info(&pf->pdev->dev,
8869 "Cannot acquire NVM for write access, err %s aq_err %s\n",
8870 i40e_stat_str(&pf->hw, ret),
8871 i40e_aq_str(&pf->hw, last_aq_status));
8872 goto bw_commit_out;
8873 }
8874
/* Write it back out unchanged to initiate update NVM,
 * which will force a write of the shadow (alt) RAM to
 * the NVM - thus storing the bandwidth values permanently.
 */
8878 ret = i40e_aq_update_nvm(&pf->hw,
8879 I40E_SR_NVM_CONTROL_WORD,
8880 0x10, sizeof(nvm_word),
8881 &nvm_word, true, NULL);
8882
/* Save off last admin queue command status before releasing
 * the NVM
 */
8885 last_aq_status = pf->hw.aq.asq_last_status;
8886 i40e_release_nvm(&pf->hw);
8887 if (ret)
8888 dev_info(&pf->pdev->dev,
8889 "BW settings NOT SAVED, err %s aq_err %s\n",
8890 i40e_stat_str(&pf->hw, ret),
8891 i40e_aq_str(&pf->hw, last_aq_status));
8892bw_commit_out:
8893
8894 return ret;
8895}
8896
/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
8905static int i40e_sw_init(struct i40e_pf *pf)
8906{
8907 int err = 0;
8908 int size;
8909
/* Set default capability flags */
8911 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
8912 I40E_FLAG_MSI_ENABLED |
8913 I40E_FLAG_MSIX_ENABLED;
8914
/* Set default ITR */
8916 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
8917 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
8918
8919
/* Depending on PF configurations, it is possible that the RSS
 * maximum might end up larger than the available queues
 */
8922 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
8923 pf->alloc_rss_size = 1;
8924 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
8925 pf->rss_size_max = min_t(int, pf->rss_size_max,
8926 pf->hw.func_caps.num_tx_qp);
8927 if (pf->hw.func_caps.rss) {
8928 pf->flags |= I40E_FLAG_RSS_ENABLED;
8929 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
8930 num_online_cpus());
8931 }
8932
8933
8934 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
8935 pf->flags |= I40E_FLAG_MFP_ENABLED;
8936 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
8937 if (i40e_get_partition_bw_setting(pf)) {
8938 dev_warn(&pf->pdev->dev,
8939 "Could not get partition bw settings\n");
8940 } else {
8941 dev_info(&pf->pdev->dev,
8942 "Partition BW Min = %8.8x, Max = %8.8x\n",
8943 pf->min_bw, pf->max_bw);
8944
8945
8946 i40e_set_partition_bw_setting(pf);
8947 }
8948 }
8949
8950 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
8951 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
8952 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
8953 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
8954 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
8955 pf->hw.num_partitions > 1)
8956 dev_info(&pf->pdev->dev,
8957 "Flow Director Sideband mode Disabled in MFP mode\n");
8958 else
8959 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8960 pf->fdir_pf_filter_count =
8961 pf->hw.func_caps.fd_filters_guaranteed;
8962 pf->hw.fdir_shared_filter_count =
8963 pf->hw.func_caps.fd_filters_best_effort;
8964 }
8965
8966 if (pf->hw.mac.type == I40E_MAC_X722) {
8967 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
8968 I40E_HW_128_QP_RSS_CAPABLE |
8969 I40E_HW_ATR_EVICT_CAPABLE |
8970 I40E_HW_WB_ON_ITR_CAPABLE |
8971 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
8972 I40E_HW_NO_PCI_LINK_CHECK |
8973 I40E_HW_USE_SET_LLDP_MIB |
8974 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
8975 I40E_HW_PTP_L4_CAPABLE |
8976 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
8977 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
8978
8979#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
8980 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
8981 I40E_FDEVICT_PCTYPE_DEFAULT) {
8982 dev_warn(&pf->pdev->dev,
8983 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
8984 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
8985 }
8986 } else if ((pf->hw.aq.api_maj_ver > 1) ||
8987 ((pf->hw.aq.api_maj_ver == 1) &&
8988 (pf->hw.aq.api_min_ver > 4))) {
8989
8990 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
8991 }
8992
8993
8994 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
8995 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
8996
8997 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
8998 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
8999 (pf->hw.aq.fw_maj_ver < 4))) {
9000 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
9001
9002 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
9003 }
9004
9005
9006 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
9007 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
9008 (pf->hw.aq.fw_maj_ver < 4)))
9009 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
9010
9011
9012 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
9013 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
9014 (pf->hw.aq.fw_maj_ver >= 5)))
9015 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
9016
9017 if (pf->hw.func_caps.vmdq) {
9018 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
9019 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
9020 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
9021 }
9022
9023 if (pf->hw.func_caps.iwarp) {
9024 pf->flags |= I40E_FLAG_IWARP_ENABLED;
/* IWARP needs one extra vector for CQP just like MISC. */
9026 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
9027 }
9028
9029#ifdef CONFIG_PCI_IOV
9030 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
9031 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
9032 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
9033 pf->num_req_vfs = min_t(int,
9034 pf->hw.func_caps.num_vfs,
9035 I40E_MAX_VF_COUNT);
9036 }
9037#endif
9038 pf->eeprom_version = 0xDEAD;
9039 pf->lan_veb = I40E_NO_VEB;
9040 pf->lan_vsi = I40E_NO_VSI;
9041
/* By default FW has this off for performance reasons */
9043 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
9044
/* set up queue assignment tracking */
9046 size = sizeof(struct i40e_lump_tracking)
9047 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
9048 pf->qp_pile = kzalloc(size, GFP_KERNEL);
9049 if (!pf->qp_pile) {
9050 err = -ENOMEM;
9051 goto sw_init_done;
9052 }
9053 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
9054 pf->qp_pile->search_hint = 0;
9055
9056 pf->tx_timeout_recovery_level = 1;
9057
9058 mutex_init(&pf->switch_mutex);
9059
9060sw_init_done:
9061 return err;
9062}
9063
/**
 * i40e_set_ntuple - set the ntuple feature flag and take action
 * @pf: board private structure to initialize
 * @features: the feature set that the stack is suggesting
 *
 * returns true if the feature flags changed in a way that requires
 * a reset to take effect
 **/
9071bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
9072{
9073 bool need_reset = false;
9074
/* Check if Flow Director n-tuple support was enabled or disabled.  If
 * the state changed, we need to reset.
 */
9078 if (features & NETIF_F_NTUPLE) {
/* Enable filters and mark for reset */
9080 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
9081 need_reset = true;
9082
9083 if (pf->num_fdsb_msix > 0)
9084 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
9085 } else {
/* turn off filters, mark for reset and clear SW filter list */
9087 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9088 need_reset = true;
9089 i40e_fdir_filter_exit(pf);
9090 }
9091 pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
9092 I40E_FLAG_FD_SB_AUTO_DISABLED);
/* reset fd counters */
9094 pf->fd_add_err = 0;
9095 pf->fd_atr_cnt = 0;
/* if ATR was auto disabled it can be re-enabled. */
9097 if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
9098 pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
9099 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
9100 (I40E_DEBUG_FD & pf->hw.debug_mask))
9101 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
9102 }
9103 }
9104 return need_reset;
9105}
9106
/**
 * i40e_clear_rss_lut - clear the rx hash lookup table
 * @vsi: the VSI being configured
 **/
9111static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
9112{
9113 struct i40e_pf *pf = vsi->back;
9114 struct i40e_hw *hw = &pf->hw;
9115 u16 vf_id = vsi->vf_id;
9116 u8 i;
9117
9118 if (vsi->type == I40E_VSI_MAIN) {
9119 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
9120 wr32(hw, I40E_PFQF_HLUT(i), 0);
9121 } else if (vsi->type == I40E_VSI_SRIOV) {
9122 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
9123 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
9124 } else {
9125 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
9126 }
9127}
9128
/**
 * i40e_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 * Note: expects to be called while under rtnl_lock()
 **/
9135static int i40e_set_features(struct net_device *netdev,
9136 netdev_features_t features)
9137{
9138 struct i40e_netdev_priv *np = netdev_priv(netdev);
9139 struct i40e_vsi *vsi = np->vsi;
9140 struct i40e_pf *pf = vsi->back;
9141 bool need_reset;
9142
9143 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
9144 i40e_pf_config_rss(pf);
9145 else if (!(features & NETIF_F_RXHASH) &&
9146 netdev->features & NETIF_F_RXHASH)
9147 i40e_clear_rss_lut(vsi);
9148
9149 if (features & NETIF_F_HW_VLAN_CTAG_RX)
9150 i40e_vlan_stripping_enable(vsi);
9151 else
9152 i40e_vlan_stripping_disable(vsi);
9153
9154 need_reset = i40e_set_ntuple(pf, features);
9155
9156 if (need_reset)
9157 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
9158
9159 return 0;
9160}
9161
/**
 * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
 * @pf: board private structure
 * @port: The UDP port to look up
 *
 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
 **/
9169static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
9170{
9171 u8 i;
9172
9173 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9174 if (pf->udp_ports[i].port == port)
9175 return i;
9176 }
9177
9178 return i;
9179}
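
/* Caller's contract (descriptive note): the loop falls through with
 * i == I40E_MAX_PF_UDP_OFFLOAD_PORTS when the port is absent, so
 * "idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS" is the found test, and probing
 * with port == 0 finds the first free slot, as i40e_udp_tunnel_add()
 * does below.
 */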
9180
/**
 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 **/
9186static void i40e_udp_tunnel_add(struct net_device *netdev,
9187 struct udp_tunnel_info *ti)
9188{
9189 struct i40e_netdev_priv *np = netdev_priv(netdev);
9190 struct i40e_vsi *vsi = np->vsi;
9191 struct i40e_pf *pf = vsi->back;
9192 u16 port = ntohs(ti->port);
9193 u8 next_idx;
9194 u8 idx;
9195
9196 idx = i40e_get_udp_port_idx(pf, port);
9197
/* Check if port already exists */
9199 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
9200 netdev_info(netdev, "port %d already offloaded\n", port);
9201 return;
9202 }
9203
/* Now check if there is space to add the new port */
9205 next_idx = i40e_get_udp_port_idx(pf, 0);
9206
9207 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
9208 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
9209 port);
9210 return;
9211 }
9212
9213 switch (ti->type) {
9214 case UDP_TUNNEL_TYPE_VXLAN:
9215 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
9216 break;
9217 case UDP_TUNNEL_TYPE_GENEVE:
9218 if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
9219 return;
9220 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
9221 break;
9222 default:
9223 return;
9224 }
9225
/* New port: add it and mark its index in the bitmap */
9227 pf->udp_ports[next_idx].port = port;
9228 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
9229 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
9230}
9231
/**
 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 **/
9237static void i40e_udp_tunnel_del(struct net_device *netdev,
9238 struct udp_tunnel_info *ti)
9239{
9240 struct i40e_netdev_priv *np = netdev_priv(netdev);
9241 struct i40e_vsi *vsi = np->vsi;
9242 struct i40e_pf *pf = vsi->back;
9243 u16 port = ntohs(ti->port);
9244 u8 idx;
9245
9246 idx = i40e_get_udp_port_idx(pf, port);
9247
/* Check if port already exists */
9249 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
9250 goto not_found;
9251
9252 switch (ti->type) {
9253 case UDP_TUNNEL_TYPE_VXLAN:
9254 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
9255 goto not_found;
9256 break;
9257 case UDP_TUNNEL_TYPE_GENEVE:
9258 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
9259 goto not_found;
9260 break;
9261 default:
9262 goto not_found;
9263 }
9264
9265
/* if port exists, set it to 0 (mark for deletion)
 * and make it pending
 */
9268 pf->udp_ports[idx].port = 0;
9269 pf->pending_udp_bitmap |= BIT_ULL(idx);
9270 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
9271
9272 return;
9273not_found:
9274 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
9275 port);
9276}
9277
9278static int i40e_get_phys_port_id(struct net_device *netdev,
9279 struct netdev_phys_item_id *ppid)
9280{
9281 struct i40e_netdev_priv *np = netdev_priv(netdev);
9282 struct i40e_pf *pf = np->vsi->back;
9283 struct i40e_hw *hw = &pf->hw;
9284
9285 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
9286 return -EOPNOTSUPP;
9287
9288 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
9289 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
9290
9291 return 0;
9292}
9293
/**
 * i40e_ndo_fdb_add - add an entry to the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @vid: VLAN ID
 * @flags: instructions from stack about fdb operation
 */
9302static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
9303 struct net_device *dev,
9304 const unsigned char *addr, u16 vid,
9305 u16 flags)
9306{
9307 struct i40e_netdev_priv *np = netdev_priv(dev);
9308 struct i40e_pf *pf = np->vsi->back;
9309 int err = 0;
9310
9311 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
9312 return -EOPNOTSUPP;
9313
9314 if (vid) {
9315 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
9316 return -EINVAL;
9317 }
9318
9319
/* Hardware does not support aging addresses so if a
 * ndm_state is given only allow permanent addresses
 */
9322 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
9323 netdev_info(dev, "FDB only supports static addresses\n");
9324 return -EINVAL;
9325 }
9326
9327 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
9328 err = dev_uc_add_excl(dev, addr);
9329 else if (is_multicast_ether_addr(addr))
9330 err = dev_mc_add_excl(dev, addr);
9331 else
9332 err = -EINVAL;
9333
/* Only return duplicate errors if NLM_F_EXCL is set */
9335 if (err == -EEXIST && !(flags & NLM_F_EXCL))
9336 err = 0;
9337
9338 return err;
9339}
9340
/**
 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge flags
 *
 * Inserts a new hardware bridge if not already created and
 * enables the bridging mode requested (VEB or VEPA). If the
 * hardware bridge has already been inserted and the request
 * is to change the mode then that requires a PF reset to
 * allow rebuild of the components with required hardware
 * bridge mode enabled.
 *
 * Note: expects to be called while under rtnl_lock()
 **/
9355static int i40e_ndo_bridge_setlink(struct net_device *dev,
9356 struct nlmsghdr *nlh,
9357 u16 flags)
9358{
9359 struct i40e_netdev_priv *np = netdev_priv(dev);
9360 struct i40e_vsi *vsi = np->vsi;
9361 struct i40e_pf *pf = vsi->back;
9362 struct i40e_veb *veb = NULL;
9363 struct nlattr *attr, *br_spec;
9364 int i, rem;
9365
/* Only for PF VSI for now */
9367 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
9368 return -EOPNOTSUPP;
9369
/* Find the HW bridge for PF VSI */
9371 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9372 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9373 veb = pf->veb[i];
9374 }
9375
9376 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
9377
9378 nla_for_each_nested(attr, br_spec, rem) {
9379 __u16 mode;
9380
9381 if (nla_type(attr) != IFLA_BRIDGE_MODE)
9382 continue;
9383
9384 mode = nla_get_u16(attr);
9385 if ((mode != BRIDGE_MODE_VEPA) &&
9386 (mode != BRIDGE_MODE_VEB))
9387 return -EINVAL;
9388
/* Insert a new HW bridge */
9390 if (!veb) {
9391 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
9392 vsi->tc_config.enabled_tc);
9393 if (veb) {
9394 veb->bridge_mode = mode;
9395 i40e_config_bridge_mode(veb);
9396 } else {
/* No Bridge HW offload available */
9398 return -ENOENT;
9399 }
9400 break;
9401 } else if (mode != veb->bridge_mode) {
/* Existing HW bridge but different mode needs reset */
9403 veb->bridge_mode = mode;
/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
9405 if (mode == BRIDGE_MODE_VEB)
9406 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
9407 else
9408 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
9409 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED),
9410 true);
9411 break;
9412 }
9413 }
9414
9415 return 0;
9416}
9417
/**
 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process id
 * @seq: RTNL message seq #
 * @dev: the netdev being configured
 * @filter_mask: unused
 * @nlflags: netlink flags passed in
 *
 * Return the mode in which the hardware bridge is operating in
 * i.e VEB or VEPA.
 **/
9430static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9431 struct net_device *dev,
9432 u32 __always_unused filter_mask,
9433 int nlflags)
9434{
9435 struct i40e_netdev_priv *np = netdev_priv(dev);
9436 struct i40e_vsi *vsi = np->vsi;
9437 struct i40e_pf *pf = vsi->back;
9438 struct i40e_veb *veb = NULL;
9439 int i;
9440
/* Only for PF VSI for now */
9442 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
9443 return -EOPNOTSUPP;
9444
/* Find the HW bridge for the PF VSI */
9446 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9447 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9448 veb = pf->veb[i];
9449 }
9450
9451 if (!veb)
9452 return 0;
9453
9454 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
9455 0, 0, nlflags, filter_mask, NULL);
9456}
9457
/**
 * i40e_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 **/
9464static netdev_features_t i40e_features_check(struct sk_buff *skb,
9465 struct net_device *dev,
9466 netdev_features_t features)
9467{
9468 size_t len;
9469
/* No point in doing any of this if neither checksum nor GSO are
 * being requested for this frame.  We can rule out both by just
 * checking for CHECKSUM_PARTIAL
 */
9474 if (skb->ip_summed != CHECKSUM_PARTIAL)
9475 return features;
9476
/* We cannot support GSO if the MSS is going to be less than
 * 64 bytes.  If it is then we need to drop support for GSO.
 */
9480 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
9481 features &= ~NETIF_F_GSO_MASK;
9482
/* MACLEN can support at most 63 words */
9484 len = skb_network_header(skb) - skb->data;
9485 if (len & ~(63 * 2))
9486 goto out_err;
9487
/* IPLEN and EIPLEN can support at most 127 dwords */
9489 len = skb_transport_header(skb) - skb_network_header(skb);
9490 if (len & ~(127 * 4))
9491 goto out_err;
9492
9493 if (skb->encapsulation) {
/* L4TUNLEN can support 127 words */
9495 len = skb_inner_network_header(skb) - skb_transport_header(skb);
9496 if (len & ~(127 * 2))
9497 goto out_err;
9498
/* IPLEN can support at most 127 dwords */
9500 len = skb_inner_transport_header(skb) -
9501 skb_inner_network_header(skb);
9502 if (len & ~(127 * 4))
9503 goto out_err;
9504 }
9505
/* No need to validate L4LEN as TCP is the only protocol with a
 * flexible value and we support all possible values supported
 * by TCP, which is at most 15 dwords
 */
9511 return features;
9512out_err:
9513 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
9514}
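
/* Worked example of the limits above (illustrative only): the Tx
 * descriptor MACLEN field counts 2-byte words up to 63, so
 * "len & ~(63 * 2)" accepts only even L2 header lengths up to 126
 * bytes; the IP header checks use 127 4-byte dwords, i.e. at most
 * 508 bytes, and anything larger falls back to software
 * checksum/GSO via out_err.
 */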
9515
/**
 * i40e_xdp_setup - add/remove an XDP program
 * @vsi: VSI to changed
 * @prog: XDP program
 **/
9521static int i40e_xdp_setup(struct i40e_vsi *vsi,
9522 struct bpf_prog *prog)
9523{
9524 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
9525 struct i40e_pf *pf = vsi->back;
9526 struct bpf_prog *old_prog;
9527 bool need_reset;
9528 int i;
9529
/* Don't allow frames that span over multiple buffers */
9531 if (frame_size > vsi->rx_buf_len)
9532 return -EINVAL;
9533
9534 if (!i40e_enabled_xdp_vsi(vsi) && !prog)
9535 return 0;
9536
/* When turning XDP on->off/off->on we reset and rebuild the rings. */
9538 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
9539
9540 if (need_reset)
9541 i40e_prep_for_reset(pf, true);
9542
9543 old_prog = xchg(&vsi->xdp_prog, prog);
9544
9545 if (need_reset)
9546 i40e_reset_and_rebuild(pf, true, true);
9547
9548 for (i = 0; i < vsi->num_queue_pairs; i++)
9549 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
9550
9551 if (old_prog)
9552 bpf_prog_put(old_prog);
9553
9554 return 0;
9555}
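
/* Attach-path sketch (descriptive only): the stack calls ndo_xdp with
 * XDP_SETUP_PROG and i40e_xdp() below forwards here.  Only an
 * off<->on transition pays for the prep/reset/rebuild; replacing one
 * program with another is just the xchg() plus the per-ring pointer
 * updates above.
 */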

/**
 * i40e_xdp - implements ndo_xdp for i40e
 * @dev: netdevice
 * @xdp: XDP command
 **/
9562static int i40e_xdp(struct net_device *dev,
9563 struct netdev_xdp *xdp)
9564{
9565 struct i40e_netdev_priv *np = netdev_priv(dev);
9566 struct i40e_vsi *vsi = np->vsi;
9567
9568 if (vsi->type != I40E_VSI_MAIN)
9569 return -EINVAL;
9570
9571 switch (xdp->command) {
9572 case XDP_SETUP_PROG:
9573 return i40e_xdp_setup(vsi, xdp->prog);
9574 case XDP_QUERY_PROG:
9575 xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
9576 xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
9577 return 0;
9578 default:
9579 return -EINVAL;
9580 }
9581}
9582
9583static const struct net_device_ops i40e_netdev_ops = {
9584 .ndo_open = i40e_open,
9585 .ndo_stop = i40e_close,
9586 .ndo_start_xmit = i40e_lan_xmit_frame,
9587 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
9588 .ndo_set_rx_mode = i40e_set_rx_mode,
9589 .ndo_validate_addr = eth_validate_addr,
9590 .ndo_set_mac_address = i40e_set_mac,
9591 .ndo_change_mtu = i40e_change_mtu,
9592 .ndo_do_ioctl = i40e_ioctl,
9593 .ndo_tx_timeout = i40e_tx_timeout,
9594 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
9595 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
9596#ifdef CONFIG_NET_POLL_CONTROLLER
9597 .ndo_poll_controller = i40e_netpoll,
9598#endif
9599 .ndo_setup_tc = __i40e_setup_tc,
9600 .ndo_set_features = i40e_set_features,
9601 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
9602 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
9603 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
9604 .ndo_get_vf_config = i40e_ndo_get_vf_config,
9605 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
9606 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
9607 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
9608 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
9609 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
9610 .ndo_get_phys_port_id = i40e_get_phys_port_id,
9611 .ndo_fdb_add = i40e_ndo_fdb_add,
9612 .ndo_features_check = i40e_features_check,
9613 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
9614 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
9615 .ndo_xdp = i40e_xdp,
9616};

/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative value on failure
 **/
9624static int i40e_config_netdev(struct i40e_vsi *vsi)
9625{
9626 struct i40e_pf *pf = vsi->back;
9627 struct i40e_hw *hw = &pf->hw;
9628 struct i40e_netdev_priv *np;
9629 struct net_device *netdev;
9630 u8 broadcast[ETH_ALEN];
9631 u8 mac_addr[ETH_ALEN];
9632 int etherdev_size;
9633 netdev_features_t hw_enc_features;
9634 netdev_features_t hw_features;
9635
9636 etherdev_size = sizeof(struct i40e_netdev_priv);
9637 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
9638 if (!netdev)
9639 return -ENOMEM;
9640
9641 vsi->netdev = netdev;
9642 np = netdev_priv(netdev);
9643 np->vsi = vsi;
9644
9645 hw_enc_features = NETIF_F_SG |
9646 NETIF_F_IP_CSUM |
9647 NETIF_F_IPV6_CSUM |
9648 NETIF_F_HIGHDMA |
9649 NETIF_F_SOFT_FEATURES |
9650 NETIF_F_TSO |
9651 NETIF_F_TSO_ECN |
9652 NETIF_F_TSO6 |
9653 NETIF_F_GSO_GRE |
9654 NETIF_F_GSO_GRE_CSUM |
9655 NETIF_F_GSO_PARTIAL |
9656 NETIF_F_GSO_UDP_TUNNEL |
9657 NETIF_F_GSO_UDP_TUNNEL_CSUM |
9658 NETIF_F_SCTP_CRC |
9659 NETIF_F_RXHASH |
9660 NETIF_F_RXCSUM |
9661 0;
9662
9663 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
9664 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
9665
9666 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
9667
9668 netdev->hw_enc_features |= hw_enc_features;
9669
/* record features VLANs can make use of */
9671 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
9672
9673 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
9674 netdev->hw_features |= NETIF_F_NTUPLE;
9675 hw_features = hw_enc_features |
9676 NETIF_F_HW_VLAN_CTAG_TX |
9677 NETIF_F_HW_VLAN_CTAG_RX;
9678
9679 netdev->hw_features |= hw_features;
9680
9681 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
9682 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
9683
9684 if (vsi->type == I40E_VSI_MAIN) {
9685 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
9686 ether_addr_copy(mac_addr, hw->mac.perm_addr);
/* The following steps are necessary for two reasons. First,
 * some older NVM configurations load a default MAC-VLAN
 * filter that will accept any tagged packet, and we want to
 * replace this with a normal filter. Additionally, it is
 * possible our MAC address was provided by the platform using
 * Open Firmware or similar.
 *
 * Thus, we need to remove the default filter and install one
 * specific to the MAC address.
 */
9697 i40e_rm_default_mac_filter(vsi, mac_addr);
9698 spin_lock_bh(&vsi->mac_filter_hash_lock);
9699 i40e_add_mac_filter(vsi, mac_addr);
9700 spin_unlock_bh(&vsi->mac_filter_hash_lock);
9701 } else {
/* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
 * the end, which is 4 bytes long, so force truncation of the
 * original name by IFNAMSIZ - 4
 */
9707 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
9708 IFNAMSIZ - 4,
9709 pf->vsi[pf->lan_vsi]->netdev->name);
9710 random_ether_addr(mac_addr);
9711
9712 spin_lock_bh(&vsi->mac_filter_hash_lock);
9713 i40e_add_mac_filter(vsi, mac_addr);
9714 spin_unlock_bh(&vsi->mac_filter_hash_lock);
9715 }
9716
/* Add the broadcast filter so that we initially will receive
 * broadcast packets. Note that when a new VLAN is first added the
 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
 * specific filters as part of transitioning into "vlan" mode.
 * When more VLANs are added, the driver will copy each existing MAC
 * filter and add it for the new VLAN.
 *
 * Broadcast filters are handled specially by
 * i40e_sync_filters_subtask, as the driver must set the broadcast
 * promiscuous flag instead of adding a MAC filter for these
 * addresses.
 */
9730 eth_broadcast_addr(broadcast);
9731 spin_lock_bh(&vsi->mac_filter_hash_lock);
9732 i40e_add_mac_filter(vsi, broadcast);
9733 spin_unlock_bh(&vsi->mac_filter_hash_lock);
9734
9735 ether_addr_copy(netdev->dev_addr, mac_addr);
9736 ether_addr_copy(netdev->perm_addr, mac_addr);
9737
9738 netdev->priv_flags |= IFF_UNICAST_FLT;
9739 netdev->priv_flags |= IFF_SUPP_NOFCS;
9740
9741 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
9742
9743 netdev->netdev_ops = &i40e_netdev_ops;
9744 netdev->watchdog_timeo = 5 * HZ;
9745 i40e_set_ethtool_ops(netdev);
9746
/* MTU range: 68 - 9706 */
9748 netdev->min_mtu = ETH_MIN_MTU;
9749 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
9750
9751 return 0;
9752}
9753
/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 *
 * Returns 0 on success, negative value on failure
 **/
9760static void i40e_vsi_delete(struct i40e_vsi *vsi)
9761{
/* remove default VSI is not allowed */
9763 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
9764 return;
9765
9766 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
9767}
9768
/**
 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
 * @vsi: the VSI being queried
 *
 * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode
 **/
9775int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
9776{
9777 struct i40e_veb *veb;
9778 struct i40e_pf *pf = vsi->back;
9779
/* Uplink is not a bridge so default to VEB */
9781 if (vsi->veb_idx == I40E_NO_VEB)
9782 return 1;
9783
9784 veb = pf->veb[vsi->veb_idx];
9785 if (!veb) {
9786 dev_info(&pf->pdev->dev,
9787 "There is no veb associated with the bridge\n");
9788 return -ENOENT;
9789 }
9790
/* Uplink is a bridge in VEPA mode */
9792 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
9793 return 0;
9794 } else {
/* Uplink is a bridge in VEB mode */
9796 return 1;
9797 }
9798
/* VEPA is now default bridge, so return 0 */
9800 return 0;
9801}
9802
/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.
 **/
9810static int i40e_add_vsi(struct i40e_vsi *vsi)
9811{
9812 int ret = -ENODEV;
9813 struct i40e_pf *pf = vsi->back;
9814 struct i40e_hw *hw = &pf->hw;
9815 struct i40e_vsi_context ctxt;
9816 struct i40e_mac_filter *f;
9817 struct hlist_node *h;
9818 int bkt;
9819
9820 u8 enabled_tc = 0x1;
9821 int f_count = 0;
9822
9823 memset(&ctxt, 0, sizeof(ctxt));
9824 switch (vsi->type) {
9825 case I40E_VSI_MAIN:
/* The PF's main VSI is already setup as part of the
 * device initialization, so we'll not bother with
 * the add_vsi call, but we will retrieve the current
 * VSI context.
 */
9831 ctxt.seid = pf->main_vsi_seid;
9832 ctxt.pf_num = pf->hw.pf_id;
9833 ctxt.vf_num = 0;
9834 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9835 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9836 if (ret) {
9837 dev_info(&pf->pdev->dev,
9838 "couldn't get PF vsi config, err %s aq_err %s\n",
9839 i40e_stat_str(&pf->hw, ret),
9840 i40e_aq_str(&pf->hw,
9841 pf->hw.aq.asq_last_status));
9842 return -ENOENT;
9843 }
9844 vsi->info = ctxt.info;
9845 vsi->info.valid_sections = 0;
9846
9847 vsi->seid = ctxt.seid;
9848 vsi->id = ctxt.vsi_number;
9849
9850 enabled_tc = i40e_pf_get_tc_map(pf);
9851
9852
9853 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
9854 !(pf->hw.func_caps.iscsi)) {
9855 memset(&ctxt, 0, sizeof(ctxt));
9856 ctxt.seid = pf->main_vsi_seid;
9857 ctxt.pf_num = pf->hw.pf_id;
9858 ctxt.vf_num = 0;
9859 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
9860 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
9861 if (ret) {
9862 dev_info(&pf->pdev->dev,
9863 "update vsi failed, err %s aq_err %s\n",
9864 i40e_stat_str(&pf->hw, ret),
9865 i40e_aq_str(&pf->hw,
9866 pf->hw.aq.asq_last_status));
9867 ret = -ENOENT;
9868 goto err;
9869 }
9870
9871 i40e_vsi_update_queue_map(vsi, &ctxt);
9872 vsi->info.valid_sections = 0;
9873 } else {
			/* Default/Main VSI is only enabled for TC0;
			 * reconfigure it to enable all TCs that are
			 * available on this port in SFP mode.
			 * For the MFP case the iSCSI PF would use this
			 * flow to enable LAN+iSCSI mode.
			 */
9880 ret = i40e_vsi_config_tc(vsi, enabled_tc);
9881 if (ret) {
				/* Single TC condition is not fatal,
				 * message and continue
				 */
9885 dev_info(&pf->pdev->dev,
9886 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
9887 enabled_tc,
9888 i40e_stat_str(&pf->hw, ret),
9889 i40e_aq_str(&pf->hw,
9890 pf->hw.aq.asq_last_status));
9891 }
9892 }
9893 break;
9894
9895 case I40E_VSI_FDIR:
9896 ctxt.pf_num = hw->pf_id;
9897 ctxt.vf_num = 0;
9898 ctxt.uplink_seid = vsi->uplink_seid;
9899 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9900 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9901 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
9902 (i40e_is_vsi_uplink_mode_veb(vsi))) {
9903 ctxt.info.valid_sections |=
9904 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9905 ctxt.info.switch_id =
9906 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9907 }
9908 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9909 break;
9910
9911 case I40E_VSI_VMDQ2:
9912 ctxt.pf_num = hw->pf_id;
9913 ctxt.vf_num = 0;
9914 ctxt.uplink_seid = vsi->uplink_seid;
9915 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9916 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

		/* This VSI is connected to a VEB so the switch_id
		 * should be set to zero by default.
		 */
9921 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9922 ctxt.info.valid_sections |=
9923 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9924 ctxt.info.switch_id =
9925 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9926 }
9927
9928
9929 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9930 break;
9931
9932 case I40E_VSI_SRIOV:
9933 ctxt.pf_num = hw->pf_id;
9934 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
9935 ctxt.uplink_seid = vsi->uplink_seid;
9936 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9937 ctxt.flags = I40E_AQ_VSI_TYPE_VF;

		/* This VSI is connected to a VEB so the switch_id
		 * should be set to zero by default.
		 */
9942 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9943 ctxt.info.valid_sections |=
9944 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9945 ctxt.info.switch_id =
9946 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9947 }
9948
9949 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
9950 ctxt.info.valid_sections |=
9951 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
9952 ctxt.info.queueing_opt_flags |=
9953 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
9954 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
9955 }
9956
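		/* port VLAN mode "all": the VF VSI passes frames regardless
		 * of their VLAN tag
		 */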
9957 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
9958 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
9959 if (pf->vf[vsi->vf_id].spoofchk) {
9960 ctxt.info.valid_sections |=
9961 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
9962 ctxt.info.sec_flags |=
9963 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
9964 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
9965 }
9966
9967 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9968 break;
9969
9970 case I40E_VSI_IWARP:
		/* send down message to iWARP */
9972 break;
9973
9974 default:
9975 return -ENODEV;
9976 }
9977
9978 if (vsi->type != I40E_VSI_MAIN) {
9979 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
9980 if (ret) {
9981 dev_info(&vsi->back->pdev->dev,
9982 "add vsi failed, err %s aq_err %s\n",
9983 i40e_stat_str(&pf->hw, ret),
9984 i40e_aq_str(&pf->hw,
9985 pf->hw.aq.asq_last_status));
9986 ret = -ENOENT;
9987 goto err;
9988 }
9989 vsi->info = ctxt.info;
9990 vsi->info.valid_sections = 0;
9991 vsi->seid = ctxt.seid;
9992 vsi->id = ctxt.vsi_number;
9993 }
9994
9995 vsi->active_filters = 0;
9996 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
9997 spin_lock_bh(&vsi->mac_filter_hash_lock);
	/* If macvlan filters already exist, force them to get loaded */
9999 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
10000 f->state = I40E_FILTER_NEW;
10001 f_count++;
10002 }
10003 spin_unlock_bh(&vsi->mac_filter_hash_lock);
10004
10005 if (f_count) {
10006 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
10007 pf->flags |= I40E_FLAG_FILTER_SYNC;
10008 }
10009
10010
10011 ret = i40e_vsi_get_bw_info(vsi);
10012 if (ret) {
10013 dev_info(&pf->pdev->dev,
10014 "couldn't get vsi bw info, err %s aq_err %s\n",
10015 i40e_stat_str(&pf->hw, ret),
10016 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* VSI is already added so not tearing that up */
10018 ret = 0;
10019 }
10020
10021err:
10022 return ret;
10023}
10024
/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 **/
10031int i40e_vsi_release(struct i40e_vsi *vsi)
10032{
10033 struct i40e_mac_filter *f;
10034 struct hlist_node *h;
10035 struct i40e_veb *veb = NULL;
10036 struct i40e_pf *pf;
10037 u16 uplink_seid;
10038 int i, n, bkt;
10039
10040 pf = vsi->back;

	/* release of a VEB-owner or last VSI is not allowed */
10043 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
10044 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
10045 vsi->seid, vsi->uplink_seid);
10046 return -ENODEV;
10047 }
10048 if (vsi == pf->vsi[pf->lan_vsi] &&
10049 !test_bit(__I40E_DOWN, pf->state)) {
10050 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
10051 return -ENODEV;
10052 }
10053
10054 uplink_seid = vsi->uplink_seid;
10055 if (vsi->type != I40E_VSI_SRIOV) {
10056 if (vsi->netdev_registered) {
10057 vsi->netdev_registered = false;
10058 if (vsi->netdev) {
10059
10060 unregister_netdev(vsi->netdev);
10061 }
10062 } else {
10063 i40e_vsi_close(vsi);
10064 }
10065 i40e_vsi_disable_irq(vsi);
10066 }
10067
10068 spin_lock_bh(&vsi->mac_filter_hash_lock);
10069
10070
10071 if (vsi->netdev) {
10072 __dev_uc_unsync(vsi->netdev, NULL);
10073 __dev_mc_unsync(vsi->netdev, NULL);
10074 }
10075
	/* make sure any remaining filters are marked for deletion */
10077 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
10078 __i40e_del_filter(vsi, f);
10079
10080 spin_unlock_bh(&vsi->mac_filter_hash_lock);
10081
10082 i40e_sync_vsi_filters(vsi);
10083
10084 i40e_vsi_delete(vsi);
10085 i40e_vsi_free_q_vectors(vsi);
10086 if (vsi->netdev) {
10087 free_netdev(vsi->netdev);
10088 vsi->netdev = NULL;
10089 }
10090 i40e_vsi_clear_rings(vsi);
10091 i40e_vsi_clear(vsi);
10092
	/* If this was the last thing on the VEB, except for the
	 * controlling VSI, remove the VEB, which puts the controlling
	 * VSI onto the next level down in the switch.
	 *
	 * Well, okay, there's one more exception here: don't remove
	 * the orphan VEBs yet.  We'll wait for an explicit remove request
	 * from up the network stack.
	 */
10101 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
10102 if (pf->vsi[i] &&
10103 pf->vsi[i]->uplink_seid == uplink_seid &&
10104 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
10105 n++;
10106 }
10107 }
10108 for (i = 0; i < I40E_MAX_VEB; i++) {
10109 if (!pf->veb[i])
10110 continue;
10111 if (pf->veb[i]->uplink_seid == uplink_seid)
10112 n++;
10113 if (pf->veb[i]->seid == uplink_seid)
10114 veb = pf->veb[i];
10115 }
10116 if (n == 0 && veb && veb->uplink_seid != 0)
10117 i40e_veb_release(veb);
10118
10119 return 0;
10120}
10121
/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after i40e_vsi_mem_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 **/
10132static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
10133{
10134 int ret = -ENOENT;
10135 struct i40e_pf *pf = vsi->back;
10136
10137 if (vsi->q_vectors[0]) {
10138 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
10139 vsi->seid);
10140 return -EEXIST;
10141 }
10142
10143 if (vsi->base_vector) {
10144 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
10145 vsi->seid, vsi->base_vector);
10146 return -EEXIST;
10147 }
10148
10149 ret = i40e_vsi_alloc_q_vectors(vsi);
10150 if (ret) {
10151 dev_info(&pf->pdev->dev,
10152 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
10153 vsi->num_q_vectors, vsi->seid, ret);
10154 vsi->num_q_vectors = 0;
10155 goto vector_setup_out;
10156 }
10157
	/* In Legacy mode, we do not have to get any other vector since we
	 * piggyback on the misc/ICR0 for queue interrupts.
	 */
10161 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
10162 return ret;
10163 if (vsi->num_q_vectors)
10164 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
10165 vsi->num_q_vectors, vsi->idx);
10166 if (vsi->base_vector < 0) {
10167 dev_info(&pf->pdev->dev,
10168 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
10169 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
10170 i40e_vsi_free_q_vectors(vsi);
10171 ret = -ENOENT;
10172 goto vector_setup_out;
10173 }
10174
10175vector_setup_out:
10176 return ret;
10177}
10178
/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi.
 *
 * This re-allocates a vsi's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
10188static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
10189{
10190 u16 alloc_queue_pairs;
10191 struct i40e_pf *pf;
10192 u8 enabled_tc;
10193 int ret;
10194
10195 if (!vsi)
10196 return NULL;
10197
10198 pf = vsi->back;
10199
10200 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
10201 i40e_vsi_clear_rings(vsi);
10202
10203 i40e_vsi_free_arrays(vsi, false);
10204 i40e_set_num_rings_in_vsi(vsi);
10205 ret = i40e_vsi_alloc_arrays(vsi, false);
10206 if (ret)
10207 goto err_vsi;
10208
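	/* an XDP-enabled VSI carries a second set of Tx rings for XDP_TX,
	 * so reserve twice the queue pairs in that case
	 */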
10209 alloc_queue_pairs = vsi->alloc_queue_pairs *
10210 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
10211
10212 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
10213 if (ret < 0) {
10214 dev_info(&pf->pdev->dev,
10215 "failed to get tracking for %d queues for VSI %d err %d\n",
10216 alloc_queue_pairs, vsi->seid, ret);
10217 goto err_vsi;
10218 }
10219 vsi->base_queue = ret;
10220
	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
10224 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
10225 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
10226 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
10227 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
10228 if (vsi->type == I40E_VSI_MAIN)
10229 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
10230
	/* assign it some queues */
10232 ret = i40e_alloc_rings(vsi);
10233 if (ret)
10234 goto err_rings;
10235
	/* map all of the rings to the q_vectors */
10237 i40e_vsi_map_rings_to_vectors(vsi);
10238 return vsi;
10239
10240err_rings:
10241 i40e_vsi_free_q_vectors(vsi);
10242 if (vsi->netdev_registered) {
10243 vsi->netdev_registered = false;
10244 unregister_netdev(vsi->netdev);
10245 free_netdev(vsi->netdev);
10246 vsi->netdev = NULL;
10247 }
10248 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
10249err_vsi:
10250 i40e_vsi_clear(vsi);
10251 return NULL;
10252}
10253
/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then adds a
 * VSI to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
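/* For example (hypothetical values), a VMDq VSI under the main LAN VSI
 * would be created with:
 *	vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2,
 *			     pf->vsi[pf->lan_vsi]->seid, 0);
 */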
10267struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
10268 u16 uplink_seid, u32 param1)
10269{
10270 struct i40e_vsi *vsi = NULL;
10271 struct i40e_veb *veb = NULL;
10272 u16 alloc_queue_pairs;
10273 int ret, i;
10274 int v_idx;
10275
	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
10289 for (i = 0; i < I40E_MAX_VEB; i++) {
10290 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
10291 veb = pf->veb[i];
10292 break;
10293 }
10294 }
10295
10296 if (!veb && uplink_seid != pf->mac_seid) {
10297
10298 for (i = 0; i < pf->num_alloc_vsi; i++) {
10299 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
10300 vsi = pf->vsi[i];
10301 break;
10302 }
10303 }
10304 if (!vsi) {
10305 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
10306 uplink_seid);
10307 return NULL;
10308 }
10309
10310 if (vsi->uplink_seid == pf->mac_seid)
10311 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
10312 vsi->tc_config.enabled_tc);
10313 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
10314 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
10315 vsi->tc_config.enabled_tc);
10316 if (veb) {
10317 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
10318 dev_info(&vsi->back->pdev->dev,
10319 "New VSI creation error, uplink seid of LAN VSI expected.\n");
10320 return NULL;
10321 }
			/* We come up by default in VEPA mode if SRIOV is not
			 * already enabled, in which case we can't force VEPA
			 * mode.
			 */
10326 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
10327 veb->bridge_mode = BRIDGE_MODE_VEPA;
10328 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
10329 }
10330 i40e_config_bridge_mode(veb);
10331 }
10332 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
10333 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
10334 veb = pf->veb[i];
10335 }
10336 if (!veb) {
10337 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
10338 return NULL;
10339 }
10340
10341 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
10342 uplink_seid = veb->seid;
10343 }
10344
	/* get vsi sw struct */
10346 v_idx = i40e_vsi_mem_alloc(pf, type);
10347 if (v_idx < 0)
10348 goto err_alloc;
10349 vsi = pf->vsi[v_idx];
10350 if (!vsi)
10351 goto err_alloc;
10352 vsi->type = type;
10353 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
10354
10355 if (type == I40E_VSI_MAIN)
10356 pf->lan_vsi = v_idx;
10357 else if (type == I40E_VSI_SRIOV)
10358 vsi->vf_id = param1;
10359
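	/* as in i40e_vsi_reinit_setup(), XDP needs its own set of Tx rings,
	 * doubling the queue pair reservation
	 */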
10360 alloc_queue_pairs = vsi->alloc_queue_pairs *
10361 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
10362
10363 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
10364 if (ret < 0) {
10365 dev_info(&pf->pdev->dev,
10366 "failed to get tracking for %d queues for VSI %d err=%d\n",
10367 alloc_queue_pairs, vsi->seid, ret);
10368 goto err_vsi;
10369 }
10370 vsi->base_queue = ret;
10371
	/* get a VSI from the hardware */
10373 vsi->uplink_seid = uplink_seid;
10374 ret = i40e_add_vsi(vsi);
10375 if (ret)
10376 goto err_vsi;
10377
10378 switch (vsi->type) {
	/* setup the netdev if needed */
10380 case I40E_VSI_MAIN:
10381 case I40E_VSI_VMDQ2:
10382 ret = i40e_config_netdev(vsi);
10383 if (ret)
10384 goto err_netdev;
10385 ret = register_netdev(vsi->netdev);
10386 if (ret)
10387 goto err_netdev;
10388 vsi->netdev_registered = true;
10389 netif_carrier_off(vsi->netdev);
10390#ifdef CONFIG_I40E_DCB
10391
10392 i40e_dcbnl_setup(vsi);
10393#endif
		/* fall through */

10396 case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
10398 ret = i40e_vsi_setup_vectors(vsi);
10399 if (ret)
10400 goto err_msix;
10401
10402 ret = i40e_alloc_rings(vsi);
10403 if (ret)
10404 goto err_rings;
10405
		/* map all of the rings to the q_vectors */
10407 i40e_vsi_map_rings_to_vectors(vsi);
10408
10409 i40e_vsi_reset_stats(vsi);
10410 break;
10411
10412 default:
		/* no netdev or rings for the other VSI types */
10414 break;
10415 }
10416
10417 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
10418 (vsi->type == I40E_VSI_VMDQ2)) {
10419 ret = i40e_vsi_config_rss(vsi);
10420 }
10421 return vsi;
10422
10423err_rings:
10424 i40e_vsi_free_q_vectors(vsi);
10425err_msix:
10426 if (vsi->netdev_registered) {
10427 vsi->netdev_registered = false;
10428 unregister_netdev(vsi->netdev);
10429 free_netdev(vsi->netdev);
10430 vsi->netdev = NULL;
10431 }
10432err_netdev:
10433 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
10434err_vsi:
10435 i40e_vsi_clear(vsi);
10436err_alloc:
10437 return NULL;
10438}
10439
/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB
 **/
10446static int i40e_veb_get_bw_info(struct i40e_veb *veb)
10447{
10448 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
10449 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
10450 struct i40e_pf *pf = veb->pf;
10451 struct i40e_hw *hw = &pf->hw;
10452 u32 tc_bw_max;
10453 int ret = 0;
10454 int i;
10455
10456 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10457 &bw_data, NULL);
10458 if (ret) {
10459 dev_info(&pf->pdev->dev,
10460 "query veb bw config failed, err %s aq_err %s\n",
10461 i40e_stat_str(&pf->hw, ret),
10462 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
10463 goto out;
10464 }
10465
10466 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10467 &ets_data, NULL);
10468 if (ret) {
10469 dev_info(&pf->pdev->dev,
10470 "query veb bw ets config failed, err %s aq_err %s\n",
10471 i40e_stat_str(&pf->hw, ret),
10472 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
10473 goto out;
10474 }
10475
10476 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
10477 veb->bw_max_quanta = ets_data.tc_bw_max;
10478 veb->is_abs_credits = bw_data.absolute_credits_enable;
10479 veb->enabled_tc = ets_data.tc_valid_bits;
10480 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
10481 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
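	/* tc_bw_max packs a 4-bit "max quanta" field per TC across the two
	 * 16-bit words combined above; the loop below peels them apart
	 */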
10482 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10483 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
10484 veb->bw_tc_limit_credits[i] =
10485 le16_to_cpu(bw_data.tc_bw_limits[i]);
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i * 4)) & 0x7);
10487 }
10488
10489out:
10490 return ret;
10491}
10492
/**
 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On error: returns error code (negative)
 * On success: returns veb index in PF (positive)
 **/
10500static int i40e_veb_mem_alloc(struct i40e_pf *pf)
10501{
10502 int ret = -ENOENT;
10503 struct i40e_veb *veb;
10504 int i;
10505
	/* Need to protect the allocation of switch elements at the PF level */
10507 mutex_lock(&pf->switch_mutex);
10508
	/* The VEB list may be fragmented if VEB creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free slots in the list.
	 *
	 * find next empty veb slot, looping back around if necessary
	 */
10515 i = 0;
10516 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
10517 i++;
10518 if (i >= I40E_MAX_VEB) {
10519 ret = -ENOMEM;
10520 goto err_alloc_veb;
10521 }
10522
10523 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
10524 if (!veb) {
10525 ret = -ENOMEM;
10526 goto err_alloc_veb;
10527 }
10528 veb->pf = pf;
10529 veb->idx = i;
10530 veb->enabled_tc = 1;
10531
10532 pf->veb[i] = veb;
10533 ret = i;
10534err_alloc_veb:
10535 mutex_unlock(&pf->switch_mutex);
10536 return ret;
10537}
10538
/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB
 * itself.
 **/
10546static void i40e_switch_branch_release(struct i40e_veb *branch)
10547{
10548 struct i40e_pf *pf = branch->pf;
10549 u16 branch_seid = branch->seid;
10550 u16 veb_idx = branch->idx;
10551 int i;
10552
	/* release the VEBs on this VEB and then release this VEB */
10554 for (i = 0; i < I40E_MAX_VEB; i++) {
10555 if (!pf->veb[i])
10556 continue;
10557 if (pf->veb[i]->uplink_seid == branch->seid)
10558 i40e_switch_branch_release(pf->veb[i]);
10559 }
	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */

10566 for (i = 0; i < pf->num_alloc_vsi; i++) {
10567 if (!pf->vsi[i])
10568 continue;
10569 if (pf->vsi[i]->uplink_seid == branch_seid &&
10570 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
10571 i40e_vsi_release(pf->vsi[i]);
10572 }
10573 }
	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
10580 if (pf->veb[veb_idx])
10581 i40e_veb_release(pf->veb[veb_idx]);
10582}
10583
/**
 * i40e_veb_clear - remove veb struct
 * @veb: the veb to remove
 **/
10588static void i40e_veb_clear(struct i40e_veb *veb)
10589{
10590 if (!veb)
10591 return;
10592
10593 if (veb->pf) {
10594 struct i40e_pf *pf = veb->pf;
10595
10596 mutex_lock(&pf->switch_mutex);
10597 if (pf->veb[veb->idx] == veb)
10598 pf->veb[veb->idx] = NULL;
10599 mutex_unlock(&pf->switch_mutex);
10600 }
10601
10602 kfree(veb);
10603}
10604
/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 **/
10609void i40e_veb_release(struct i40e_veb *veb)
10610{
10611 struct i40e_vsi *vsi = NULL;
10612 struct i40e_pf *pf;
10613 int i, n = 0;
10614
10615 pf = veb->pf;
10616
	/* find the remaining VSI and check for extras */
10618 for (i = 0; i < pf->num_alloc_vsi; i++) {
10619 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
10620 n++;
10621 vsi = pf->vsi[i];
10622 }
10623 }
10624 if (n != 1) {
10625 dev_info(&pf->pdev->dev,
10626 "can't remove VEB %d with %d VSIs left\n",
10627 veb->seid, n);
10628 return;
10629 }
10630
	/* move the remaining VSI to the uplink veb */
10632 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
10633 if (veb->uplink_seid) {
10634 vsi->uplink_seid = veb->uplink_seid;
10635 if (veb->uplink_seid == pf->mac_seid)
10636 vsi->veb_idx = I40E_NO_VEB;
10637 else
10638 vsi->veb_idx = veb->veb_idx;
10639 } else {
		/* floating VEB */
10641 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10642 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
10643 }
10644
10645 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10646 i40e_veb_clear(veb);
10647}
10648
/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 **/
10654static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
10655{
10656 struct i40e_pf *pf = veb->pf;
10657 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
10658 int ret;

	/* get a VEB from the hardware */
10660 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
10661 veb->enabled_tc, false,
10662 &veb->seid, enable_stats, NULL);
10665 if (ret) {
10666 dev_info(&pf->pdev->dev,
10667 "couldn't add VEB, err %s aq_err %s\n",
10668 i40e_stat_str(&pf->hw, ret),
10669 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10670 return -EPERM;
10671 }
10672
	/* get statistics counter */
10674 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
10675 &veb->stats_idx, NULL, NULL, NULL);
10676 if (ret) {
10677 dev_info(&pf->pdev->dev,
10678 "couldn't get VEB statistics idx, err %s aq_err %s\n",
10679 i40e_stat_str(&pf->hw, ret),
10680 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10681 return -EPERM;
10682 }
10683 ret = i40e_veb_get_bw_info(veb);
10684 if (ret) {
10685 dev_info(&pf->pdev->dev,
10686 "couldn't get VEB bw info, err %s aq_err %s\n",
10687 i40e_stat_str(&pf->hw, ret),
10688 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10689 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10690 return -ENOENT;
10691 }
10692
10693 vsi->uplink_seid = veb->seid;
10694 vsi->veb_idx = veb->idx;
10695 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
10696
10697 return 0;
10698}
10699
/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB.  It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
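/* For example, the first VEB under the MAC uplink would be created with:
 *	veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
 *			     vsi->tc_config.enabled_tc);
 */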
10716struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
10717 u16 uplink_seid, u16 vsi_seid,
10718 u8 enabled_tc)
10719{
10720 struct i40e_veb *veb, *uplink_veb = NULL;
10721 int vsi_idx, veb_idx;
10722 int ret;
10723
	/* if one seid is 0, the other must be 0 to create a floating relay */
10725 if ((uplink_seid == 0 || vsi_seid == 0) &&
10726 (uplink_seid + vsi_seid != 0)) {
10727 dev_info(&pf->pdev->dev,
10728 "one, not both seid's are 0: uplink=%d vsi=%d\n",
10729 uplink_seid, vsi_seid);
10730 return NULL;
10731 }
10732
	/* make sure there is such a VSI and uplink */
10734 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
10735 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
10736 break;
10737 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
10738 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
10739 vsi_seid);
10740 return NULL;
10741 }
10742
10743 if (uplink_seid && uplink_seid != pf->mac_seid) {
10744 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10745 if (pf->veb[veb_idx] &&
10746 pf->veb[veb_idx]->seid == uplink_seid) {
10747 uplink_veb = pf->veb[veb_idx];
10748 break;
10749 }
10750 }
10751 if (!uplink_veb) {
10752 dev_info(&pf->pdev->dev,
10753 "uplink seid %d not found\n", uplink_seid);
10754 return NULL;
10755 }
10756 }
10757
	/* get veb sw struct */
10759 veb_idx = i40e_veb_mem_alloc(pf);
10760 if (veb_idx < 0)
10761 goto err_alloc;
10762 veb = pf->veb[veb_idx];
10763 veb->flags = flags;
10764 veb->uplink_seid = uplink_seid;
10765 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
10766 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
10767
	/* create the VEB in the switch */
10769 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
10770 if (ret)
10771 goto err_veb;
10772 if (vsi_idx == pf->lan_vsi)
10773 pf->lan_veb = veb->idx;
10774
10775 return veb;
10776
10777err_veb:
10778 i40e_veb_clear(veb);
10779err_alloc:
10780 return NULL;
10781}
10782
/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
10792static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
10793 struct i40e_aqc_switch_config_element_resp *ele,
10794 u16 num_reported, bool printconfig)
10795{
10796 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
10797 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
10798 u8 element_type = ele->element_type;
10799 u16 seid = le16_to_cpu(ele->seid);
10800
10801 if (printconfig)
10802 dev_info(&pf->pdev->dev,
10803 "type=%d seid=%d uplink=%d downlink=%d\n",
10804 element_type, seid, uplink_seid, downlink_seid);
10805
10806 switch (element_type) {
10807 case I40E_SWITCH_ELEMENT_TYPE_MAC:
10808 pf->mac_seid = seid;
10809 break;
10810 case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
10812 if (uplink_seid != pf->mac_seid)
10813 break;
10814 if (pf->lan_veb == I40E_NO_VEB) {
10815 int v;
10816
			/* find existing or else empty VEB */
10818 for (v = 0; v < I40E_MAX_VEB; v++) {
10819 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
10820 pf->lan_veb = v;
10821 break;
10822 }
10823 }
10824 if (pf->lan_veb == I40E_NO_VEB) {
10825 v = i40e_veb_mem_alloc(pf);
10826 if (v < 0)
10827 break;
10828 pf->lan_veb = v;
10829 }
10830 }
10831
10832 pf->veb[pf->lan_veb]->seid = seid;
10833 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
10834 pf->veb[pf->lan_veb]->pf = pf;
10835 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
10836 break;
10837 case I40E_SWITCH_ELEMENT_TYPE_VSI:
10838 if (num_reported != 1)
10839 break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
10843 pf->mac_seid = uplink_seid;
10844 pf->pf_seid = downlink_seid;
10845 pf->main_vsi_seid = seid;
10846 if (printconfig)
10847 dev_info(&pf->pdev->dev,
10848 "pf_seid=%d main_vsi_seid=%d\n",
10849 pf->pf_seid, pf->main_vsi_seid);
10850 break;
10851 case I40E_SWITCH_ELEMENT_TYPE_PF:
10852 case I40E_SWITCH_ELEMENT_TYPE_VF:
10853 case I40E_SWITCH_ELEMENT_TYPE_EMP:
10854 case I40E_SWITCH_ELEMENT_TYPE_BMC:
10855 case I40E_SWITCH_ELEMENT_TYPE_PE:
10856 case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
10858 break;
10859 default:
10860 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
10861 element_type, seid);
10862 break;
10863 }
10864}
10865
/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/
10874int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
10875{
10876 struct i40e_aqc_get_switch_config_resp *sw_config;
10877 u16 next_seid = 0;
10878 int ret = 0;
10879 u8 *aq_buf;
10880 int i;
10881
10882 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
10883 if (!aq_buf)
10884 return -ENOMEM;
10885
10886 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
10887 do {
10888 u16 num_reported, num_total;
10889
10890 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
10891 I40E_AQ_LARGE_BUF,
10892 &next_seid, NULL);
10893 if (ret) {
10894 dev_info(&pf->pdev->dev,
10895 "get switch config failed err %s aq_err %s\n",
10896 i40e_stat_str(&pf->hw, ret),
10897 i40e_aq_str(&pf->hw,
10898 pf->hw.aq.asq_last_status));
10899 kfree(aq_buf);
10900 return -ENOENT;
10901 }
10902
10903 num_reported = le16_to_cpu(sw_config->header.num_reported);
10904 num_total = le16_to_cpu(sw_config->header.num_total);
10905
10906 if (printconfig)
10907 dev_info(&pf->pdev->dev,
10908 "header: %d reported %d total\n",
10909 num_reported, num_total);
10910
10911 for (i = 0; i < num_reported; i++) {
10912 struct i40e_aqc_switch_config_element_resp *ele =
10913 &sw_config->element[i];
10914
10915 i40e_setup_pf_switch_element(pf, ele, num_reported,
10916 printconfig);
10917 }
10918 } while (next_seid != 0);
10919
10920 kfree(aq_buf);
10921 return ret;
10922}
10923
/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized
 *
 * Returns 0 on success, negative value on failure
 **/
10931static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
10932{
10933 u16 flags = 0;
10934 int ret;
10935
	/* find out what's out there already */
10937 ret = i40e_fetch_switch_configuration(pf, false);
10938 if (ret) {
10939 dev_info(&pf->pdev->dev,
10940 "couldn't fetch switch config, err %s aq_err %s\n",
10941 i40e_stat_str(&pf->hw, ret),
10942 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10943 return ret;
10944 }
10945 i40e_pf_reset_stats(pf);
10946
	/* Set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when user requests promisc. The default is limited
	 * promisc.
	 */
10953 if ((pf->hw.pf_id == 0) &&
10954 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
10955 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
10956
10957 if (pf->hw.pf_id == 0) {
10958 u16 valid_flags;
10959
10960 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
10961 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
10962 NULL);
10963 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
10964 dev_info(&pf->pdev->dev,
10965 "couldn't set switch config bits, err %s aq_err %s\n",
10966 i40e_stat_str(&pf->hw, ret),
10967 i40e_aq_str(&pf->hw,
10968 pf->hw.aq.asq_last_status));
			/* not a fatal problem, just keep going */
10970 }
10971 }
10972
	/* first time setup */
10974 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
10975 struct i40e_vsi *vsi = NULL;
10976 u16 uplink_seid;
10977
		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
10981 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
10982 uplink_seid = pf->veb[pf->lan_veb]->seid;
10983 else
10984 uplink_seid = pf->mac_seid;
10985 if (pf->lan_vsi == I40E_NO_VSI)
10986 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
10987 else if (reinit)
10988 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
10989 if (!vsi) {
10990 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
10991 i40e_fdir_teardown(pf);
10992 return -EAGAIN;
10993 }
10994 } else {
		/* force a reset of TC and queue layout configurations */
10996 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
10997
10998 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
10999 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
11000 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
11001 }
11002 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
11003
11004 i40e_fdir_sb_setup(pf);
11005
	/* Setup static PF queue filter control settings */
11007 ret = i40e_setup_pf_filter_control(pf);
11008 if (ret) {
11009 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
11010 ret);
		/* Failure here should not stop continuing other steps */
11012 }
11013
	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
11017 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
11018 i40e_pf_config_rss(pf);
11019
	/* fill in link information and enable LSE reporting */
11021 i40e_link_event(pf);
11022
	/* Initialize user-specific link properties */
11024 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
11025 I40E_AQ_AN_COMPLETED) ? true : false);
11026
11027 i40e_ptp_init(pf);
11028
	/* repopulate tunnel port filters */
11030 i40e_sync_udp_filters(pf);
11031
11032 return ret;
11033}
11034
/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
11039static void i40e_determine_queue_usage(struct i40e_pf *pf)
11040{
11041 int queues_left;
11042
11043 pf->num_lan_qps = 0;
11044
	/* Find the max queues to be put into basic use.  We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big RSS set.
	 */
11049 queues_left = pf->hw.func_caps.num_tx_qp;
11050
11051 if ((queues_left == 1) ||
11052 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
11054 queues_left = 0;
11055 pf->alloc_rss_size = pf->num_lan_qps = 1;
11056
		/* make sure all the fancies are disabled */
11058 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
11059 I40E_FLAG_IWARP_ENABLED |
11060 I40E_FLAG_FD_SB_ENABLED |
11061 I40E_FLAG_FD_ATR_ENABLED |
11062 I40E_FLAG_DCB_CAPABLE |
11063 I40E_FLAG_DCB_ENABLED |
11064 I40E_FLAG_SRIOV_ENABLED |
11065 I40E_FLAG_VMDQ_ENABLED);
11066 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
11067 I40E_FLAG_FD_SB_ENABLED |
11068 I40E_FLAG_FD_ATR_ENABLED |
11069 I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
11071 pf->alloc_rss_size = pf->num_lan_qps = 1;
11072 queues_left -= pf->num_lan_qps;
11073
11074 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
11075 I40E_FLAG_IWARP_ENABLED |
11076 I40E_FLAG_FD_SB_ENABLED |
11077 I40E_FLAG_FD_ATR_ENABLED |
11078 I40E_FLAG_DCB_ENABLED |
11079 I40E_FLAG_VMDQ_ENABLED);
11080 } else {
		/* Not enough queues for all TCs */
11082 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
11083 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
11084 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
11085 I40E_FLAG_DCB_ENABLED);
11086 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
11087 }
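		/* Size the LAN queue pool to cover every online CPU (or the
		 * RSS maximum, if larger), then clamp it to the number of
		 * queue pairs the function actually owns.
		 */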
11088 pf->num_lan_qps = max_t(int, pf->rss_size_max,
11089 num_online_cpus());
11090 pf->num_lan_qps = min_t(int, pf->num_lan_qps,
11091 pf->hw.func_caps.num_tx_qp);
11092
11093 queues_left -= pf->num_lan_qps;
11094 }
11095
11096 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11097 if (queues_left > 1) {
11098 queues_left -= 1;
11099 } else {
11100 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11101 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
11102 }
11103 }
11104
11105 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11106 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
11107 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
11108 (queues_left / pf->num_vf_qps));
11109 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
11110 }
11111
11112 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
11113 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
11114 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
11115 (queues_left / pf->num_vmdq_qps));
11116 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
11117 }
11118
11119 pf->queues_left = queues_left;
11120 dev_dbg(&pf->pdev->dev,
11121 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
11122 pf->hw.func_caps.num_tx_qp,
11123 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
11124 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
11125 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
11126 queues_left);
11127}
11128
/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF structure
 *
 * i40e_setup_pf_filter_control sets up a PF queue filter control
 * settings blob to be set in the HW switch control word.
 *
 * Returns 0 on success, negative on failure setting HW filter control
 **/
11140static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
11141{
11142 struct i40e_filter_control_settings *settings = &pf->filter_settings;
11143
11144 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
11145
	/* Flow Director is enabled */
11147 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
11148 settings->enable_fdir = true;
11149
	/* Ethtype and MACVLAN filters enabled for PF */
11151 settings->enable_ethtype = true;
11152 settings->enable_macvlan = true;
11153
11154 if (i40e_set_filter_control(&pf->hw, settings))
11155 return -ENOENT;
11156
11157 return 0;
11158}
11159
11160#define INFO_STRING_LEN 255
11161#define REMAIN(__x) (INFO_STRING_LEN - (__x))
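/* REMAIN() yields the space left in the feature-string buffer so each
 * snprintf() below appends in place; the WARN_ON() at the end catches a
 * would-be overflow of INFO_STRING_LEN.
 */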
11162static void i40e_print_features(struct i40e_pf *pf)
11163{
11164 struct i40e_hw *hw = &pf->hw;
11165 char *buf;
11166 int i;
11167
11168 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
11169 if (!buf)
11170 return;
11171
11172 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
11173#ifdef CONFIG_PCI_IOV
11174 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
11175#endif
11176 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
11177 pf->hw.func_caps.num_vsis,
11178 pf->vsi[pf->lan_vsi]->num_queue_pairs);
11179 if (pf->flags & I40E_FLAG_RSS_ENABLED)
11180 i += snprintf(&buf[i], REMAIN(i), " RSS");
11181 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
11182 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
11183 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11184 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
11185 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
11186 }
11187 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
11188 i += snprintf(&buf[i], REMAIN(i), " DCB");
11189 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
11190 i += snprintf(&buf[i], REMAIN(i), " Geneve");
11191 if (pf->flags & I40E_FLAG_PTP)
11192 i += snprintf(&buf[i], REMAIN(i), " PTP");
11193 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
11194 i += snprintf(&buf[i], REMAIN(i), " VEB");
11195 else
11196 i += snprintf(&buf[i], REMAIN(i), " VEPA");
11197
11198 dev_info(&pf->pdev->dev, "%s\n", buf);
11199 kfree(buf);
11200 WARN_ON(i > INFO_STRING_LEN);
11201}
11202
/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address for the device. First we'll try
 * eth_platform_get_mac_address, which will check Open Firmware, or arch
 * specific fallback. Otherwise, we'll default to the stored value in
 * firmware.
 **/
11213static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
11214{
11215 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
11216 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
11217}
11218
/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
11230static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11231{
11232 struct i40e_aq_get_phy_abilities_resp abilities;
11233 struct i40e_pf *pf;
11234 struct i40e_hw *hw;
11235 static u16 pfs_found;
11236 u16 wol_nvm_bits;
11237 u16 link_status;
11238 int err;
11239 u32 val;
11240 u32 i;
11241 u8 set_fc_aq_fail;
11242
11243 err = pci_enable_device_mem(pdev);
11244 if (err)
11245 return err;
11246
	/* set up for high or low dma */
11248 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11249 if (err) {
11250 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11251 if (err) {
11252 dev_err(&pdev->dev,
11253 "DMA configuration failed: 0x%x\n", err);
11254 goto err_dma;
11255 }
11256 }
11257
	/* set up pci connections */
11259 err = pci_request_mem_regions(pdev, i40e_driver_name);
11260 if (err) {
11261 dev_info(&pdev->dev,
11262 "pci_request_selected_regions failed %d\n", err);
11263 goto err_pci_reg;
11264 }
11265
11266 pci_enable_pcie_error_reporting(pdev);
11267 pci_set_master(pdev);
11268
	/* Now that we have a PCI connection, we need to do the
	 * low level device setup.  This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
11274 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
11275 if (!pf) {
11276 err = -ENOMEM;
11277 goto err_pf_alloc;
11278 }
11279 pf->next_vsi = 0;
11280 pf->pdev = pdev;
11281 set_bit(__I40E_DOWN, pf->state);
11282
11283 hw = &pf->hw;
11284 hw->back = pf;
11285
11286 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
11287 I40E_MAX_CSR_SPACE);
11288
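	/* map only as much of BAR 0 as the driver's CSR accesses require;
	 * the BAR itself may be considerably larger
	 */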
11289 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
11290 if (!hw->hw_addr) {
11291 err = -EIO;
11292 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
11293 (unsigned int)pci_resource_start(pdev, 0),
11294 pf->ioremap_len, err);
11295 goto err_ioremap;
11296 }
11297 hw->vendor_id = pdev->vendor;
11298 hw->device_id = pdev->device;
11299 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
11300 hw->subsystem_vendor_id = pdev->subsystem_vendor;
11301 hw->subsystem_device_id = pdev->subsystem_device;
11302 hw->bus.device = PCI_SLOT(pdev->devfn);
11303 hw->bus.func = PCI_FUNC(pdev->devfn);
11304 hw->bus.bus_id = pdev->bus->number;
11305 pf->instance = pfs_found;
11306
11307 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
11308 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
11309
	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
11313 mutex_init(&hw->aq.asq_mutex);
11314 mutex_init(&hw->aq.arq_mutex);
11315
11316 pf->msg_enable = netif_msg_init(debug,
11317 NETIF_MSG_DRV |
11318 NETIF_MSG_PROBE |
11319 NETIF_MSG_LINK);
11320 if (debug < -1)
11321 pf->hw.debug_mask = debug;
11322
	/* do a special CORER for clearing PXE mode once at init */
11324 if (hw->revision_id == 0 &&
11325 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
11326 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
11327 i40e_flush(hw);
11328 msleep(200);
11329 pf->corer_count++;
11330
11331 i40e_clear_pxe_mode(hw);
11332 }
11333
	/* Reset here to make sure all is clean and to define PF 'n' */
11335 i40e_clear_hw(hw);
11336 err = i40e_pf_reset(hw);
11337 if (err) {
11338 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
11339 goto err_pf_reset;
11340 }
11341 pf->pfr_count++;
11342
11343 hw->aq.num_arq_entries = I40E_AQ_LEN;
11344 hw->aq.num_asq_entries = I40E_AQ_LEN;
11345 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
11346 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
11347 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
11348
11349 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
11350 "%s-%s:misc",
11351 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
11352
11353 err = i40e_init_shared_code(hw);
11354 if (err) {
11355 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
11356 err);
11357 goto err_pf_reset;
11358 }
11359
	/* set up a default setting for link flow control */
11361 pf->hw.fc.requested_mode = I40E_FC_NONE;
11362
11363 err = i40e_init_adminq(hw);
11364 if (err) {
11365 if (err == I40E_ERR_FIRMWARE_API_VERSION)
11366 dev_info(&pdev->dev,
11367 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
11368 else
11369 dev_info(&pdev->dev,
11370 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
11371
11372 goto err_pf_reset;
11373 }
11374 i40e_get_oem_version(hw);
11375
	/* provide nvm, fw, api versions */
11377 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
11378 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
11379 hw->aq.api_maj_ver, hw->aq.api_min_ver,
11380 i40e_nvm_version_str(hw));
11381
11382 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
11383 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
11384 dev_info(&pdev->dev,
11385 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
11386 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
11387 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
11388 dev_info(&pdev->dev,
11389 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
11390
11391 i40e_verify_eeprom(pf);
11392
11393
11394 if (hw->revision_id < 1)
11395 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
11396
11397 i40e_clear_pxe_mode(hw);
11398 err = i40e_get_capabilities(pf);
11399 if (err)
11400 goto err_adminq_setup;
11401
11402 err = i40e_sw_init(pf);
11403 if (err) {
11404 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
11405 goto err_sw_init;
11406 }
11407
11408 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
11409 hw->func_caps.num_rx_qp, 0, 0);
11410 if (err) {
11411 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
11412 goto err_init_lan_hmc;
11413 }
11414
11415 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
11416 if (err) {
11417 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
11418 err = -ENOENT;
11419 goto err_configure_lan_hmc;
11420 }
11421
	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore error return codes because if it was already disabled via
	 * hardware settings this will fail
	 */
11426 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
11427 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
11428 i40e_aq_stop_lldp(hw, true, NULL);
11429 }
11430
	/* allow a platform config to override the HW addr */
11432 i40e_get_platform_mac_addr(pdev, pf);
11433
11434 if (!is_valid_ether_addr(hw->mac.addr)) {
11435 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
11436 err = -EIO;
11437 goto err_mac_addr;
11438 }
11439 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
11440 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
11441 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
11442 if (is_valid_ether_addr(hw->mac.port_addr))
11443 pf->hw_features |= I40E_HW_PORT_ID_VALID;
11444
11445 pci_set_drvdata(pdev, pf);
11446 pci_save_state(pdev);
11447#ifdef CONFIG_I40E_DCB
11448 err = i40e_init_pf_dcb(pf);
11449 if (err) {
11450 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
11451 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
		/* Continue without DCB enabled */
11453 }
11454#endif
11455
	/* set up periodic task facility */
11457 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
11458 pf->service_timer_period = HZ;
11459
11460 INIT_WORK(&pf->service_task, i40e_service_task);
11461 clear_bit(__I40E_SERVICE_SCHED, pf->state);
11462
	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if ((BIT(hw->port) & wol_nvm_bits) || hw->partition_id != 1)
11466 pf->wol_en = false;
11467 else
11468 pf->wol_en = true;
11469 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
11470
	/* set up the main switch operations */
11472 i40e_determine_queue_usage(pf);
11473 err = i40e_init_interrupt_scheme(pf);
11474 if (err)
11475 goto err_switch_setup;
11476
	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
11482 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
11483 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
11484 else
11485 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
11486
	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
11488 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
11489 GFP_KERNEL);
11490 if (!pf->vsi) {
11491 err = -ENOMEM;
11492 goto err_switch_setup;
11493 }
11494
11495#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
11497 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11498 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11499 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
11500 if (pci_num_vf(pdev))
11501 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
11502 }
11503#endif
11504 err = i40e_setup_pf_switch(pf, false);
11505 if (err) {
11506 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
11507 goto err_vsis;
11508 }
11509
	/* Make sure flow control is set according to current settings */
11511 err = i40e_set_fc(hw, &set_fc_aq_fail, true);
11512 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
11513 dev_dbg(&pf->pdev->dev,
11514 "Set fc with err %s aq_err %s on get_phy_cap\n",
11515 i40e_stat_str(hw, err),
11516 i40e_aq_str(hw, hw->aq.asq_last_status));
11517 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
11518 dev_dbg(&pf->pdev->dev,
11519 "Set fc with err %s aq_err %s on set_phy_config\n",
11520 i40e_stat_str(hw, err),
11521 i40e_aq_str(hw, hw->aq.asq_last_status));
11522 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
11523 dev_dbg(&pf->pdev->dev,
11524 "Set fc with err %s aq_err %s on get_link_info\n",
11525 i40e_stat_str(hw, err),
11526 i40e_aq_str(hw, hw->aq.asq_last_status));
11527
	/* if FDIR VSI was set up, start it now */
11529 for (i = 0; i < pf->num_alloc_vsi; i++) {
11530 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
11531 i40e_vsi_open(pf->vsi[i]);
11532 break;
11533 }
11534 }
11535
	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
11539 err = i40e_aq_set_phy_int_mask(&pf->hw,
11540 ~(I40E_AQ_EVENT_LINK_UPDOWN |
11541 I40E_AQ_EVENT_MEDIA_NA |
11542 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
11543 if (err)
11544 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
11545 i40e_stat_str(&pf->hw, err),
11546 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11547
	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
11552 val = rd32(hw, I40E_REG_MSS);
11553 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
11554 val &= ~I40E_REG_MSS_MIN_MASK;
11555 val |= I40E_64BYTE_MSS;
11556 wr32(hw, I40E_REG_MSS, val);
11557 }
11558
11559 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
11560 msleep(75);
11561 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
11562 if (err)
11563 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
11564 i40e_stat_str(&pf->hw, err),
11565 i40e_aq_str(&pf->hw,
11566 pf->hw.aq.asq_last_status));
11567 }
11568
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
11572 clear_bit(__I40E_DOWN, pf->state);
11573
	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
11579 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11580 err = i40e_setup_misc_vector(pf);
11581 if (err) {
11582 dev_info(&pdev->dev,
11583 "setup of misc vector failed: %d\n", err);
11584 goto err_vsis;
11585 }
11586 }
11587
11588#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
11590 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11591 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11592 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		/* disable link interrupts for VFs */
11594 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
11595 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
11596 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
11597 i40e_flush(hw);
11598
11599 if (pci_num_vf(pdev)) {
11600 dev_info(&pdev->dev,
11601 "Active VFs found, allocating resources.\n");
11602 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
11603 if (err)
11604 dev_info(&pdev->dev,
11605 "Error %d allocating resources for existing VFs\n",
11606 err);
11607 }
11608 }
11609#endif
11610
11611 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11612 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
11613 pf->num_iwarp_msix,
11614 I40E_IWARP_IRQ_PILE_ID);
11615 if (pf->iwarp_base_vector < 0) {
11616 dev_info(&pdev->dev,
11617 "failed to get tracking for %d vectors for IWARP err=%d\n",
11618 pf->num_iwarp_msix, pf->iwarp_base_vector);
11619 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11620 }
11621 }
11622
11623 i40e_dbg_pf_init(pf);
11624
	/* tell the firmware that we're starting */
11626 i40e_send_version(pf);
11627
	/* since everything's happy, start the service_task timer */
11629 mod_timer(&pf->service_timer,
11630 round_jiffies(jiffies + pf->service_timer_period));
11631
	/* add this PF to client device list and launch a client service task */
11633 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11634 err = i40e_lan_add_device(pf);
11635 if (err)
11636 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
11637 err);
11638 }
11639
11640#define PCI_SPEED_SIZE 8
11641#define PCI_WIDTH_SIZE 8
11642
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
11646 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
11647 char speed[PCI_SPEED_SIZE] = "Unknown";
11648 char width[PCI_WIDTH_SIZE] = "Unknown";
11649
		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
11653 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
11654 &link_status);
11655
11656 i40e_set_pci_config_data(hw, link_status);
11657
11658 switch (hw->bus.speed) {
11659 case i40e_bus_speed_8000:
11660 strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
11661 case i40e_bus_speed_5000:
11662 strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
11663 case i40e_bus_speed_2500:
11664 strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
11665 default:
11666 break;
11667 }
11668 switch (hw->bus.width) {
11669 case i40e_bus_width_pcie_x8:
11670 strncpy(width, "8", PCI_WIDTH_SIZE); break;
11671 case i40e_bus_width_pcie_x4:
11672 strncpy(width, "4", PCI_WIDTH_SIZE); break;
11673 case i40e_bus_width_pcie_x2:
11674 strncpy(width, "2", PCI_WIDTH_SIZE); break;
11675 case i40e_bus_width_pcie_x1:
11676 strncpy(width, "1", PCI_WIDTH_SIZE); break;
11677 default:
11678 break;
11679 }
11680
11681 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
11682 speed, width);
11683
11684 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
11685 hw->bus.speed < i40e_bus_speed_8000) {
11686 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
11687 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
11688 }
11689 }
11690
	/* get the requested speeds from the fw */
11692 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
11693 if (err)
11694 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
11695 i40e_stat_str(&pf->hw, err),
11696 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11697 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
11698
	/* get the supported phy types from the fw */
11700 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
11701 if (err)
11702 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
11703 i40e_stat_str(&pf->hw, err),
11704 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11705
	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * VF/VM sessions.
	 */
11712 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
11713 pf->main_vsi_seid);
11714
11715 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
11716 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
11717 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
11718 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
11719 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
11720
11721 i40e_print_features(pf);
11722
11723 return 0;
11724
	/* Unwind what we've done if something failed in the setup */
11726err_vsis:
11727 set_bit(__I40E_DOWN, pf->state);
11728 i40e_clear_interrupt_scheme(pf);
11729 kfree(pf->vsi);
11730err_switch_setup:
11731 i40e_reset_interrupt_capability(pf);
11732 del_timer_sync(&pf->service_timer);
11733err_mac_addr:
11734err_configure_lan_hmc:
11735 (void)i40e_shutdown_lan_hmc(hw);
11736err_init_lan_hmc:
11737 kfree(pf->qp_pile);
11738err_sw_init:
11739err_adminq_setup:
11740err_pf_reset:
11741 iounmap(hw->hw_addr);
11742err_ioremap:
11743 kfree(pf);
11744err_pf_alloc:
11745 pci_disable_pcie_error_reporting(pdev);
11746 pci_release_mem_regions(pdev);
11747err_pci_reg:
11748err_dma:
11749 pci_disable_device(pdev);
11750 return err;
11751}
11752
/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
11762static void i40e_remove(struct pci_dev *pdev)
11763{
11764 struct i40e_pf *pf = pci_get_drvdata(pdev);
11765 struct i40e_hw *hw = &pf->hw;
11766 i40e_status ret_code;
11767 int i;
11768
11769 i40e_dbg_pf_exit(pf);
11770
11771 i40e_ptp_stop(pf);
11772
	/* Disable RSS in hw */
11774 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
11775 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
11776
	/* no more scheduling of any task */
11778 set_bit(__I40E_SUSPENDED, pf->state);
11779 set_bit(__I40E_DOWN, pf->state);
11780 if (pf->service_timer.data)
11781 del_timer_sync(&pf->service_timer);
11782 if (pf->service_task.func)
11783 cancel_work_sync(&pf->service_task);
11784
	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
11788 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
11789
11790 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
11791 i40e_free_vfs(pf);
11792 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
11793 }
11794
11795 i40e_fdir_teardown(pf);
11796
	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
11800 for (i = 0; i < I40E_MAX_VEB; i++) {
11801 if (!pf->veb[i])
11802 continue;
11803
11804 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
11805 pf->veb[i]->uplink_seid == 0)
11806 i40e_switch_branch_release(pf->veb[i]);
11807 }
11808
	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
11812 if (pf->vsi[pf->lan_vsi])
11813 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
11814
	/* remove attached clients */
11816 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11817 ret_code = i40e_lan_del_device(pf);
11818 if (ret_code)
11819 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
11820 ret_code);
11821 }
11822
	/* shutdown and destroy the HMC */
11824 if (hw->hmc.hmc_obj) {
11825 ret_code = i40e_shutdown_lan_hmc(hw);
11826 if (ret_code)
11827 dev_warn(&pdev->dev,
11828 "Failed to destroy the HMC resources: %d\n",
11829 ret_code);
11830 }
11831
	/* shutdown the adminq */
11833 i40e_shutdown_adminq(hw);
11834
	/* destroy the locks only once, here */
11836 mutex_destroy(&hw->aq.arq_mutex);
11837 mutex_destroy(&hw->aq.asq_mutex);
11838
	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
11840 i40e_clear_interrupt_scheme(pf);
11841 for (i = 0; i < pf->num_alloc_vsi; i++) {
11842 if (pf->vsi[i]) {
11843 i40e_vsi_clear_rings(pf->vsi[i]);
11844 i40e_vsi_clear(pf->vsi[i]);
11845 pf->vsi[i] = NULL;
11846 }
11847 }
11848
11849 for (i = 0; i < I40E_MAX_VEB; i++) {
11850 kfree(pf->veb[i]);
11851 pf->veb[i] = NULL;
11852 }
11853
11854 kfree(pf->qp_pile);
11855 kfree(pf->vsi);
11856
11857 iounmap(hw->hw_addr);
11858 kfree(pf);
11859 pci_release_mem_regions(pdev);
11860
11861 pci_disable_pcie_error_reporting(pdev);
11862 pci_disable_device(pdev);
11863}
11864
/**
 * i40e_pci_error_detected - warning that something is wrong
 * @pdev: PCI device information struct
 * @error: the type of PCI error
 *
 * Called to warn that something happened and the error handling steps
 * are in progress.  Allows the driver to prepare for the device reset.
 **/
11873static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
11874 enum pci_channel_state error)
11875{
11876 struct i40e_pf *pf = pci_get_drvdata(pdev);
11877
11878 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
11879
11880 if (!pf) {
11881 dev_info(&pdev->dev,
11882 "Cannot recover - error happened during device probe\n");
11883 return PCI_ERS_RESULT_DISCONNECT;
11884 }
11885
	/* shutdown all operations */
11887 if (!test_bit(__I40E_SUSPENDED, pf->state))
11888 i40e_prep_for_reset(pf, false);
11889
	/* Request a slot reset */
11891 return PCI_ERS_RESULT_NEED_RESET;
11892}
11893
/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset.  If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
11903static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
11904{
11905 struct i40e_pf *pf = pci_get_drvdata(pdev);
11906 pci_ers_result_t result;
11907 int err;
11908 u32 reg;
11909
11910 dev_dbg(&pdev->dev, "%s\n", __func__);
11911 if (pci_enable_device_mem(pdev)) {
11912 dev_info(&pdev->dev,
11913 "Cannot re-enable PCI device after reset.\n");
11914 result = PCI_ERS_RESULT_DISCONNECT;
11915 } else {
11916 pci_set_master(pdev);
11917 pci_restore_state(pdev);
11918 pci_save_state(pdev);
11919 pci_wake_from_d3(pdev, false);
11920
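		/* a nonzero reset-trigger register means the HW reset has
		 * not completed yet, so don't declare recovery
		 */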
11921 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
11922 if (reg == 0)
11923 result = PCI_ERS_RESULT_RECOVERED;
11924 else
11925 result = PCI_ERS_RESULT_DISCONNECT;
11926 }
11927
11928 err = pci_cleanup_aer_uncorrect_error_status(pdev);
11929 if (err) {
11930 dev_info(&pdev->dev,
11931 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
11932 err);
		/* non-fatal, continue */
11934 }
11935
11936 return result;
11937}
11938
/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
11946static void i40e_pci_error_resume(struct pci_dev *pdev)
11947{
11948 struct i40e_pf *pf = pci_get_drvdata(pdev);
11949
11950 dev_dbg(&pdev->dev, "%s\n", __func__);
11951 if (test_bit(__I40E_SUSPENDED, pf->state))
11952 return;
11953
11954 i40e_handle_reset_warning(pf, false);
11955}
11956
/**
 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
 * using the mac_address_write admin q function
 * @pf: pointer to i40e_pf struct
 **/
11962static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
11963{
11964 struct i40e_hw *hw = &pf->hw;
11965 i40e_status ret;
11966 u8 mac_addr[6];
11967 u16 flags = 0;
11968
	/* Get current MAC address in case it's an LAA */
11970 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
11971 ether_addr_copy(mac_addr,
11972 pf->vsi[pf->lan_vsi]->netdev->dev_addr);
11973 } else {
11974 dev_err(&pf->pdev->dev,
11975 "Failed to retrieve MAC address; using default\n");
11976 ether_addr_copy(mac_addr, hw->mac.addr);
11977 }
11978
	/* The FW expects the mac address write cmd to first be called with
	 * one of these flags before calling it again with the multicast
	 * enable flags.
	 */
11983 flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
11984
11985 if (hw->func_caps.flex10_enable && hw->partition_id != 1)
11986 flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
11987
11988 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
11989 if (ret) {
11990 dev_err(&pf->pdev->dev,
11991 "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
11992 return;
11993 }
11994
11995 flags = I40E_AQC_MC_MAG_EN
11996 | I40E_AQC_WOL_PRESERVE_ON_PFR
11997 | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
11998 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
11999 if (ret)
12000 dev_err(&pf->pdev->dev,
12001 "Failed to enable Multicast Magic Packet wake up\n");
12002}
12003
/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
12008static void i40e_shutdown(struct pci_dev *pdev)
12009{
12010 struct i40e_pf *pf = pci_get_drvdata(pdev);
12011 struct i40e_hw *hw = &pf->hw;
12012
12013 set_bit(__I40E_SUSPENDED, pf->state);
12014 set_bit(__I40E_DOWN, pf->state);
12015 rtnl_lock();
12016 i40e_prep_for_reset(pf, true);
12017 rtnl_unlock();
12018
12019 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
12020 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
12021
12022 del_timer_sync(&pf->service_timer);
12023 cancel_work_sync(&pf->service_task);
12024 i40e_fdir_teardown(pf);
12025
	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
12029 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
12030
12031 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
12032 i40e_enable_mc_magic_wake(pf);
12033
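	/* quiesce once more now that the wake-up filters are programmed,
	 * before the final wake registers are written below
	 */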
12034 i40e_prep_for_reset(pf, false);
12035
12036 wr32(hw, I40E_PFPM_APM,
12037 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
12038 wr32(hw, I40E_PFPM_WUFC,
12039 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
12040
12041 i40e_clear_interrupt_scheme(pf);
12042
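	/* only put the device into D3hot when the system is truly powering
	 * off; reboot paths leave it accessible
	 */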
12043 if (system_state == SYSTEM_POWER_OFF) {
12044 pci_wake_from_d3(pdev, pf->wol_en);
12045 pci_set_power_state(pdev, PCI_D3hot);
12046 }
12047}
12048
12049#ifdef CONFIG_PM
/**
 * i40e_suspend - PCI callback for moving to D3
 * @pdev: PCI device information struct
 * @state: PM state to transition to
 **/
12054static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
12055{
12056 struct i40e_pf *pf = pci_get_drvdata(pdev);
12057 struct i40e_hw *hw = &pf->hw;
12058 int retval = 0;
12059
12060 set_bit(__I40E_SUSPENDED, pf->state);
12061 set_bit(__I40E_DOWN, pf->state);
12062
12063 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
12064 i40e_enable_mc_magic_wake(pf);
12065
12066 i40e_prep_for_reset(pf, false);
12067
12068 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
12069 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
12070
12071 i40e_stop_misc_vector(pf);
12072 if (pf->msix_entries) {
12073 synchronize_irq(pf->msix_entries[0].vector);
12074 free_irq(pf->msix_entries[0].vector, pf);
12075 }
12076 retval = pci_save_state(pdev);
12077 if (retval)
12078 return retval;
12079
12080 pci_wake_from_d3(pdev, pf->wol_en);
12081 pci_set_power_state(pdev, PCI_D3hot);
12082
12083 return retval;
12084}
12085
/**
 * i40e_resume - PCI callback for waking up from D3
 * @pdev: PCI device information struct
 **/
12090static int i40e_resume(struct pci_dev *pdev)
12091{
12092 struct i40e_pf *pf = pci_get_drvdata(pdev);
	int err;
12094
12095 pci_set_power_state(pdev, PCI_D0);
12096 pci_restore_state(pdev);
12097
	/* pci_restore_state() clears dev->state_saved, so call
	 * pci_save_state() again to restore it.
	 */
12100 pci_save_state(pdev);
12101
12102 err = pci_enable_device_mem(pdev);
12103 if (err) {
12104 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
12105 return err;
12106 }
12107 pci_set_master(pdev);
12108
	/* no wakeup events while running */
12110 pci_wake_from_d3(pdev, false);
12111
	/* handling the reset will rebuild the device state */
12113 if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
12114 clear_bit(__I40E_DOWN, pf->state);
12115 if (pf->msix_entries) {
12116 err = request_irq(pf->msix_entries[0].vector,
12117 i40e_intr, 0, pf->int_name, pf);
12118 if (err) {
12119 dev_err(&pf->pdev->dev,
12120 "request_irq for %s failed: %d\n",
12121 pf->int_name, err);
12122 }
12123 }
12124 i40e_reset_and_rebuild(pf, false, false);
12125 }
12126
12127 return 0;
12128}
12129
12130#endif
12131static const struct pci_error_handlers i40e_err_handler = {
12132 .error_detected = i40e_pci_error_detected,
12133 .slot_reset = i40e_pci_error_slot_reset,
12134 .resume = i40e_pci_error_resume,
12135};
12136
12137static struct pci_driver i40e_driver = {
12138 .name = i40e_driver_name,
12139 .id_table = i40e_pci_tbl,
12140 .probe = i40e_probe,
12141 .remove = i40e_remove,
12142#ifdef CONFIG_PM
12143 .suspend = i40e_suspend,
12144 .resume = i40e_resume,
12145#endif
12146 .shutdown = i40e_shutdown,
12147 .err_handler = &i40e_err_handler,
12148 .sriov_configure = i40e_pci_sriov_configure,
12149};
12150
/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
12157static int __init i40e_init_module(void)
12158{
12159 pr_info("%s: %s - version %s\n", i40e_driver_name,
12160 i40e_driver_string, i40e_driver_version_str);
12161 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
12162
	/* There is no need to throttle the number of active tasks because
	 * each device limits its own task using a state bit for scheduling
	 * the service task, and the device tasks do not interfere with each
	 * other, so we don't set a max task limit.  We must set WQ_MEM_RECLAIM
	 * since we need to be able to guarantee forward progress even under
	 * memory pressure.
	 */
12170 i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
12171 if (!i40e_wq) {
12172 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
12173 return -ENOMEM;
12174 }
12175
12176 i40e_dbg_init();
12177 return pci_register_driver(&i40e_driver);
12178}
12179module_init(i40e_init_module);
12180
/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
12187static void __exit i40e_exit_module(void)
12188{
12189 pci_unregister_driver(&i40e_driver);
12190 destroy_workqueue(i40e_wq);
12191 i40e_dbg_exit();
12192}
12193module_exit(i40e_exit_module);
12194