1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#include <linux/etherdevice.h>
28#include <linux/of_net.h>
29#include <linux/pci.h>
30#include <linux/bpf.h>
31
32
33#include "i40e.h"
34#include "i40e_diag.h"
35#include <net/udp_tunnel.h>
36
37
38
39
40#define CREATE_TRACE_POINTS
41#include "i40e_trace.h"
42
/* Driver identification strings, exported for ethtool/log output */
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
	"Intel(R) Ethernet Connection XL710 Network Driver";

/* "-k" marks an in-kernel (as opposed to out-of-tree) build */
#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 1
#define DRV_VERSION_BUILD 14
/* Stitch major.minor.build plus the kernel suffix into one version string */
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." \
	__stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
57
58
/* Forward declarations for static helpers defined later in this file */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
72
73
74
75
76
77
78
79
/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
104
/* Upper bound on the number of VFs the PF will enable */
#define I40E_MAX_VF_COUNT 128
/* -1 selects the driver's default debug level; a value with the top bit set
 * (0x8XXXXXXX) is treated as a debug mask instead of a level.
 * NOTE(review): the variable is declared int but module_param() registers it
 * as uint — confirm this mismatch is intentional before changing either side.
 */
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Dedicated workqueue used for the PF service task */
static struct workqueue_struct *i40e_wq;
116
117
118
119
120
121
122
123
124int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
125 u64 size, u32 alignment)
126{
127 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
128
129 mem->size = ALIGN(size, alignment);
130 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
131 &mem->pa, GFP_KERNEL);
132 if (!mem->va)
133 return -ENOMEM;
134
135 return 0;
136}
137
138
139
140
141
142
143int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
144{
145 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
146
147 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
148 mem->va = NULL;
149 mem->pa = 0;
150 mem->size = 0;
151
152 return 0;
153}
154
155
156
157
158
159
160
161int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
162 u32 size)
163{
164 mem->size = size;
165 mem->va = kzalloc(size, GFP_KERNEL);
166
167 if (!mem->va)
168 return -ENOMEM;
169
170 return 0;
171}
172
173
174
175
176
177
178int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
179{
180
181 kfree(mem->va);
182 mem->va = NULL;
183 mem->size = 0;
184
185 return 0;
186}
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
202 u16 needed, u16 id)
203{
204 int ret = -ENOMEM;
205 int i, j;
206
207 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
208 dev_info(&pf->pdev->dev,
209 "param err: pile=%p needed=%d id=0x%04x\n",
210 pile, needed, id);
211 return -EINVAL;
212 }
213
214
215 i = pile->search_hint;
216 while (i < pile->num_entries) {
217
218 if (pile->list[i] & I40E_PILE_VALID_BIT) {
219 i++;
220 continue;
221 }
222
223
224 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
225 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
226 break;
227 }
228
229 if (j == needed) {
230
231 for (j = 0; j < needed; j++)
232 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
233 ret = i;
234 pile->search_hint = i + j;
235 break;
236 }
237
238
239 i += j;
240 }
241
242 return ret;
243}
244
245
246
247
248
249
250
251
252
253static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
254{
255 int valid_id = (id | I40E_PILE_VALID_BIT);
256 int count = 0;
257 int i;
258
259 if (!pile || index >= pile->num_entries)
260 return -EINVAL;
261
262 for (i = index;
263 i < pile->num_entries && pile->list[i] == valid_id;
264 i++) {
265 pile->list[i] = 0;
266 count++;
267 }
268
269 if (count && index < pile->search_hint)
270 pile->search_hint = index;
271
272 return count;
273}
274
275
276
277
278
279
280struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
281{
282 int i;
283
284 for (i = 0; i < pf->num_alloc_vsi; i++)
285 if (pf->vsi[i] && (pf->vsi[i]->id == id))
286 return pf->vsi[i];
287
288 return NULL;
289}
290
291
292
293
294
295
296
297void i40e_service_event_schedule(struct i40e_pf *pf)
298{
299 if (!test_bit(__I40E_DOWN, pf->state) &&
300 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
301 queue_work(i40e_wq, &pf->service_task);
302}
303
304
305
306
307
308
309
310
311
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * The stack calls this when a transmit queue has stalled.  The handler
 * locates the hung queue, dumps its ring state, and requests an
 * escalating reset (PF -> CORE -> GLOBAL) via the service task.
 */
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack did */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* map the netdev queue index back onto our Tx ring */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	/* restart the escalation ladder if the last recovery was a while
	 * ago; bail out entirely if one is still in its grace period
	 */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* read the interrupt control register that serves this ring */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						 tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	/* each successive timeout within the window asks for a bigger reset */
	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
397
398
399
400
401
402
403
404
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure embedded in
 * the VSI; the counters themselves are maintained elsewhere.
 */
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}
409
410
411
412
413
414
415static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
416 struct rtnl_link_stats64 *stats)
417{
418 u64 bytes, packets;
419 unsigned int start;
420
421 do {
422 start = u64_stats_fetch_begin_irq(&ring->syncp);
423 packets = ring->stats.packets;
424 bytes = ring->stats.bytes;
425 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
426
427 stats->tx_packets += packets;
428 stats->tx_bytes += bytes;
429}
430
431
432
433
434
435
436
437
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev
 * @netdev: network interface device structure
 * @stats: rtnl stats struct to fill in
 *
 * Sums the per-ring packet/byte counters into @stats, then copies the
 * error counters from the VSI stats struct (which is updated elsewhere).
 */
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;
		i40e_get_netdev_stats_struct_tx(tx_ring, stats);

		/* assumes the Rx ring lives immediately after the Tx ring in
		 * the same allocation — TODO confirm against the ring setup
		 */
		rx_ring = &tx_ring[1];

		/* seqcount-protected snapshot of the Rx counters */
		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;

		/* the XDP Tx ring appears to follow the Rx ring in the same
		 * layout — TODO confirm
		 */
		if (i40e_enabled_xdp_vsi(vsi))
			i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
	}
	rcu_read_unlock();

	/* error/drop counters come from the VSI stats maintained elsewhere */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_dropped = vsi_stats->rx_dropped;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;
}
488
489
490
491
492
493void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
494{
495 struct rtnl_link_stats64 *ns;
496 int i;
497
498 if (!vsi)
499 return;
500
501 ns = i40e_get_vsi_stats_struct(vsi);
502 memset(ns, 0, sizeof(*ns));
503 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
504 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
505 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
506 if (vsi->rx_rings && vsi->rx_rings[0]) {
507 for (i = 0; i < vsi->num_queue_pairs; i++) {
508 memset(&vsi->rx_rings[i]->stats, 0,
509 sizeof(vsi->rx_rings[i]->stats));
510 memset(&vsi->rx_rings[i]->rx_stats, 0,
511 sizeof(vsi->rx_rings[i]->rx_stats));
512 memset(&vsi->tx_rings[i]->stats, 0,
513 sizeof(vsi->tx_rings[i]->stats));
514 memset(&vsi->tx_rings[i]->tx_stats, 0,
515 sizeof(vsi->tx_rings[i]->tx_stats));
516 }
517 }
518 vsi->stat_offsets_loaded = false;
519}
520
521
522
523
524
525void i40e_pf_reset_stats(struct i40e_pf *pf)
526{
527 int i;
528
529 memset(&pf->stats, 0, sizeof(pf->stats));
530 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
531 pf->stat_offsets_loaded = false;
532
533 for (i = 0; i < I40E_MAX_VEB; i++) {
534 if (pf->veb[i]) {
535 memset(&pf->veb[i]->stats, 0,
536 sizeof(pf->veb[i]->stats));
537 memset(&pf->veb[i]->stats_offsets, 0,
538 sizeof(pf->veb[i]->stats_offsets));
539 pf->veb[i]->stat_offsets_loaded = false;
540 }
541 }
542 pf->hw_csum_rx_error = 0;
543}
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
561 bool offset_loaded, u64 *offset, u64 *stat)
562{
563 u64 new_data;
564
565 if (hw->device_id == I40E_DEV_ID_QEMU) {
566 new_data = rd32(hw, loreg);
567 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
568 } else {
569 new_data = rd64(hw, loreg);
570 }
571 if (!offset_loaded)
572 *offset = new_data;
573 if (likely(new_data >= *offset))
574 *stat = new_data - *offset;
575 else
576 *stat = (new_data + BIT_ULL(48)) - *offset;
577 *stat &= 0xFFFFFFFFFFFFULL;
578}
579
580
581
582
583
584
585
586
587
588static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
589 bool offset_loaded, u64 *offset, u64 *stat)
590{
591 u32 new_data;
592
593 new_data = rd32(hw, reg);
594 if (!offset_loaded)
595 *offset = new_data;
596 if (likely(new_data >= *offset))
597 *stat = (u32)(new_data - *offset);
598 else
599 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
600}
601
602
603
604
605
606void i40e_update_eth_stats(struct i40e_vsi *vsi)
607{
608 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
609 struct i40e_pf *pf = vsi->back;
610 struct i40e_hw *hw = &pf->hw;
611 struct i40e_eth_stats *oes;
612 struct i40e_eth_stats *es;
613
614 es = &vsi->eth_stats;
615 oes = &vsi->eth_stats_offsets;
616
617
618 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
619 vsi->stat_offsets_loaded,
620 &oes->tx_errors, &es->tx_errors);
621 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
622 vsi->stat_offsets_loaded,
623 &oes->rx_discards, &es->rx_discards);
624 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
625 vsi->stat_offsets_loaded,
626 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
627 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
628 vsi->stat_offsets_loaded,
629 &oes->tx_errors, &es->tx_errors);
630
631 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
632 I40E_GLV_GORCL(stat_idx),
633 vsi->stat_offsets_loaded,
634 &oes->rx_bytes, &es->rx_bytes);
635 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
636 I40E_GLV_UPRCL(stat_idx),
637 vsi->stat_offsets_loaded,
638 &oes->rx_unicast, &es->rx_unicast);
639 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
640 I40E_GLV_MPRCL(stat_idx),
641 vsi->stat_offsets_loaded,
642 &oes->rx_multicast, &es->rx_multicast);
643 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
644 I40E_GLV_BPRCL(stat_idx),
645 vsi->stat_offsets_loaded,
646 &oes->rx_broadcast, &es->rx_broadcast);
647
648 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
649 I40E_GLV_GOTCL(stat_idx),
650 vsi->stat_offsets_loaded,
651 &oes->tx_bytes, &es->tx_bytes);
652 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
653 I40E_GLV_UPTCL(stat_idx),
654 vsi->stat_offsets_loaded,
655 &oes->tx_unicast, &es->tx_unicast);
656 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
657 I40E_GLV_MPTCL(stat_idx),
658 vsi->stat_offsets_loaded,
659 &oes->tx_multicast, &es->tx_multicast);
660 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
661 I40E_GLV_BPTCL(stat_idx),
662 vsi->stat_offsets_loaded,
663 &oes->tx_broadcast, &es->tx_broadcast);
664 vsi->stat_offsets_loaded = true;
665}
666
667
668
669
670
/**
 * i40e_update_veb_stats - Update Switch component statistics (VEB)
 * @veb: the VEB being updated
 *
 * Reads the per-VEB switch statistics registers (plus per-traffic-class
 * counters) and accumulates them relative to the saved offsets.
 */
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	/* RUPP only exists on revision > 0 silicon */
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	/* 48-bit Rx counters */
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	/* 48-bit Tx counters */
	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	/* per-traffic-class packet/byte counters, Rx then Tx */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}
745
746
747
748
749
750
751
752
753
754
755
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 */
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;	/* netdev stats, after switch stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	/* nothing to update while the device is down or reconfiguring */
	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		/* seqcount-protected snapshot of the Tx counters */
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;

		/* assumes the Rx ring is part of the same allocation,
		 * immediately after the Tx ring — TODO confirm layout
		 */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
851
852
853
854
855
/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 *
 * Reads every port-level hardware statistics register (byte/packet
 * counts, fault and flow-control counters, size histograms, flow
 * director matches, EEE LPI state) relative to the saved offsets.
 */
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	/* port-level byte counters, Rx then Tx */
	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	/* unicast/multicast/broadcast packet counters */
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	/* error counters */
	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	/* MAC fault counters */
	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	/* link-level flow control (XON/XOFF) counters */
	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	/* per-priority (PFC) flow control counters, 8 priorities */
	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	/* Rx packet size histogram */
	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	/* Tx packet size histogram */
	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	/* Rx framing error counters */
	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR match counters */
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_sb_match, &nsd->fd_sb_match);
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);

	/* EEE low-power-idle status and counters */
	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	/* report FD features as active only when enabled and not
	 * auto-disabled by the driver
	 */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}
1084
1085
1086
1087
1088
1089
1090
1091void i40e_update_stats(struct i40e_vsi *vsi)
1092{
1093 struct i40e_pf *pf = vsi->back;
1094
1095 if (vsi == pf->vsi[pf->lan_vsi])
1096 i40e_update_pf_stats(pf);
1097
1098 i40e_update_vsi_stats(vsi);
1099}
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1110 const u8 *macaddr, s16 vlan)
1111{
1112 struct i40e_mac_filter *f;
1113 u64 key;
1114
1115 if (!vsi || !macaddr)
1116 return NULL;
1117
1118 key = i40e_addr_to_hkey(macaddr);
1119 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1120 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1121 (vlan == f->vlan))
1122 return f;
1123 }
1124 return NULL;
1125}
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
1136{
1137 struct i40e_mac_filter *f;
1138 u64 key;
1139
1140 if (!vsi || !macaddr)
1141 return NULL;
1142
1143 key = i40e_addr_to_hkey(macaddr);
1144 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1145 if ((ether_addr_equal(macaddr, f->macaddr)))
1146 return f;
1147 }
1148 return NULL;
1149}
1150
1151
1152
1153
1154
1155
1156
1157bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1158{
1159
1160 if (vsi->info.pvid)
1161 return true;
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183 return vsi->has_vlan_filter;
1184}
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: VSI to configure
 * @tmp_add_list: list of filters about to be added
 * @tmp_del_list: list of filters about to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Rewrites filters so the set is consistent with the VSI's vlan mode:
 * with a port vlan (pvid) every filter must carry the pvid; with active
 * vlan filters the untagged catch-all must be VID=0; with no vlan
 * filters it must be VID=I40E_VLAN_ANY.  Pending adds are edited in
 * place; already-committed filters are replaced by a new add plus a
 * delete of the old entry.
 *
 * Returns 0 on success, or -ENOMEM if an allocation fails.
 */
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* Filters not yet sent to firmware can simply have their vlan
	 * rewritten in place.
	 */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new vlan inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1305{
1306 struct i40e_aqc_remove_macvlan_element_data element;
1307 struct i40e_pf *pf = vsi->back;
1308
1309
1310 if (vsi->type != I40E_VSI_MAIN)
1311 return;
1312
1313 memset(&element, 0, sizeof(element));
1314 ether_addr_copy(element.mac_addr, macaddr);
1315 element.vlan_tag = 0;
1316
1317 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1318 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1319
1320 memset(&element, 0, sizeof(element));
1321 ether_addr_copy(element.mac_addr, macaddr);
1322 element.vlan_tag = 0;
1323
1324 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1325 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1326 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1327}
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * vlan mode. Note vlan == I40E_VLAN_ANY (-1) does not count.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		/* If we're in overflow promisc mode, the hardware cannot take
		 * more filters, so mark the new filter FAILED immediately;
		 * otherwise it is NEW and will be pushed on the next sync.
		 */
		if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))
			f->state = I40E_FILTER_FAILED;
		else
			f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place.
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
1410{
1411 if (!f)
1412 return;
1413
1414
1415
1416
1417
1418 if ((f->state == I40E_FILTER_FAILED) ||
1419 (f->state == I40E_FILTER_NEW)) {
1420 hash_del(&f->hlist);
1421 kfree(f);
1422 } else {
1423 f->state = I40E_FILTER_REMOVE;
1424 }
1425
1426 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1427 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1428}
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
1443{
1444 struct i40e_mac_filter *f;
1445
1446 if (!vsi || !macaddr)
1447 return;
1448
1449 f = i40e_find_filter(vsi, macaddr, vlan);
1450 __i40e_del_filter(vsi, f);
1451}
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
1466 const u8 *macaddr)
1467{
1468 struct i40e_mac_filter *f, *add = NULL;
1469 struct hlist_node *h;
1470 int bkt;
1471
1472 if (vsi->info.pvid)
1473 return i40e_add_filter(vsi, macaddr,
1474 le16_to_cpu(vsi->info.pvid));
1475
1476 if (!i40e_is_vsi_in_vlan(vsi))
1477 return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
1478
1479 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1480 if (f->state == I40E_FILTER_REMOVE)
1481 continue;
1482 add = i40e_add_filter(vsi, macaddr, f->vlan);
1483 if (!add)
1484 return NULL;
1485 }
1486
1487 return add;
1488}
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
1501{
1502 struct i40e_mac_filter *f;
1503 struct hlist_node *h;
1504 bool found = false;
1505 int bkt;
1506
1507 WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
1508 "Missing mac_filter_hash_lock\n");
1509 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1510 if (ether_addr_equal(macaddr, f->macaddr)) {
1511 __i40e_del_filter(vsi, f);
1512 found = true;
1513 }
1514 }
1515
1516 if (found)
1517 return 0;
1518 else
1519 return -ENOENT;
1520}
1521
1522
1523
1524
1525
1526
1527
1528
/**
 * i40e_set_mac - NDO callback to change the Ethernet device address
 * @netdev: network interface device structure
 * @p: ptr to the address structure (struct sockaddr)
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Nothing to do if the address is unchanged */
	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	/* Refuse address changes while the device is down or resetting */
	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	/* Swap the software filters under the hash lock: delete the old
	 * address, add the new one. The hardware update happens later via
	 * the filter sync in the service task.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	i40e_add_mac_filter(vsi, addr->sa_data);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		/* Tell firmware about the new locally-administered address;
		 * a failure here is logged but deliberately not fatal.
		 */
		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Fills the VSI context's TC-to-queue mapping and queue_mapping sections
 * from the enabled-TC bitmap, and updates vsi->tc_config and
 * vsi->num_queue_pairs as a side effect.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Count set bits to find how many TCs are enabled */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i))
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in the non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Start with an even split of all allocated queue pairs across TCs,
	 * capped at the per-TC hardware maximum.
	 */
	qcount = vsi->alloc_queue_pairs;

	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				/* main VSI is further limited by RSS size */
				qcount = min_t(int, pf->alloc_rss_size,
					       num_tc_qps);
				break;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs;
			 * the qmap encodes the queue count as a power of two
			 */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		/* SR-IOV VSIs get an explicit, non-contiguous per-queue map */
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
				cpu_to_le16(vsi->base_queue + i);
	} else {
		/* everything else uses a contiguous range from base_queue */
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1719{
1720 struct i40e_netdev_priv *np = netdev_priv(netdev);
1721 struct i40e_vsi *vsi = np->vsi;
1722
1723 if (i40e_add_mac_filter(vsi, addr))
1724 return 0;
1725 else
1726 return -ENOMEM;
1727}
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1738{
1739 struct i40e_netdev_priv *np = netdev_priv(netdev);
1740 struct i40e_vsi *vsi = np->vsi;
1741
1742 i40e_del_mac_filter(vsi, addr);
1743
1744 return 0;
1745}
1746
1747
1748
1749
1750
/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	/* The uc/mc sync callbacks (i40e_addr_sync/unsync) operate on the
	 * filter hash and expect this lock to be held.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	__dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
	__dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
}
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1784 struct hlist_head *from)
1785{
1786 struct i40e_mac_filter *f;
1787 struct hlist_node *h;
1788
1789 hlist_for_each_entry_safe(f, h, from, hlist) {
1790 u64 key = i40e_addr_to_hkey(f->macaddr);
1791
1792
1793 hlist_del(&f->hlist);
1794 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1795 }
1796}
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
1807 struct hlist_head *from)
1808{
1809 struct i40e_new_mac_filter *new;
1810 struct hlist_node *h;
1811
1812 hlist_for_each_entry_safe(new, h, from, hlist) {
1813
1814 hlist_del(&new->hlist);
1815 kfree(new);
1816 }
1817}
1818
1819
1820
1821
1822
1823
1824
1825
1826
/**
 * i40e_next_filter - Get the next non-broadcast filter from a list
 * @next: pointer to filter in list
 *
 * Returns the next non-broadcast filter in the list, continuing from
 * (and excluding) @next. Returns NULL if none remain. Broadcast filters
 * are skipped because they are handled separately from the regular
 * MAC/VLAN add path (via broadcast promiscuous AQ commands).
 **/
static
struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
{
	hlist_for_each_entry_continue(next, hlist) {
		if (!is_broadcast_ether_addr(next->f->macaddr))
			return next;
	}

	return NULL;
}
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848static int
1849i40e_update_filter_state(int count,
1850 struct i40e_aqc_add_macvlan_element_data *add_list,
1851 struct i40e_new_mac_filter *add_head)
1852{
1853 int retval = 0;
1854 int i;
1855
1856 for (i = 0; i < count; i++) {
1857
1858
1859
1860
1861
1862
1863 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
1864 add_head->state = I40E_FILTER_FAILED;
1865 } else {
1866 add_head->state = I40E_FILTER_ACTIVE;
1867 retval++;
1868 }
1869
1870 add_head = i40e_next_filter(add_head);
1871 if (!add_head)
1872 break;
1873 }
1874
1875 return retval;
1876}
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891static
1892void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
1893 struct i40e_aqc_remove_macvlan_element_data *list,
1894 int num_del, int *retval)
1895{
1896 struct i40e_hw *hw = &vsi->back->hw;
1897 i40e_status aq_ret;
1898 int aq_err;
1899
1900 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
1901 aq_err = hw->aq.asq_last_status;
1902
1903
1904 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
1905 *retval = -EIO;
1906 dev_info(&vsi->back->pdev->dev,
1907 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
1908 vsi_name, i40e_stat_str(hw, aq_ret),
1909 i40e_aq_str(hw, aq_err));
1910 }
1911}
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926static
1927void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
1928 struct i40e_aqc_add_macvlan_element_data *list,
1929 struct i40e_new_mac_filter *add_head,
1930 int num_add, bool *promisc_changed)
1931{
1932 struct i40e_hw *hw = &vsi->back->hw;
1933 int aq_err, fcnt;
1934
1935 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
1936 aq_err = hw->aq.asq_last_status;
1937 fcnt = i40e_update_filter_state(num_add, list, add_head);
1938
1939 if (fcnt != num_add) {
1940 *promisc_changed = true;
1941 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
1942 dev_warn(&vsi->back->pdev->dev,
1943 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
1944 i40e_aq_str(hw, aq_err),
1945 vsi_name);
1946 }
1947}
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960static i40e_status
1961i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
1962 struct i40e_mac_filter *f)
1963{
1964 bool enable = f->state == I40E_FILTER_NEW;
1965 struct i40e_hw *hw = &vsi->back->hw;
1966 i40e_status aq_ret;
1967
1968 if (f->vlan == I40E_VLAN_ANY) {
1969 aq_ret = i40e_aq_set_vsi_broadcast(hw,
1970 vsi->seid,
1971 enable,
1972 NULL);
1973 } else {
1974 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
1975 vsi->seid,
1976 enable,
1977 f->vlan,
1978 NULL);
1979 }
1980
1981 if (aq_ret)
1982 dev_warn(&vsi->back->pdev->dev,
1983 "Error %s setting broadcast promiscuous mode on %s\n",
1984 i40e_aq_str(hw, hw->aq.asq_last_status),
1985 vsi_name);
1986
1987 return aq_ret;
1988}
1989
1990
1991
1992
1993
1994
1995
1996
1997
/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ, and also
 * reconcile promiscuous / allmulti state with the netdev flags.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct hlist_head tmp_add_list, tmp_del_list;
	struct i40e_mac_filter *f;
	struct i40e_new_mac_filter *new, *add_head = NULL;
	struct i40e_hw *hw = &vsi->back->hw;
	unsigned int failed_filters = 0;
	unsigned int vlan_filters = 0;
	bool promisc_changed = false;
	char vsi_name[16] = "PF";
	int filter_list_len = 0;
	i40e_status aq_ret = 0;
	u32 changed_flags = 0;
	struct hlist_node *h;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	int retval = 0;
	u16 cmd_flags;
	int list_size;
	int bkt;

	/* AQ element buffers, sized to the AdminQ buffer later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	/* Only one sync may run at a time per VSI; spin-wait (sleeping)
	 * until we own the SYNCING_FILTERS bit.
	 */
	while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	INIT_HLIST_HEAD(&tmp_add_list);
	INIT_HLIST_HEAD(&tmp_del_list);

	if (vsi->type == I40E_VSI_SRIOV)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
	else if (vsi->type != I40E_VSI_MAIN)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		/* Create a list of filters to delete and a list of filters
		 * to add, then drop the lock before talking to firmware.
		 */
		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
			if (f->state == I40E_FILTER_REMOVE) {
				/* Move the element into temporary del_list */
				hash_del(&f->hlist);
				hlist_add_head(&f->hlist, &tmp_del_list);

				/* Avoid counting removed filters */
				continue;
			}
			if (f->state == I40E_FILTER_NEW) {
				/* Create a temporary i40e_new_mac_filter */
				new = kzalloc(sizeof(*new), GFP_ATOMIC);
				if (!new)
					goto err_no_memory_locked;

				/* Store pointer to the real filter */
				new->f = f;
				new->state = f->state;

				/* Add it to the hash list */
				hlist_add_head(&new->hlist, &tmp_add_list);
			}

			/* Count the number of active (current and new) VLAN
			 * filters we have now. Does not count filters which
			 * are marked for deletion.
			 */
			if (f->vlan > 0)
				vlan_filters++;
		}

		/* Retarget filters if the PVID/VLAN state changed */
		retval = i40e_correct_mac_vlan_filters(vsi,
						       &tmp_add_list,
						       &tmp_del_list,
						       vlan_filters);
		if (retval)
			goto err_no_memory_locked;

		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}

	/* Now process 'del_list' outside the lock */
	if (!hlist_empty(&tmp_del_list)) {
		filter_list_len = hw->aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		list_size = filter_list_len *
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kzalloc(list_size, GFP_ATOMIC);
		if (!del_list)
			goto err_no_memory;

		hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
			cmd_flags = 0;

			/* handle broadcast filters by updating the broadcast
			 * promiscuous flag instead of deleting a MAC filter
			 */
			if (is_broadcast_ether_addr(f->macaddr)) {
				i40e_aqc_broadcast_filter(vsi, vsi_name, f);

				hlist_del(&f->hlist);
				kfree(f);
				continue;
			}

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			if (f->vlan == I40E_VLAN_ANY) {
				del_list[num_del].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			} else {
				del_list[num_del].vlan_tag =
					cpu_to_le16((u16)(f->vlan));
			}

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				i40e_aqc_del_filters(vsi, vsi_name, del_list,
						     num_del, &retval);
				memset(del_list, 0, list_size);
				num_del = 0;
			}
			/* Release memory for MAC filter entries which were
			 * synced up with HW.
			 */
			hlist_del(&f->hlist);
			kfree(f);
		}

		if (num_del) {
			i40e_aqc_del_filters(vsi, vsi_name, del_list,
					     num_del, &retval);
		}

		kfree(del_list);
		del_list = NULL;
	}

	if (!hlist_empty(&tmp_add_list)) {
		/* Do all the adds now */
		filter_list_len = hw->aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		list_size = filter_list_len *
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kzalloc(list_size, GFP_ATOMIC);
		if (!add_list)
			goto err_no_memory;

		num_add = 0;
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			/* In overflow promisc, don't bother the firmware -
			 * mark the filter failed; it will be retried once
			 * the logjam clears.
			 */
			if (test_bit(__I40E_VSI_OVERFLOW_PROMISC,
				     vsi->state)) {
				new->state = I40E_FILTER_FAILED;
				continue;
			}

			/* handle broadcast filters by updating the broadcast
			 * promiscuous flag instead of adding a MAC filter
			 */
			if (is_broadcast_ether_addr(new->f->macaddr)) {
				if (i40e_aqc_broadcast_filter(vsi, vsi_name,
							      new->f))
					new->state = I40E_FILTER_FAILED;
				else
					new->state = I40E_FILTER_ACTIVE;
				continue;
			}

			/* add to add array */
			if (num_add == 0)
				add_head = new;
			cmd_flags = 0;
			ether_addr_copy(add_list[num_add].mac_addr,
					new->f->macaddr);
			if (new->f->vlan == I40E_VLAN_ANY) {
				add_list[num_add].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			} else {
				add_list[num_add].vlan_tag =
					cpu_to_le16((u16)(new->f->vlan));
			}
			add_list[num_add].queue_number = 0;
			/* set invalid match method for later detection;
			 * firmware overwrites this field per entry on success
			 */
			add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				i40e_aqc_add_filters(vsi, vsi_name, add_list,
						     add_head, num_add,
						     &promisc_changed);
				memset(add_list, 0, list_size);
				num_add = 0;
			}
		}
		if (num_add) {
			i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
					     num_add, &promisc_changed);
		}
		/* Now move all of the filters from the temp add list back to
		 * the VSI's list.
		 */
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			/* Only update the state if it's still NEW */
			if (new->f->state == I40E_FILTER_NEW)
				new->f->state = new->state;
			hlist_del(&new->hlist);
			kfree(new);
		}
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		kfree(add_list);
		add_list = NULL;
	}

	/* Determine the number of active and failed filters. */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	vsi->active_filters = 0;
	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->state == I40E_FILTER_ACTIVE)
			vsi->active_filters++;
		else if (f->state == I40E_FILTER_FAILED)
			failed_filters++;
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* If promiscuous mode has changed, we need to calculate a new
	 * threshold for when we are safe to exit
	 */
	if (promisc_changed)
		vsi->promisc_threshold = (vsi->active_filters * 3) / 4;

	/* Check if we are able to exit overflow promiscuous mode. We can
	 * safely exit if we didn't just enter, we no longer have any failed
	 * filters, and we have reduced filters below the threshold value.
	 */
	if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) &&
	    !promisc_changed && !failed_filters &&
	    (vsi->active_filters < vsi->promisc_threshold)) {
		dev_info(&pf->pdev->dev,
			 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
			 vsi_name);
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		promisc_changed = true;
		vsi->promisc_threshold = 0;
	}

	/* if the VF is not trusted do not do promisc */
	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		goto out;
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;

		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed on %s, err %s aq_err %s\n",
				 vsi_name,
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}

	if ((changed_flags & IFF_PROMISC) || promisc_changed) {
		bool cur_promisc;

		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_VSI_OVERFLOW_PROMISC,
					vsi->state));
		if ((vsi->type == I40E_VSI_MAIN) &&
		    (pf->lan_veb != I40E_NO_VEB) &&
		    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
			/* set defport ON for Main VSI instead of true promisc
			 * this way we will get all unicast/multicast and VLAN
			 * promisc behavior but will not get VF or VMDq traffic
			 * replicated on the Main VSI.
			 */
			if (pf->cur_promisc != cur_promisc) {
				pf->cur_promisc = cur_promisc;
				if (cur_promisc)
					aq_ret =
					      i40e_aq_set_default_vsi(hw,
								      vsi->seid,
								      NULL);
				else
					aq_ret =
					    i40e_aq_clear_default_vsi(hw,
								      vsi->seid,
								      NULL);
				if (aq_ret) {
					retval = i40e_aq_rc_to_posix(aq_ret,
							hw->aq.asq_last_status);
					dev_info(&pf->pdev->dev,
						 "Set default VSI failed on %s, err %s, aq_err %s\n",
						 vsi_name,
						 i40e_stat_str(hw, aq_ret),
						 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
				}
			}
		} else {
			/* non-defport path: toggle unicast and multicast
			 * promiscuous individually
			 */
			aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
							  hw,
							  vsi->seid,
							  cur_promisc, NULL,
							  true);
			if (aq_ret) {
				retval =
				i40e_aq_rc_to_posix(aq_ret,
						    hw->aq.asq_last_status);
				dev_info(&pf->pdev->dev,
					 "set unicast promisc failed on %s, err %s, aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
			}
			aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
							  hw,
							  vsi->seid,
							  cur_promisc, NULL);
			if (aq_ret) {
				retval =
				i40e_aq_rc_to_posix(aq_ret,
						    hw->aq.asq_last_status);
				dev_info(&pf->pdev->dev,
					 "set multicast promisc failed on %s, err %s, aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
			}
		}
		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
						   vsi->seid,
						   cur_promisc, NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     pf->hw.aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set brdcast promisc failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw,
					     hw->aq.asq_last_status));
		}
	}
out:
	/* if something went wrong then set the changed flag so we try again */
	if (retval)
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;

	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return retval;

err_no_memory:
	/* Restore elements on the temporary add and delete lists */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
err_no_memory_locked:
	i40e_undo_del_filter_entries(vsi, &tmp_del_list);
	i40e_undo_add_filter_entries(vsi, &tmp_add_list);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return -ENOMEM;
}
2388
2389
2390
2391
2392
2393static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2394{
2395 int v;
2396
2397 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
2398 return;
2399 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
2400
2401 for (v = 0; v < pf->num_alloc_vsi; v++) {
2402 if (pf->vsi[v] &&
2403 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2404 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2405
2406 if (ret) {
2407
2408 pf->flags |= I40E_FLAG_FILTER_SYNC;
2409 break;
2410 }
2411 }
2412 }
2413}
2414
2415
2416
2417
2418
2419static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2420{
2421 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2422 return I40E_RXBUFFER_2048;
2423 else
2424 return I40E_RXBUFFER_3072;
2425}
2426
2427
2428
2429
2430
2431
2432
2433
/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	if (i40e_enabled_xdp_vsi(vsi)) {
		/* With XDP enabled, the whole frame (MTU + L2 overhead) must
		 * fit in a single RX buffer.
		 */
		int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

		if (frame_size > i40e_max_xdp_frame_size(vsi))
			return -EINVAL;
	}

	netdev_info(netdev, "changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	/* Restart the VSI (if up) so the new buffer sizes take effect */
	if (netif_running(netdev))
		i40e_vsi_reinit_locked(vsi);
	/* Tell the client interface code the L2 parameters changed */
	pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
		      I40E_FLAG_CLIENT_L2_CHANGE);
	return 0;
}
2456
2457
2458
2459
2460
2461
2462
2463int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2464{
2465 struct i40e_netdev_priv *np = netdev_priv(netdev);
2466 struct i40e_pf *pf = np->vsi->back;
2467
2468 switch (cmd) {
2469 case SIOCGHWTSTAMP:
2470 return i40e_ptp_get_ts_config(pf, ifr);
2471 case SIOCSHWTSTAMP:
2472 return i40e_ptp_set_ts_config(pf, ifr);
2473 default:
2474 return -EOPNOTSUPP;
2475 }
2476}
2477
2478
2479
2480
2481
/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	/* Already in the desired mode (PVLAN mode bits clear)? Skip the
	 * AdminQ round trip.
	 */
	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
		return;

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}
2507
2508
2509
2510
2511
/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	/* Already in the desired mode (EMOD bits fully set)? Skip the
	 * AdminQ round trip.
	 */
	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
		return;

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}
2538
2539
2540
2541
2542
2543
2544static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2545{
2546 struct i40e_netdev_priv *np = netdev_priv(netdev);
2547 struct i40e_vsi *vsi = np->vsi;
2548
2549 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2550 i40e_vlan_stripping_enable(vsi);
2551 else
2552 i40e_vlan_stripping_disable(vsi);
2553}
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2569{
2570 struct i40e_mac_filter *f, *add_f;
2571 struct hlist_node *h;
2572 int bkt;
2573
2574 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2575 if (f->state == I40E_FILTER_REMOVE)
2576 continue;
2577 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2578 if (!add_f) {
2579 dev_info(&vsi->back->pdev->dev,
2580 "Could not add vlan filter %d for %pM\n",
2581 vid, f->macaddr);
2582 return -ENOMEM;
2583 }
2584 }
2585
2586 return 0;
2587}
2588
2589
2590
2591
2592
2593
2594int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2595{
2596 int err;
2597
2598 if (!vid || vsi->info.pvid)
2599 return -EINVAL;
2600
2601
2602 spin_lock_bh(&vsi->mac_filter_hash_lock);
2603 err = i40e_add_vlan_all_mac(vsi, vid);
2604 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2605 if (err)
2606 return err;
2607
2608
2609
2610
2611 i40e_service_event_schedule(vsi->back);
2612 return 0;
2613}
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2629{
2630 struct i40e_mac_filter *f;
2631 struct hlist_node *h;
2632 int bkt;
2633
2634 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2635 if (f->vlan == vid)
2636 __i40e_del_filter(vsi, f);
2637 }
2638}
2639
2640
2641
2642
2643
2644
2645void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2646{
2647 if (!vid || vsi->info.pvid)
2648 return;
2649
2650 spin_lock_bh(&vsi->mac_filter_hash_lock);
2651 i40e_rm_vlan_all_mac(vsi, vid);
2652 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2653
2654
2655
2656
2657 i40e_service_event_schedule(vsi->back);
2658}
2659
2660
2661
2662
2663
2664
2665
2666
2667static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2668 __always_unused __be16 proto, u16 vid)
2669{
2670 struct i40e_netdev_priv *np = netdev_priv(netdev);
2671 struct i40e_vsi *vsi = np->vsi;
2672 int ret = 0;
2673
2674 if (vid >= VLAN_N_VID)
2675 return -EINVAL;
2676
2677
2678
2679
2680
2681
2682
2683 if (vid)
2684 ret = i40e_vsi_add_vlan(vsi, vid);
2685
2686 if (!ret)
2687 set_bit(vid, vsi->active_vlans);
2688
2689 return ret;
2690}
2691
2692
2693
2694
2695
2696
2697
2698
2699static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2700 __always_unused __be16 proto, u16 vid)
2701{
2702 struct i40e_netdev_priv *np = netdev_priv(netdev);
2703 struct i40e_vsi *vsi = np->vsi;
2704
2705
2706
2707
2708
2709 i40e_vsi_kill_vlan(vsi, vid);
2710
2711 clear_bit(vid, vsi->active_vlans);
2712
2713 return 0;
2714}
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
/**
 * i40e_macaddr_init - explicitly write the mac address filters
 * @vsi: pointer to the vsi
 * @macaddr: the MAC address
 *
 * This is needed when the macaddr has been obtained by other means than
 * the default, e.g., from Open Firmware or IDPROM. Writes the address
 * to firmware as a locally-administered address and adds a perfect-match
 * MAC filter directly via the AdminQ (bypassing the software filter hash).
 *
 * Returns 0 on success, -EADDRNOTAVAIL if the firmware address write
 * fails, or the AQ status of the filter add.
 **/
static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr)
{
	int ret;
	struct i40e_aqc_add_macvlan_element_data element;

	ret = i40e_aq_mac_address_write(&vsi->back->hw,
					I40E_AQC_WRITE_TYPE_LAA_WOL,
					macaddr, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Addr change for VSI failed: %d\n", ret);
		return -EADDRNOTAVAIL;
	}

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
	ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "add filter failed err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
	return ret;
}
2753
2754
2755
2756
2757
2758static void i40e_restore_vlan(struct i40e_vsi *vsi)
2759{
2760 u16 vid;
2761
2762 if (!vsi->netdev)
2763 return;
2764
2765 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2766
2767 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2768 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2769 vid);
2770}
2771
2772
2773
2774
2775
2776
/**
 * i40e_vsi_add_pvid - Add PVID (port VLAN) for the VSI
 * @vsi: the VSI being adjusted
 * @vid: the VLAN id to use as the port VLAN
 *
 * Updates the cached VSI context to tag/insert @vid on transmit and strip
 * it on receive, then pushes the new context to firmware.
 * Returns 0 on success, -ENOENT if the firmware update fails.
 **/
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	/* mark only the VLAN section of the context as valid for update */
	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.pvid = cpu_to_le16(vid);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
				    I40E_AQ_VSI_PVLAN_EMOD_STR;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "add pvid failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		return -ENOENT;
	}

	return 0;
}
2802
2803
2804
2805
2806
2807
2808
/**
 * i40e_vsi_remove_pvid - Remove the PVID (port VLAN) from the VSI
 * @vsi: the VSI being adjusted
 *
 * Disables VLAN stripping on the VSI and clears the cached pvid so a
 * later context update no longer carries the port VLAN.
 **/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
	i40e_vlan_stripping_disable(vsi);

	vsi->info.pvid = 0;
}
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2827{
2828 int i, err = 0;
2829
2830 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2831 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2832
2833 if (!i40e_enabled_xdp_vsi(vsi))
2834 return err;
2835
2836 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2837 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
2838
2839 return err;
2840}
2841
2842
2843
2844
2845
2846
2847
2848static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2849{
2850 int i;
2851
2852 if (vsi->tx_rings) {
2853 for (i = 0; i < vsi->num_queue_pairs; i++)
2854 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2855 i40e_free_tx_resources(vsi->tx_rings[i]);
2856 }
2857
2858 if (vsi->xdp_rings) {
2859 for (i = 0; i < vsi->num_queue_pairs; i++)
2860 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2861 i40e_free_tx_resources(vsi->xdp_rings[i]);
2862 }
2863}
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2876{
2877 int i, err = 0;
2878
2879 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2880 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2881 return err;
2882}
2883
2884
2885
2886
2887
2888
2889
2890static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2891{
2892 int i;
2893
2894 if (!vsi->rx_rings)
2895 return;
2896
2897 for (i = 0; i < vsi->num_queue_pairs; i++)
2898 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2899 i40e_free_rx_resources(vsi->rx_rings[i]);
2900}
2901
2902
2903
2904
2905
2906
2907
2908
/**
 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: the Tx ring to configure
 *
 * In single-TC mode, maps the ring's transmit queue to its interrupt
 * vector's CPU affinity (once, guarded by __I40E_TX_XPS_INIT_DONE).
 * With multiple TCs, clears the XPS map so queue selection follows the
 * traffic class instead.
 **/
static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	cpumask_var_t mask;

	if (!ring->q_vector || !ring->netdev)
		return;

	/* Single TC mode: enable XPS based on the vector's affinity */
	if (vsi->tc_config.numtc <= 1) {
		if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
			netif_set_xps_queue(ring->netdev,
					    &ring->q_vector->affinity_mask,
					    ring->queue_index);
	} else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
		/* Disable XPS (empty mask) to allow selection based on TC */
		bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
		netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
		free_cpumask_var(mask);
	}

	/* NOTE(review): kicking the PF service task from a per-ring XPS
	 * config helper looks out of place (nothing above queues work for
	 * it) — confirm this call is intentional.
	 */
	i40e_service_event_schedule(vsi->back);
}
2935
2936
2937
2938
2939
2940
2941
2942static int i40e_configure_tx_ring(struct i40e_ring *ring)
2943{
2944 struct i40e_vsi *vsi = ring->vsi;
2945 u16 pf_q = vsi->base_queue + ring->queue_index;
2946 struct i40e_hw *hw = &vsi->back->hw;
2947 struct i40e_hmc_obj_txq tx_ctx;
2948 i40e_status err = 0;
2949 u32 qtx_ctl = 0;
2950
2951
2952 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2953 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2954 ring->atr_count = 0;
2955 } else {
2956 ring->atr_sample_rate = 0;
2957 }
2958
2959
2960 i40e_config_xps_tx_ring(ring);
2961
2962
2963 memset(&tx_ctx, 0, sizeof(tx_ctx));
2964
2965 tx_ctx.new_context = 1;
2966 tx_ctx.base = (ring->dma / 128);
2967 tx_ctx.qlen = ring->count;
2968 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2969 I40E_FLAG_FD_ATR_ENABLED));
2970 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2971
2972 if (vsi->type != I40E_VSI_FDIR)
2973 tx_ctx.head_wb_ena = 1;
2974 tx_ctx.head_wb_addr = ring->dma +
2975 (ring->count * sizeof(struct i40e_tx_desc));
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2988 tx_ctx.rdylist_act = 0;
2989
2990
2991 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2992 if (err) {
2993 dev_info(&vsi->back->pdev->dev,
2994 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2995 ring->queue_index, pf_q, err);
2996 return -ENOMEM;
2997 }
2998
2999
3000 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3001 if (err) {
3002 dev_info(&vsi->back->pdev->dev,
3003 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3004 ring->queue_index, pf_q, err);
3005 return -ENOMEM;
3006 }
3007
3008
3009 if (vsi->type == I40E_VSI_VMDQ2) {
3010 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3011 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3012 I40E_QTX_CTL_VFVM_INDX_MASK;
3013 } else {
3014 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3015 }
3016
3017 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3018 I40E_QTX_CTL_PF_INDX_MASK);
3019 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3020 i40e_flush(hw);
3021
3022
3023 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3024
3025 return 0;
3026}
3027
3028
3029
3030
3031
3032
3033
/**
 * i40e_configure_rx_ring - Configure an Rx ring in the HMC
 * @ring: the Rx ring to configure
 *
 * Programs the HMC Rx queue context (buffer sizes, descriptor format,
 * thresholds), picks the build_skb vs legacy receive path, resets the
 * tail pointer, and pre-fills the ring with receive buffers.
 * Returns 0 on success, -ENOMEM if the HMC context cannot be programmed.
 **/
static int i40e_configure_rx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	i40e_status err = 0;

	ring->state = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(rx_ctx));

	ring->rx_buf_len = vsi->rx_buf_len;

	/* data buffer size is expressed in 128-byte units */
	rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
				    BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));

	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;

	/* use 32 byte descriptors */
	rx_ctx.dsize = 1;

	/* descriptor type is always zero; no header split */
	rx_ctx.hsplit_0 = 0;

	/* largest receivable frame, bounded by the buffer-chain capacity */
	rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
	/* A0 silicon (revision 0) requires lrxqthresh of 0 */
	if (hw->revision_id == 0)
		rx_ctx.lrxqthresh = 0;
	else
		rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;
	rx_ctx.l2tsel = 1;

	/* this controls whether VLAN is stripped from inner headers */
	rx_ctx.showiv = 0;

	/* set the prefena field to 1 because the manual says to */
	rx_ctx.prefena = 1;

	/* clear the context in the HMC */
	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* configure Rx buffer alignment: legacy-rx (or no netdev) cannot
	 * use the padded build_skb receive path
	 */
	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
		clear_ring_build_skb_enabled(ring);
	else
		set_ring_build_skb_enabled(ring);

	/* cache tail for quicker writes, and clear the reg before use */
	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
	writel(0, ring->tail);

	i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));

	return 0;
}
3108
3109
3110
3111
3112
3113
3114
3115static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3116{
3117 int err = 0;
3118 u16 i;
3119
3120 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3121 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3122
3123 if (!i40e_enabled_xdp_vsi(vsi))
3124 return err;
3125
3126 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3127 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3128
3129 return err;
3130}
3131
3132
3133
3134
3135
3136
3137
/**
 * i40e_vsi_configure_rx - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Selects the VSI-wide Rx buffer size / max frame policy, then programs
 * the HMC context for every Rx ring.
 * Returns 0 on success, negative on failure.
 **/
static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
		/* legacy receive path: fixed 2K buffers */
		vsi->max_frame = I40E_MAX_RXBUFFER;
		vsi->rx_buf_len = I40E_RXBUFFER_2048;
#if (PAGE_SIZE < 8192)
	} else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
		/* small pages + standard MTU: 1536-byte buffers leave room
		 * for the build_skb headroom/padding within a half page
		 */
		vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
		vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
#endif
	} else {
		/* jumbo-capable: 3K buffers on small pages (half-page pairs
		 * don't fit with padding), 2K on >=8K pages
		 */
		vsi->max_frame = I40E_MAX_RXBUFFER;
		vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
						       I40E_RXBUFFER_2048;
	}

	/* set up individual rings */
	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_configure_rx_ring(vsi->rx_rings[i]);

	return err;
}
3164
3165
3166
3167
3168
3169static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3170{
3171 struct i40e_ring *tx_ring, *rx_ring;
3172 u16 qoffset, qcount;
3173 int i, n;
3174
3175 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3176
3177 for (i = 0; i < vsi->num_queue_pairs; i++) {
3178 rx_ring = vsi->rx_rings[i];
3179 tx_ring = vsi->tx_rings[i];
3180 rx_ring->dcb_tc = 0;
3181 tx_ring->dcb_tc = 0;
3182 }
3183 }
3184
3185 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3186 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3187 continue;
3188
3189 qoffset = vsi->tc_config.tc_info[n].qoffset;
3190 qcount = vsi->tc_config.tc_info[n].qcount;
3191 for (i = qoffset; i < (qoffset + qcount); i++) {
3192 rx_ring = vsi->rx_rings[i];
3193 tx_ring = vsi->tx_rings[i];
3194 rx_ring->dcb_tc = n;
3195 tx_ring->dcb_tc = n;
3196 }
3197 }
3198}
3199
3200
3201
3202
3203
3204static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3205{
3206 struct i40e_pf *pf = vsi->back;
3207 int err;
3208
3209 if (vsi->netdev)
3210 i40e_set_rx_mode(vsi->netdev);
3211
3212 if (!!(pf->flags & I40E_FLAG_PF_MAC)) {
3213 err = i40e_macaddr_init(vsi, pf->hw.mac.addr);
3214 if (err) {
3215 dev_warn(&pf->pdev->dev,
3216 "could not set up macaddr; err %d\n", err);
3217 }
3218 }
3219}
3220
3221
3222
3223
3224
3225
3226
3227
3228static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3229{
3230 struct i40e_fdir_filter *filter;
3231 struct i40e_pf *pf = vsi->back;
3232 struct hlist_node *node;
3233
3234 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3235 return;
3236
3237
3238 pf->fd_tcp4_filter_cnt = 0;
3239 pf->fd_udp4_filter_cnt = 0;
3240 pf->fd_sctp4_filter_cnt = 0;
3241 pf->fd_ip4_filter_cnt = 0;
3242
3243 hlist_for_each_entry_safe(filter, node,
3244 &pf->fdir_filter_list, fdir_node) {
3245 i40e_add_del_fdir(vsi, filter, true);
3246 }
3247}
3248
3249
3250
3251
3252
/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 *
 * Applies rx_mode/VLAN/DCB state and programs all Tx and Rx rings.
 * Returns 0 on success, negative on failure.
 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int err;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);

	err = i40e_vsi_configure_tx(vsi);
	if (err)
		return err;

	return i40e_vsi_configure_rx(vsi);
}
3266
3267
3268
3269
3270
/**
 * i40e_vsi_configure_msix - MSIX mode interrupt config in the HW
 * @vsi: the VSI being configured
 *
 * Programs per-vector ITR/rate-limit registers and builds, per vector,
 * the hardware linked list of queue interrupt causes:
 * Rx(qp) -> [XDP Tx(qp + alloc_queue_pairs) ->] Tx(qp) -> next Rx, with
 * the last Tx entry marked END_OF_LIST. Vector 0 is reserved for the
 * "other"/misc causes, hence the (vector - 1) register indexing.
 **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	bool has_xdp = i40e_enabled_xdp_vsi(vsi);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[i];

		q_vector->itr_countdown = ITR_COUNTDOWN_START;
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
		q_vector->rx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
		q_vector->tx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.itr);
		wr32(hw, I40E_PFINT_RATEN(vector - 1),
		     i40e_intrl_usec_to_reg(vsi->int_rate_limit));

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			/* XDP Tx queues live after the regular queues */
			u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
			u32 val;

			/* Rx cause points at the (XDP) Tx queue next */
			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_TX <<
			       I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			if (has_xdp) {
				/* XDP Tx cause chains on to the regular Tx */
				val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
				      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
				      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
				      (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
				      (I40E_QUEUE_TYPE_TX <<
				       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

				wr32(hw, I40E_QINT_TQCTL(nextqp), val);
			}

			/* Tx cause chains on to the next queue pair's Rx */
			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_RX <<
			       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST <<
					I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}
3346
3347
3348
3349
3350
/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: pointer to the PF structure
 *
 * Clears any pending "other" causes, then enables the set of
 * miscellaneous interrupt causes (errors, resets, AdminQ, VFLR, and
 * optionally iWARP / PTP) routed to vector 0.
 **/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* clear things first: disable all causes and ack anything pending */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
	rd32(hw, I40E_PFINT_ICR0);

	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	      I40E_PFINT_ICR0_ENA_GRST_MASK |
	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
	      I40E_PFINT_ICR0_ENA_GPIO_MASK |
	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	      I40E_PFINT_ICR0_ENA_VFLR_MASK |
	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;

	if (pf->flags & I40E_FLAG_PTP)
		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;

	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	     I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	/* OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
3384
3385
3386
3387
3388
/**
 * i40e_configure_msi_and_legacy - Legacy/MSI interrupt mode configuration
 * @vsi: the VSI being configured
 *
 * With a single shared interrupt, all causes chain off queue pair 0:
 * Rx(0) -> [XDP Tx(alloc_queue_pairs) ->] Tx(0) -> END_OF_LIST, plus the
 * misc causes enabled via i40e_enable_misc_int_causes().
 **/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
	u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* set the ITR configuration for the single vector */
	q_vector->itr_countdown = ITR_COUNTDOWN_START;
	q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
	q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
	q_vector->tx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);

	i40e_enable_misc_int_causes(pf);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the queue
	 * interrupt. (The TQCTL/RQCTL NEXTQ_TYPE shifts share the same
	 * value, which is why a TQCTL shift appears in an RQCTL word.)
	 */
	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
	      (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

	wr32(hw, I40E_QINT_RQCTL(0), val);

	if (i40e_enabled_xdp_vsi(vsi)) {
		/* XDP Tx queue chains back to Tx queue 0 (NEXTQ_INDX 0) */
		val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
		      (I40E_QUEUE_TYPE_TX
		       << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

	       wr32(hw, I40E_QINT_TQCTL(nextqp), val);
	}

	/* Tx queue 0 terminates the cause list */
	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

	wr32(hw, I40E_QINT_TQCTL(0), val);
	i40e_flush(hw);
}
3435
3436
3437
3438
3439
/**
 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation
 * @pf: board private structure
 *
 * Writes DYN_CTL0 without INTENA set, parking the "other causes" vector.
 * (The DYN_CTLN ITR_INDX shift is reused here; it matches the CTL0
 * layout for this field.)
 **/
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;

	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	i40e_flush(hw);
}
3448
3449
3450
3451
3452
3453
/**
 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation
 * @pf: board private structure
 * @clearpba: true when the Pending Bit Array for vector 0 should also be
 *            cleared (i.e. when re-arming from process context rather
 *            than from within the interrupt handler)
 **/
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
	      (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);

	wr32(hw, I40E_PFINT_DYN_CTL0, val);
	i40e_flush(hw);
}
3466
3467
3468
3469
3470
3471
3472static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3473{
3474 struct i40e_q_vector *q_vector = data;
3475
3476 if (!q_vector->tx.ring && !q_vector->rx.ring)
3477 return IRQ_HANDLED;
3478
3479 napi_schedule_irqoff(&q_vector->napi);
3480
3481 return IRQ_HANDLED;
3482}
3483
3484
3485
3486
3487
3488
3489
3490
3491
/**
 * i40e_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * Records the new affinity so the vector's cached mask stays in sync
 * with user irq affinity changes. (Struct assignment copies the whole
 * cpumask.)
 **/
static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct i40e_q_vector *q_vector =
		container_of(notify, struct i40e_q_vector, affinity_notify);

	q_vector->affinity_mask = *mask;
}
3500
3501
3502
3503
3504
3505
3506
3507
3508
/**
 * i40e_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * Intentionally empty: the notifier is embedded in the q_vector, so
 * there is nothing separate to free when the kref drops.
 **/
static void i40e_irq_affinity_release(struct kref *ref) {}
3510
3511
3512
3513
3514
3515
3516
3517
3518static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3519{
3520 int q_vectors = vsi->num_q_vectors;
3521 struct i40e_pf *pf = vsi->back;
3522 int base = vsi->base_vector;
3523 int rx_int_idx = 0;
3524 int tx_int_idx = 0;
3525 int vector, err;
3526 int irq_num;
3527
3528 for (vector = 0; vector < q_vectors; vector++) {
3529 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3530
3531 irq_num = pf->msix_entries[base + vector].vector;
3532
3533 if (q_vector->tx.ring && q_vector->rx.ring) {
3534 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3535 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3536 tx_int_idx++;
3537 } else if (q_vector->rx.ring) {
3538 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3539 "%s-%s-%d", basename, "rx", rx_int_idx++);
3540 } else if (q_vector->tx.ring) {
3541 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3542 "%s-%s-%d", basename, "tx", tx_int_idx++);
3543 } else {
3544
3545 continue;
3546 }
3547 err = request_irq(irq_num,
3548 vsi->irq_handler,
3549 0,
3550 q_vector->name,
3551 q_vector);
3552 if (err) {
3553 dev_info(&pf->pdev->dev,
3554 "MSIX request_irq failed, error: %d\n", err);
3555 goto free_queue_irqs;
3556 }
3557
3558
3559 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3560 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3561 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3562
3563 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
3564 }
3565
3566 vsi->irqs_ready = true;
3567 return 0;
3568
3569free_queue_irqs:
3570 while (vector) {
3571 vector--;
3572 irq_num = pf->msix_entries[base + vector].vector;
3573 irq_set_affinity_notifier(irq_num, NULL);
3574 irq_set_affinity_hint(irq_num, NULL);
3575 free_irq(irq_num, &vsi->q_vectors[vector]);
3576 }
3577 return err;
3578}
3579
3580
3581
3582
3583
/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 *
 * Disables the per-queue interrupt causes, masks the vectors, and then
 * waits for any in-flight handlers to finish via synchronize_irq().
 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	/* disable interrupt causation from each queue */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u32 val;

		val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
		val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);

		val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
		val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);

		if (!i40e_enabled_xdp_vsi(vsi))
			continue;
		wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
	}

	/* disable each interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		/* DYN_CTLN registers are 0-based while vector 1 is the
		 * first queue vector, hence the (i - 1) indexing
		 */
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}
3625
3626
3627
3628
3629
3630static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3631{
3632 struct i40e_pf *pf = vsi->back;
3633 int i;
3634
3635 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3636 for (i = 0; i < vsi->num_q_vectors; i++)
3637 i40e_irq_dynamic_enable(vsi, i);
3638 } else {
3639 i40e_irq_dynamic_enable_icr0(pf, true);
3640 }
3641
3642 i40e_flush(&pf->hw);
3643 return 0;
3644}
3645
3646
3647
3648
3649
/**
 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
 * @pf: board private structure
 *
 * Disables all "other cause" interrupt generation by clearing ICR0_ENA.
 **/
static void i40e_stop_misc_vector(struct i40e_pf *pf)
{
	/* Disable ICR 0 */
	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
	i40e_flush(&pf->hw);
}
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665
/**
 * i40e_intr - MSI/legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector (cast to the PF structure)
 *
 * This is the handler used for all MSI/legacy interrupts, and deals
 * with both queue and non-queue interrupts.  This is also used in
 * MSIX mode to handle the non-queue ("other cause") interrupts: each
 * cause bit in ICR0 is dispatched to the appropriate service-task flag
 * or immediate action, then the still-enabled causes are re-armed.
 **/
static irqreturn_t i40e_intr(int irq, void *data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;
	struct i40e_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 icr0, icr0_remaining;
	u32 val, ena_mask;

	icr0 = rd32(hw, I40E_PFINT_ICR0);
	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		goto enable_intr;

	/* if interrupt but no bits showing, must be SWINT */
	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
		pf->sw_int_count++;

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
	}

	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
		struct i40e_q_vector *q_vector = vsi->q_vectors[0];

		/* We do not have a way to disarm queue causes while leaving
		 * the misc causes armed, so schedule NAPI directly here
		 * (napi_schedule masks further queue interrupts itself).
		 */
		if (!test_bit(__I40E_DOWN, pf->state))
			napi_schedule_irqoff(&q_vector->napi);
	}

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
		i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
	}

	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
			set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		val = rd32(hw, I40E_GLGEN_RSTAT);
		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		if (val == I40E_RESET_CORER) {
			pf->corer_count++;
		} else if (val == I40E_RESET_GLOBR) {
			pf->globr_count++;
		} else if (val == I40E_RESET_EMPR) {
			pf->empr_count++;
			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
		}
	}

	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
		dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
			 rd32(hw, I40E_PFHMC_ERRORINFO),
			 rd32(hw, I40E_PFHMC_ERRORDATA));
	}

	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
			i40e_ptp_tx_hwtstamp(pf);
		}
	}

	/* If a critical error is pending we have no choice but to reset the
	 * device.
	 * Report and mask out any remaining unexpected interrupts.
	 */
	icr0_remaining = icr0 & ena_mask;
	if (icr0_remaining) {
		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
			 icr0_remaining);
		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
			dev_info(&pf->pdev->dev, "device will be reset\n");
			set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
			i40e_service_event_schedule(pf);
		}
		ena_mask &= ~icr0_remaining;
	}
	ret = IRQ_HANDLED;

enable_intr:
	/* re-enable interrupt causes that are still wanted */
	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
	if (!test_bit(__I40E_DOWN, pf->state)) {
		i40e_service_event_schedule(pf);
		i40e_irq_dynamic_enable_icr0(pf, false);
	}

	return ret;
}
3787
3788
3789
3790
3791
3792
3793
3794
/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after FDIR transmit completes
 * @tx_ring: the FDIR Tx ring to clean
 * @budget: max number of filter programmings to clean
 *
 * Each Flow Director filter program consumes two descriptors: the
 * data/raw-buffer descriptor and the filter-program descriptor, which is
 * why the loop below advances twice per iteration. The index `i` is kept
 * biased by -count so the wrap check is a simple test against zero.
 *
 * Returns true if the budget was not exhausted (i.e. the ring is clean).
 **/
static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	struct i40e_vsi *vsi = tx_ring->vsi;
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_desc;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;
		/* move past the filter desc to the data desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}
		/* unmap the data buffer used for the filter program */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buf->raw_buf);

		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past the eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	/* undo the -count bias before storing the real index */
	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);

	return budget > 0;
}
3872
3873
3874
3875
3876
3877
3878static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3879{
3880 struct i40e_q_vector *q_vector = data;
3881 struct i40e_vsi *vsi;
3882
3883 if (!q_vector->tx.ring)
3884 return IRQ_HANDLED;
3885
3886 vsi = q_vector->tx.ring->vsi;
3887 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3888
3889 return IRQ_HANDLED;
3890}
3891
3892
3893
3894
3895
3896
3897
/**
 * i40e_map_vector_to_qp - Assigns one queue pair to one interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: vector index
 * @qp_idx: queue pair index
 *
 * Pushes the queue pair's Tx (and XDP Tx, when enabled) and Rx rings
 * onto the head of the vector's singly-linked ring lists and bumps the
 * per-direction counts.
 **/
static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;

	/* Place XDP Tx ring in the same q_vector ring list as regular Tx */
	if (i40e_enabled_xdp_vsi(vsi)) {
		struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];

		xdp_ring->q_vector = q_vector;
		xdp_ring->next = q_vector->tx.ring;
		q_vector->tx.ring = xdp_ring;
		q_vector->tx.count++;
	}

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
}
3924
3925
3926
3927
3928
3929
3930
3931
3932
3933
3934static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3935{
3936 int qp_remaining = vsi->num_queue_pairs;
3937 int q_vectors = vsi->num_q_vectors;
3938 int num_ringpairs;
3939 int v_start = 0;
3940 int qp_idx = 0;
3941
3942
3943
3944
3945
3946
3947
3948
3949 for (; v_start < q_vectors; v_start++) {
3950 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3951
3952 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3953
3954 q_vector->num_ringpairs = num_ringpairs;
3955
3956 q_vector->rx.count = 0;
3957 q_vector->tx.count = 0;
3958 q_vector->rx.ring = NULL;
3959 q_vector->tx.ring = NULL;
3960
3961 while (num_ringpairs--) {
3962 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
3963 qp_idx++;
3964 qp_remaining--;
3965 }
3966 }
3967}
3968
3969
3970
3971
3972
3973
3974static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3975{
3976 struct i40e_pf *pf = vsi->back;
3977 int err;
3978
3979 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3980 err = i40e_vsi_request_irq_msix(vsi, basename);
3981 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3982 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3983 pf->int_name, pf);
3984 else
3985 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3986 pf->int_name, pf);
3987
3988 if (err)
3989 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3990
3991 return err;
3992}
3993
3994#ifdef CONFIG_NET_POLL_CONTROLLER
3995
3996
3997
3998
3999
4000
4001
4002static void i40e_netpoll(struct net_device *netdev)
4003{
4004 struct i40e_netdev_priv *np = netdev_priv(netdev);
4005 struct i40e_vsi *vsi = np->vsi;
4006 struct i40e_pf *pf = vsi->back;
4007 int i;
4008
4009
4010 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4011 return;
4012
4013 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4014 for (i = 0; i < vsi->num_q_vectors; i++)
4015 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4016 } else {
4017 i40e_intr(pf->pdev->irq, netdev);
4018 }
4019}
4020#endif
4021
4022#define I40E_QTX_ENA_WAIT_COUNT 50
4023
4024
4025
4026
4027
4028
4029
4030
4031
4032
4033
4034
4035static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4036{
4037 int i;
4038 u32 tx_reg;
4039
4040 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4041 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4042 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4043 break;
4044
4045 usleep_range(10, 20);
4046 }
4047 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4048 return -ETIMEDOUT;
4049
4050 return 0;
4051}
4052
4053
4054
4055
4056
4057
4058
4059
4060
4061
4062
/**
 * i40e_control_tx_q - Start or stop a particular Tx queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @enable: start or stop the queue
 *
 * Warns hardware of the coming change, waits for any previous
 * request/ack handshake on QTX_ENA to settle, then flips the REQ bit.
 * This function does not wait for the new state to be acknowledged —
 * callers use i40e_pf_txq_wait() for that.
 **/
static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	u32 tx_reg;
	int i;

	/* warn the TX unit of coming changes */
	i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
	if (!enable)
		usleep_range(10, 20);

	/* wait until REQ and STAT agree, i.e. no transition is in flight */
	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
		tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
		if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
		    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
			break;
		usleep_range(1000, 2000);
	}

	/* Skip if the queue is already in the requested state */
	if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
		return;

	/* turn on/off the queue; on enable the head must be reset first */
	if (enable) {
		wr32(hw, I40E_QTX_HEAD(pf_q), 0);
		tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
	} else {
		tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	}

	wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
}
4096
4097
4098
4099
4100
4101
4102
4103
4104
4105static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4106 bool is_xdp, bool enable)
4107{
4108 int ret;
4109
4110 i40e_control_tx_q(pf, pf_q, enable);
4111
4112
4113 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4114 if (ret) {
4115 dev_info(&pf->pdev->dev,
4116 "VSI seid %d %sTx ring %d %sable timeout\n",
4117 seid, (is_xdp ? "XDP " : ""), pf_q,
4118 (enable ? "en" : "dis"));
4119 }
4120
4121 return ret;
4122}
4123
4124
4125
4126
4127
4128
4129static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
4130{
4131 struct i40e_pf *pf = vsi->back;
4132 int i, pf_q, ret = 0;
4133
4134 pf_q = vsi->base_queue;
4135 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4136 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4137 pf_q,
4138 false , enable);
4139 if (ret)
4140 break;
4141
4142 if (!i40e_enabled_xdp_vsi(vsi))
4143 continue;
4144
4145 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4146 pf_q + vsi->alloc_queue_pairs,
4147 true , enable);
4148 if (ret)
4149 break;
4150 }
4151
4152 return ret;
4153}
4154
4155
4156
4157
4158
4159
4160
4161
4162
4163
4164
4165
4166static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4167{
4168 int i;
4169 u32 rx_reg;
4170
4171 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4172 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4173 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4174 break;
4175
4176 usleep_range(10, 20);
4177 }
4178 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4179 return -ETIMEDOUT;
4180
4181 return 0;
4182}
4183
4184
4185
4186
4187
4188
4189
4190
4191
4192
4193
/**
 * i40e_control_rx_q - Start or stop a particular Rx queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @enable: start or stop the queue
 *
 * This function enables or disables a single queue.  Note that any delay
 * required after the operation is expected to be handled by the caller of
 * this function.
 **/
static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	u32 rx_reg;
	int i;

	/* wait for any previous enable/disable request to settle
	 * (REQ bit equal to STAT bit means no request pending)
	 */
	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
		rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
		if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
		    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
			break;
		usleep_range(1000, 2000);
	}

	/* Skip if the queue is already in the requested state */
	if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
		return;

	/* turn on/off the queue */
	if (enable)
		rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
	else
		rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;

	wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
}
4220
4221
4222
4223
4224
4225
4226static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4227{
4228 struct i40e_pf *pf = vsi->back;
4229 int i, pf_q, ret = 0;
4230
4231 pf_q = vsi->base_queue;
4232 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4233 i40e_control_rx_q(pf, pf_q, enable);
4234
4235
4236 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4237 if (ret) {
4238 dev_info(&pf->pdev->dev,
4239 "VSI seid %d Rx ring %d %sable timeout\n",
4240 vsi->seid, pf_q, (enable ? "en" : "dis"));
4241 break;
4242 }
4243 }
4244
4245
4246
4247
4248 if (!enable)
4249 mdelay(50);
4250
4251 return ret;
4252}
4253
4254
4255
4256
4257
4258int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4259{
4260 int ret = 0;
4261
4262
4263 ret = i40e_vsi_control_rx(vsi, true);
4264 if (ret)
4265 return ret;
4266 ret = i40e_vsi_control_tx(vsi, true);
4267
4268 return ret;
4269}
4270
4271
4272
4273
4274
4275void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4276{
4277
4278 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4279 return i40e_vsi_stop_rings_no_wait(vsi);
4280
4281
4282
4283
4284 i40e_vsi_control_tx(vsi, false);
4285 i40e_vsi_control_rx(vsi, false);
4286}
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
4299void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4300{
4301 struct i40e_pf *pf = vsi->back;
4302 int i, pf_q;
4303
4304 pf_q = vsi->base_queue;
4305 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4306 i40e_control_tx_q(pf, pf_q, false);
4307 i40e_control_rx_q(pf, pf_q, false);
4308 }
4309}
4310
4311
4312
4313
4314
/**
 * i40e_vsi_free_irq - Free the IRQ association with the OS
 * @vsi: the VSI being configured
 *
 * Releases the IRQs used by the VSI's queue vectors (or the single
 * legacy/MSI interrupt) and tears down the HW interrupt linked lists so
 * the queues no longer raise interrupts.
 **/
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		if (!vsi->q_vectors)
			return;

		if (!vsi->irqs_ready)
			return;

		vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			int irq_num;
			u16 vector;

			vector = i + base;
			irq_num = pf->msix_entries[vector].vector;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !vsi->q_vectors[i]->num_ringpairs)
				continue;

			/* clear the affinity notifier in the IRQ descriptor */
			irq_set_affinity_notifier(irq_num, NULL);
			/* remove our suggested affinity mask for this IRQ */
			irq_set_affinity_hint(irq_num, NULL);
			synchronize_irq(irq_num);
			free_irq(irq_num, vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * The queues come in pairs, Rx first then Tx.
			 * To clear the list, stick the EOL value into
			 * the FIRSTQ/next-queue fields of the registers
			 * and walk the chain.
			 */
			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			val |= I40E_QUEUE_END_OF_LIST
				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);

			while (qp != I40E_QUEUE_END_OF_LIST) {
				u32 next;

				val = rd32(hw, I40E_QINT_RQCTL(qp));

				/* detach the Rx queue from the vector */
				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
					 I40E_QINT_RQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
					 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_RQCTL(qp), val);

				val = rd32(hw, I40E_QINT_TQCTL(qp));

				/* the Tx control holds the next queue pair */
				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;

				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
					 I40E_QINT_TQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
					 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_TQCTL(qp), val);
				qp = next;
			}
		}
	} else {
		/* legacy/MSI: a single interrupt and the LNKLST0 chain */
		free_irq(pf->pdev->irq, pf);

		val = rd32(hw, I40E_PFINT_LNKLST0);
		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
		val |= I40E_QUEUE_END_OF_LIST
			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
		wr32(hw, I40E_PFINT_LNKLST0, val);

		val = rd32(hw, I40E_QINT_RQCTL(qp));
		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			 I40E_QINT_RQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_RQCTL(qp), val);

		val = rd32(hw, I40E_QINT_TQCTL(qp));

		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			 I40E_QINT_TQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_TQCTL(qp), val);
	}
}
4430
4431
4432
4433
4434
4435
4436
4437
4438
4439
4440static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4441{
4442 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4443 struct i40e_ring *ring;
4444
4445 if (!q_vector)
4446 return;
4447
4448
4449 i40e_for_each_ring(ring, q_vector->tx)
4450 ring->q_vector = NULL;
4451
4452 i40e_for_each_ring(ring, q_vector->rx)
4453 ring->q_vector = NULL;
4454
4455
4456 if (vsi->netdev)
4457 netif_napi_del(&q_vector->napi);
4458
4459 vsi->q_vectors[v_idx] = NULL;
4460
4461 kfree_rcu(q_vector, rcu);
4462}
4463
4464
4465
4466
4467
4468
4469
4470
4471static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4472{
4473 int v_idx;
4474
4475 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4476 i40e_free_q_vector(vsi, v_idx);
4477}
4478
4479
4480
4481
4482
/**
 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
 * @pf: board private structure
 *
 * Releases whichever interrupt mode (MSI-X or MSI) was in use and clears
 * the corresponding flags.
 **/
static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
{
	/* If we're in MSIX mode, release the vector table and IRQ pile */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		pci_disable_msix(pf->pdev);
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		kfree(pf->irq_pile);
		pf->irq_pile = NULL;
	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
		pci_disable_msi(pf->pdev);
	}
	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
}
4497
4498
4499
4500
4501
4502
4503
4504
/**
 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @pf: board private structure
 *
 * We go through and clear interrupt specific resources and reset the
 * structure to pre-load conditions.
 **/
static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
	int i;

	i40e_stop_misc_vector(pf);
	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
		/* vector 0 is the misc/other-causes interrupt */
		synchronize_irq(pf->msix_entries[0].vector);
		free_irq(pf->msix_entries[0].vector, pf);
	}

	/* return the iWARP vectors to the pile */
	i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
		      I40E_IWARP_IRQ_PILE_ID);

	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	i40e_reset_interrupt_capability(pf);
}
4524
4525
4526
4527
4528
4529static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4530{
4531 int q_idx;
4532
4533 if (!vsi->netdev)
4534 return;
4535
4536 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4537 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4538
4539 if (q_vector->rx.ring || q_vector->tx.ring)
4540 napi_enable(&q_vector->napi);
4541 }
4542}
4543
4544
4545
4546
4547
4548static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4549{
4550 int q_idx;
4551
4552 if (!vsi->netdev)
4553 return;
4554
4555 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4556 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4557
4558 if (q_vector->rx.ring || q_vector->tx.ring)
4559 napi_disable(&q_vector->napi);
4560 }
4561}
4562
4563
4564
4565
4566
/**
 * i40e_vsi_close - Shut down a VSI
 * @vsi: the vsi to be quelled
 *
 * Brings the VSI down if it isn't already, frees its IRQs and ring
 * resources, and notifies the client service of the change.
 **/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
		i40e_down(vsi);
	i40e_vsi_free_irq(vsi);
	i40e_vsi_free_tx_resources(vsi);
	i40e_vsi_free_rx_resources(vsi);
	vsi->current_netdev_flags = 0;
	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
	/* during a reset, clients need an explicit reset notification */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		pf->flags |= I40E_FLAG_CLIENT_RESET;
}
4580
4581
4582
4583
4584
/**
 * i40e_quiesce_vsi - Pause a given VSI
 * @vsi: the VSI being paused
 *
 * Marks the VSI as needing a restart and brings it down, either through
 * the netdev stop path (so the stack sees the transition) or directly.
 **/
static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
{
	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
	else
		i40e_vsi_close(vsi);
}
4596
4597
4598
4599
4600
/**
 * i40e_unquiesce_vsi - Resume a given VSI
 * @vsi: the VSI being resumed
 *
 * Resumes a VSI that was previously paused by i40e_quiesce_vsi(); no-op
 * if the VSI was not marked as needing a restart.
 **/
static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
{
	if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
		return;

	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
	else
		i40e_vsi_open(vsi);
}
4611
4612
4613
4614
4615
4616static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4617{
4618 int v;
4619
4620 for (v = 0; v < pf->num_alloc_vsi; v++) {
4621 if (pf->vsi[v])
4622 i40e_quiesce_vsi(pf->vsi[v]);
4623 }
4624}
4625
4626
4627
4628
4629
4630static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4631{
4632 int v;
4633
4634 for (v = 0; v < pf->num_alloc_vsi; v++) {
4635 if (pf->vsi[v])
4636 i40e_unquiesce_vsi(pf->vsi[v]);
4637 }
4638}
4639
4640
4641
4642
4643
4644
4645
/**
 * i40e_vsi_wait_queues_disabled - Wait on each queue of a VSI to be disabled
 * @vsi: the VSI being configured
 *
 * Waits until each Tx queue (including the XDP Tx queue when XDP is
 * enabled) and each Rx queue of the VSI reports disabled.
 *
 * Returns 0 on success, negative on a queue-disable timeout.
 **/
int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* Check and wait for the Tx queue */
		ret = i40e_pf_txq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Tx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}

		if (!i40e_enabled_xdp_vsi(vsi))
			goto wait_rx;

		/* Check and wait for the XDP Tx queue */
		ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
				       false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d XDP Tx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
wait_rx:
		/* Check and wait for the Rx queue */
		ret = i40e_pf_rxq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
	}

	return 0;
}
4687
4688#ifdef CONFIG_I40E_DCB
4689
4690
4691
4692
4693
4694
4695
4696static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4697{
4698 int v, ret = 0;
4699
4700 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4701 if (pf->vsi[v]) {
4702 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4703 if (ret)
4704 break;
4705 }
4706 }
4707
4708 return ret;
4709}
4710
4711#endif
4712
4713
4714
4715
4716
4717
4718
4719
4720
4721
4722
/**
 * i40e_detect_recover_hung_queue - Detect and recover a hung Tx queue
 * @q_idx: netdev Tx queue index to check
 * @vsi: the VSI the queue belongs to
 *
 * If the queue has pending descriptors while its interrupt is disarmed,
 * force a descriptor write-back to kick completion processing.
 **/
static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct i40e_pf *pf;
	u32 val, tx_pending;
	int i;

	pf = vsi->back;

	/* find the ring backing this netdev queue index */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (q_idx == vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (!tx_ring)
		return;

	/* Read interrupt register */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		val = rd32(&pf->hw,
			   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
					       tx_ring->vsi->base_vector - 1));
	else
		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

	tx_pending = i40e_get_tx_pending(tx_ring);

	/* Interrupts are disabled and there are pending descriptors:
	 * the queue may be stuck, so force a write-back to flush it.
	 */
	if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
		i40e_force_wb(vsi, tx_ring->q_vector);
}
4763
4764
4765
4766
4767
4768
4769
4770
4771
4772static void i40e_detect_recover_hung(struct i40e_pf *pf)
4773{
4774 struct net_device *netdev;
4775 struct i40e_vsi *vsi;
4776 int i;
4777
4778
4779 vsi = pf->vsi[pf->lan_vsi];
4780
4781 if (!vsi)
4782 return;
4783
4784
4785 if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
4786 test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
4787 return;
4788
4789
4790 if (vsi->type != I40E_VSI_MAIN)
4791 return;
4792
4793 netdev = vsi->netdev;
4794 if (!netdev)
4795 return;
4796
4797
4798 if (!netif_carrier_ok(netdev))
4799 return;
4800
4801
4802 for (i = 0; i < netdev->num_tx_queues; i++) {
4803 struct netdev_queue *q;
4804
4805 q = netdev_get_tx_queue(netdev, i);
4806 if (q)
4807 i40e_detect_recover_hung_queue(i, vsi);
4808 }
4809}
4810
4811
4812
4813
4814
4815
4816
4817
4818static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4819{
4820 struct i40e_dcb_app_priority_table app;
4821 struct i40e_hw *hw = &pf->hw;
4822 u8 enabled_tc = 1;
4823 u8 tc, i;
4824
4825 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4826
4827 for (i = 0; i < dcbcfg->numapps; i++) {
4828 app = dcbcfg->app[i];
4829 if (app.selector == I40E_APP_SEL_TCPIP &&
4830 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4831 tc = dcbcfg->etscfg.prioritytable[app.priority];
4832 enabled_tc |= BIT(tc);
4833 break;
4834 }
4835 }
4836
4837 return enabled_tc;
4838}
4839
4840
4841
4842
4843
4844
4845
4846static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4847{
4848 int i, tc_unused = 0;
4849 u8 num_tc = 0;
4850 u8 ret = 0;
4851
4852
4853
4854
4855
4856 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
4857 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
4858
4859
4860
4861
4862 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4863 if (num_tc & BIT(i)) {
4864 if (!tc_unused) {
4865 ret++;
4866 } else {
4867 pr_err("Non-contiguous TC - Disabling DCB\n");
4868 return 1;
4869 }
4870 } else {
4871 tc_unused = 1;
4872 }
4873 }
4874
4875
4876 if (!ret)
4877 ret = 1;
4878
4879 return ret;
4880}
4881
4882
4883
4884
4885
4886
4887
4888
4889static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4890{
4891 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4892 u8 enabled_tc = 1;
4893 u8 i;
4894
4895 for (i = 0; i < num_tc; i++)
4896 enabled_tc |= BIT(i);
4897
4898 return enabled_tc;
4899}
4900
4901
4902
4903
4904
4905
4906
4907static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4908{
4909 struct i40e_hw *hw = &pf->hw;
4910 u8 i, enabled_tc = 1;
4911 u8 num_tc = 0;
4912 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4913
4914
4915 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4916 return 1;
4917
4918
4919 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4920 return i40e_dcb_get_num_tc(dcbcfg);
4921
4922
4923 if (pf->hw.func_caps.iscsi)
4924 enabled_tc = i40e_get_iscsi_tc_map(pf);
4925 else
4926 return 1;
4927
4928 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4929 if (enabled_tc & BIT(i))
4930 num_tc++;
4931 }
4932 return num_tc;
4933}
4934
4935
4936
4937
4938
4939
4940
4941static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4942{
4943
4944 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4945 return I40E_DEFAULT_TRAFFIC_CLASS;
4946
4947
4948 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4949 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4950
4951
4952 if (pf->hw.func_caps.iscsi)
4953 return i40e_get_iscsi_tc_map(pf);
4954 else
4955 return I40E_DEFAULT_TRAFFIC_CLASS;
4956}
4957
4958
4959
4960
4961
4962
4963
/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Queries both the aggregate VSI bandwidth config and the per-TC ETS SLA
 * config from the AdminQ and caches the results in the VSI.
 *
 * Returns 0 on success, -EINVAL on an AdminQ failure.
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
					       NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* the two queries should agree on the enabled-TC bitmap; log a
	 * mismatch but carry on with the values we got
	 */
	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	/* tc_bw_max is split across two 16-bit LE words */
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
					le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
	}

	return 0;
}
5017
5018
5019
5020
5021
5022
5023
5024
5025
5026static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5027 u8 *bw_share)
5028{
5029 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5030 i40e_status ret;
5031 int i;
5032
5033 bw_data.tc_valid_bits = enabled_tc;
5034 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5035 bw_data.tc_bw_credits[i] = bw_share[i];
5036
5037 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
5038 NULL);
5039 if (ret) {
5040 dev_info(&vsi->back->pdev->dev,
5041 "AQ command Config VSI BW allocation per TC failed = %d\n",
5042 vsi->back->hw.aq.asq_last_status);
5043 return -EINVAL;
5044 }
5045
5046 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5047 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5048
5049 return 0;
5050}
5051
5052
5053
5054
5055
5056
5057
/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 *
 * Mirrors the VSI's TC configuration into the netdev: number of TCs,
 * per-TC queue ranges, and the user-priority-to-TC mapping.
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (!netdev)
		return;

	/* no TCs enabled -> clear any netdev TC setup */
	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled tcs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x00001001; the driver
		 * will set the numtc for netdev as 2 that will be
		 * referenced by the netdev layer as TC 0 and 1.
		 */
		if (vsi->tc_config.enabled_tc & BIT(i))
			netdev_set_tc_queue(netdev,
					vsi->tc_config.tc_info[i].netdev_tc,
					vsi->tc_config.tc_info[i].qcount,
					vsi->tc_config.tc_info[i].qoffset);
	}

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP */
		netdev_tc =  vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}
5104
5105
5106
5107
5108
5109
/**
 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 *
 * Copies the firmware-accepted queue/TC mapping from the AQ context back
 * into the driver's cached VSI info.
 **/
static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
				      struct i40e_vsi_context *ctxt)
{
	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	vsi->info.mapping_flags = ctxt->info.mapping_flags;
	memcpy(&vsi->info.queue_mapping,
	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}
5123
5124
5125
5126
5127
5128
5129
5130
5131
5132
5133
5134
5135
5136
/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap.  The sequence is: configure BW allocation, update the
 * VSI parameters through the AdminQ, refresh the cached BW info, and
 * finally propagate the map to the netdev.
 *
 * Returns 0 on success, negative on failure.
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);

	if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
		ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Update vsi tc config failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed updating vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}
5206
5207
5208
5209
5210
5211
5212
5213
/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element.  Each enabled
 * TC gets an equal bandwidth share.
 *
 * Returns 0 on success (or when nothing changed), negative on failure.
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
	struct i40e_pf *pf = veb->pf;
	int ret = 0;
	int i;

	/* No TCs or already enabled TCs just return */
	if (!enabled_tc || veb->enabled_tc == enabled_tc)
		return ret;

	bw_data.tc_valid_bits = enabled_tc;
	/* bw_data.absolute_credits is not set (relative) */

	/* Enable ETS TCs with equal BW Share for now */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_data.tc_bw_share_credits[i] = 1;
	}

	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
						   &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VEB bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the BW information */
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed getting veb bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return ret;
}
5256
5257#ifdef CONFIG_I40E_DCB
5258
5259
5260
5261
5262
5263
5264
5265
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller has quiesced the VSIs before calling this function.
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v])
			continue;

		/* - Enable all TCs for the LAN VSI
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = I40E_DEFAULT_TRAFFIC_CLASS;

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components */
		} else {
			/* Re-configure VSI vectors based on updated TC map */
			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
	}
}
5313
5314
5315
5316
5317
5318
5319
5320
5321static int i40e_resume_port_tx(struct i40e_pf *pf)
5322{
5323 struct i40e_hw *hw = &pf->hw;
5324 int ret;
5325
5326 ret = i40e_aq_resume_port_tx(hw, NULL);
5327 if (ret) {
5328 dev_info(&pf->pdev->dev,
5329 "Resume Port Tx failed, err %s aq_err %s\n",
5330 i40e_stat_str(&pf->hw, ret),
5331 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5332
5333 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
5334 i40e_service_event_schedule(pf);
5335 }
5336
5337 return ret;
5338}
5339
5340
5341
5342
5343
5344
5345
5346
/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure.  Sets the DCB capability and enable flags
 * depending on what firmware reports.
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
	if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT)
		goto out;

	/* Get the initial DCB configuration */
	err = i40e_init_dcb(hw);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");
		} else {
			/* When status is not DISABLED then DCBX in FW */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;

			pf->flags |= I40E_FLAG_DCB_CAPABLE;
			/* Enable DCB tagging only when more than one TC
			 * or explicitly disable if only one TC
			 */
			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
				pf->flags |= I40E_FLAG_DCB_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_DCB_ENABLED;
			dev_dbg(&pf->pdev->dev,
				"DCBX offload is supported for this PF.\n");
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "Query for DCB configuration failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return err;
}
5390#endif
5391#define SPEED_SIZE 14
5392#define FC_SIZE 8
5393
5394
5395
5396
/**
 * i40e_print_link_message - print link up or down message
 * @vsi: the VSI for which link needs a message
 * @isup: true for link up, false for link down
 *
 * Logs the link state to the netdev log, including speed, FEC and
 * autoneg details (25G only), and flow control mode.  Suppresses the
 * message when neither link state nor speed has changed.
 **/
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
	enum i40e_aq_link_speed new_speed;
	char *speed = "Unknown";
	char *fc = "Unknown";
	char *fec = "";
	char *an = "";

	new_speed = vsi->back->hw.phy.link_info.link_speed;

	/* nothing changed - don't repeat the message */
	if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
		return;
	vsi->current_isup = isup;
	vsi->current_speed = new_speed;
	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	/* Warn user if link speed on NPAR enabled partition is not at
	 * least 10GB
	 */
	if (vsi->back->hw.func_caps.npar_enable &&
	    (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
	     vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
		netdev_warn(vsi->netdev,
			    "The partition detected link speed that is less than 10Gbps\n");

	switch (vsi->back->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case I40E_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case I40E_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case I40E_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case I40E_LINK_SPEED_1GB:
		speed = "1000 M";
		break;
	case I40E_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		break;
	}

	switch (vsi->back->hw.fc.current_mode) {
	case I40E_FC_FULL:
		fc = "RX/TX";
		break;
	case I40E_FC_TX_PAUSE:
		fc = "TX";
		break;
	case I40E_FC_RX_PAUSE:
		fc = "RX";
		break;
	default:
		fc = "None";
		break;
	}

	/* FEC and autoneg details are only reported for 25G links */
	if (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
		fec = ", FEC: None";
		an = ", Autoneg: False";

		if (vsi->back->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
			an = ", Autoneg: True";

		if (vsi->back->hw.phy.link_info.fec_info &
		    I40E_AQ_CONFIG_FEC_KR_ENA)
			fec = ", FEC: CL74 FC-FEC/BASE-R";
		else if (vsi->back->hw.phy.link_info.fec_info &
			 I40E_AQ_CONFIG_FEC_RS_ENA)
			fec = ", FEC: CL108 RS-FEC";
	}

	netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s, Flow Control: %s\n",
		    speed, fec, an, fc);
}
5481
5482
5483
5484
5485
/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 *
 * Configures interrupts, starts the rings, enables NAPI and IRQs, and
 * reports link state.  For the FDIR VSI, restores the flow director
 * filter table.
 *
 * Returns 0 on success, negative if the rings could not be started.
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_start_rings(vsi);
	if (err)
		return err;

	clear_bit(__I40E_VSI_DOWN, vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		i40e_print_link_message(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	} else if (vsi->netdev) {
		i40e_print_link_message(vsi, false);
		/* need to check for qualified module here*/
		if ((pf->hw.phy.link_info.link_info &
			I40E_AQ_MEDIA_AVAILABLE) &&
		    (!(pf->hw.phy.link_info.an_info &
			I40E_AQ_QUALIFIED_MODULE)))
			netdev_err(vsi->netdev,
				   "the driver failed to link because an unqualified module was detected.");
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR) {
		/* reset fd counters */
		pf->fd_add_err = 0;
		pf->fd_atr_cnt = 0;
		i40e_fdir_filter_restore(vsi);
	}

	/* On the next run of the service_task, notify any clients of the new
	 * opened netdev
	 */
	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
	i40e_service_event_schedule(pf);

	return 0;
}
5537
5538
5539
5540
5541
5542
5543
5544
/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.  Takes the PF CONFIG_BUSY bit to serialize
 * against other reconfiguration paths.
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	WARN_ON(in_interrupt());
	/* busy-wait for exclusive config access */
	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
		usleep_range(1000, 2000);
	i40e_down(vsi);

	i40e_up(vsi);
	clear_bit(__I40E_CONFIG_BUSY, pf->state);
}
5557
5558
5559
5560
5561
/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative error code otherwise.
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err = i40e_vsi_configure(vsi);

	if (err)
		return err;

	return i40e_up_complete(vsi);
}
5572
5573
5574
5575
5576
/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 *
 * Stops Tx at the stack level, disables interrupts and rings, stops
 * NAPI, and cleans all rings (including XDP Tx rings when enabled).
 **/
void i40e_down(struct i40e_vsi *vsi)
{
	int i;

	/* It is assumed that the caller of this function
	 * sets the vsi->state __I40E_VSI_DOWN bit.
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}
	i40e_vsi_disable_irq(vsi);
	i40e_vsi_stop_rings(vsi);
	i40e_napi_disable_all(vsi);

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		i40e_clean_tx_ring(vsi->tx_rings[i]);
		if (i40e_enabled_xdp_vsi(vsi))
			i40e_clean_tx_ring(vsi->xdp_rings[i]);
		i40e_clean_rx_ring(vsi->rx_rings[i]);
	}

}
5600
5601
5602
5603
5604
5605
/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 *
 * Validates the request (DCB enabled, not MFP, TC count within what the
 * link supports), then quiesces the VSI, applies the new TC map, and
 * resumes it.
 *
 * Returns 0 on success, negative on failure.
 **/
static int i40e_setup_tc(struct net_device *netdev, u8 tc)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc = 0;
	int ret = -EINVAL;
	int i;

	/* Check if DCB enabled to continue */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
		netdev_info(netdev, "DCB is not enabled for adapter\n");
		goto exit;
	}

	/* Check if MFP enabled */
	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
		goto exit;
	}

	/* Check whether tc count is within enabled limit */
	if (tc > i40e_pf_get_num_tc(pf)) {
		netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
		goto exit;
	}

	/* Generate TC map for number of tc requested */
	for (i = 0; i < tc; i++)
		enabled_tc |= BIT(i);

	/* Requesting same TC configuration as already enabled */
	if (enabled_tc == vsi->tc_config.enabled_tc)
		return 0;

	/* Quiesce VSI queues */
	i40e_quiesce_vsi(vsi);

	/* Configure VSI for enabled TCs */
	ret = i40e_vsi_config_tc(vsi, enabled_tc);
	if (ret) {
		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
			    vsi->seid);
		goto exit;
	}

	/* Unquiesce VSI */
	i40e_unquiesce_vsi(vsi);

exit:
	return ret;
}
5658
/* ndo_setup_tc entry point: only MQPRIO offload is supported; the
 * full TC configuration is offloaded to hardware.
 */
static int __i40e_setup_tc(struct net_device *netdev, u32 handle,
			   u32 chain_index, __be16 proto,
			   struct tc_to_netdev *tc)
{
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return i40e_setup_tc(netdev, tc->mqprio->num_tc);
}
5670
5671
5672
5673
5674
5675
5676
5677
5678
5679
5680
5681
5682
/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Brings the VSI up via i40e_vsi_open(), programs the global TSO
 * flag-mask registers, and asks the stack to replay UDP tunnel port
 * state for this netdev.
 *
 * Returns 0 on success, -EBUSY while a self-test or bad-EEPROM
 * condition is active, or the error from i40e_vsi_open().
 **/
int i40e_open(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int err;

	/* disallow open during test or if eeprom is broken */
	if (test_bit(__I40E_TESTING, pf->state) ||
	    test_bit(__I40E_BAD_EEPROM, pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	err = i40e_vsi_open(vsi);
	if (err)
		return err;

	/* configure global TSO hardware offload settings; the masks are
	 * built from TCP flag constants, byte-swapped and shifted into
	 * the register's expected field position
	 */
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN |
						       TCP_FLAG_CWR) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);

	udp_tunnel_get_rx_info(netdev);

	return 0;
}
5713
5714
5715
5716
5717
5718
5719
5720
5721
5722
5723
/**
 * i40e_vsi_open - bring a VSI fully up
 * @vsi: the VSI to open
 *
 * Allocates and configures Tx/Rx ring resources, requests the IRQs
 * (named after the netdev, or after the device for the FDIR control
 * VSI), tells the stack the real queue counts, and completes bring-up.
 *
 * On failure everything is unwound in reverse order; if the failing
 * VSI is the main LAN VSI a PF reset is also requested.
 *
 * Returns 0 on success, negative value on failure.
 **/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	char int_name[I40E_INT_NAME_STR_LEN];
	int err;

	/* allocate descriptors */
	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_setup_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_setup_rx;

	err = i40e_vsi_configure(vsi);
	if (err)
		goto err_setup_rx;

	if (vsi->netdev) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
		err = i40e_vsi_request_irq(vsi, int_name);
		if (err)
			goto err_setup_rx;

		/* Notify the stack of the actual queue counts. */
		err = netif_set_real_num_tx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

		err = netif_set_real_num_rx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

	} else if (vsi->type == I40E_VSI_FDIR) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
			 dev_driver_string(&pf->pdev->dev),
			 dev_name(&pf->pdev->dev));
		err = i40e_vsi_request_irq(vsi, int_name);

	} else {
		/* no netdev and not the FDIR control VSI: nothing to open */
		err = -EINVAL;
		goto err_setup_rx;
	}

	err = i40e_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	i40e_down(vsi);
err_set_queues:
	i40e_vsi_free_irq(vsi);
err_setup_rx:
	i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	/* the main LAN VSI failing to open is serious enough to force a
	 * PF reset for recovery
	 */
	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);

	return err;
}
5790
5791
5792
5793
5794
5795
5796
5797
/**
 * i40e_fdir_filter_exit - Clean up Flow Director filter state
 * @pf: Pointer to PF
 *
 * Frees the saved Flow Director filter list, empties the L3/L4
 * flexible-payload (PIT) lists, zeroes the per-flow-type counters,
 * and restores the default input-set masks for each flow type.
 **/
static void i40e_fdir_filter_exit(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	struct i40e_flex_pit *pit_entry, *tmp;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(filter, node2,
				  &pf->fdir_filter_list, fdir_node) {
		hlist_del(&filter->fdir_node);
		kfree(filter);
	}

	list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
		list_del(&pit_entry->list);
		kfree(pit_entry);
	}
	INIT_LIST_HEAD(&pf->l3_flex_pit_list);

	list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
		list_del(&pit_entry->list);
		kfree(pit_entry);
	}
	INIT_LIST_HEAD(&pf->l4_flex_pit_list);

	pf->fdir_pf_active_filters = 0;
	pf->fd_tcp4_filter_cnt = 0;
	pf->fd_udp4_filter_cnt = 0;
	pf->fd_sctp4_filter_cnt = 0;
	pf->fd_ip4_filter_cnt = 0;

	/* Reprogram the default input set for TCP/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

	/* Reprogram the default input set for UDP/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

	/* Reprogram the default input set for SCTP/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

	/* Reprogram the default input set for Other/IPv4 (no L4 ports) */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
5847
5848
5849
5850
5851
5852
5853
5854
5855
5856
5857
5858int i40e_close(struct net_device *netdev)
5859{
5860 struct i40e_netdev_priv *np = netdev_priv(netdev);
5861 struct i40e_vsi *vsi = np->vsi;
5862
5863 i40e_vsi_close(vsi);
5864
5865 return 0;
5866}
5867
5868
5869
5870
5871
5872
5873
5874
5875
5876
5877
5878
/**
 * i40e_do_reset - Dispatch a requested reset
 * @pf: board private structure
 * @reset_flags: which reset is requested
 * @lock_acquired: whether the RTNL lock has already been taken by the
 * caller (passed through to the PF reset path)
 *
 * Only the highest-priority reset bit present in @reset_flags is
 * serviced: Global > Core > PF > VSI reinit > VSI down.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
	u32 val;

	WARN_ON(in_interrupt());


	/* do the biggest reset indicated */
	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {

		/* Request a Global Reset by setting GLOBR in GLGEN_RTRIG;
		 * hardware takes over from here, so no driver-side
		 * teardown is done on this path.
		 */
		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {

		/* Request a Core Reset by setting CORER in GLGEN_RTRIG;
		 * the register write is flushed so the reset starts
		 * right away.
		 */
		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_CORER_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {

		/* Request a PF Reset: handled entirely by the driver via
		 * the prep-for-reset / reset / rebuild path.
		 */
		dev_dbg(&pf->pdev->dev, "PFR requested\n");
		i40e_handle_reset_warning(pf, lock_acquired);

	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
		int v;

		/* Find the VSI(s) that requested a re-init of the netdev */
		dev_info(&pf->pdev->dev,
			 "VSI reinit requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
					       vsi->state))
				i40e_vsi_reinit_locked(pf->vsi[v]);
		}
	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
		int v;

		/* Find the VSI(s) that needs to be brought down */
		dev_info(&pf->pdev->dev, "VSI down requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
					       vsi->state)) {
				set_bit(__I40E_VSI_DOWN, vsi->state);
				i40e_down(vsi);
			}
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "bad reset request 0x%08x\n", reset_flags);
	}
}
5961
5962#ifdef CONFIG_I40E_DCB
5963
5964
5965
5966
5967
5968
5969bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5970 struct i40e_dcbx_config *old_cfg,
5971 struct i40e_dcbx_config *new_cfg)
5972{
5973 bool need_reconfig = false;
5974
5975
5976 if (memcmp(&new_cfg->etscfg,
5977 &old_cfg->etscfg,
5978 sizeof(new_cfg->etscfg))) {
5979
5980 if (memcmp(&new_cfg->etscfg.prioritytable,
5981 &old_cfg->etscfg.prioritytable,
5982 sizeof(new_cfg->etscfg.prioritytable))) {
5983 need_reconfig = true;
5984 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5985 }
5986
5987 if (memcmp(&new_cfg->etscfg.tcbwtable,
5988 &old_cfg->etscfg.tcbwtable,
5989 sizeof(new_cfg->etscfg.tcbwtable)))
5990 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5991
5992 if (memcmp(&new_cfg->etscfg.tsatable,
5993 &old_cfg->etscfg.tsatable,
5994 sizeof(new_cfg->etscfg.tsatable)))
5995 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5996 }
5997
5998
5999 if (memcmp(&new_cfg->pfc,
6000 &old_cfg->pfc,
6001 sizeof(new_cfg->pfc))) {
6002 need_reconfig = true;
6003 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
6004 }
6005
6006
6007 if (memcmp(&new_cfg->app,
6008 &old_cfg->app,
6009 sizeof(new_cfg->app))) {
6010 need_reconfig = true;
6011 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
6012 }
6013
6014 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
6015 return need_reconfig;
6016}
6017
6018
6019
6020
6021
6022
/**
 * i40e_handle_lldp_event - Handle an LLDP MIB change event from firmware
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Refreshes the cached DCBX configuration from firmware.  If the local
 * configuration changed in a way that matters, the port is suspended,
 * DCB is reconfigured, and the port is resumed; if the queues fail to
 * quiesce, a PF reset is scheduled instead.
 **/
static int i40e_handle_lldp_event(struct i40e_pf *pf,
				  struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lldp_get_mib *mib =
		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_dcbx_config tmp_dcbx_cfg;
	bool need_reconfig = false;
	int ret = 0;
	u8 type;

	/* Not DCB capable or capability disabled */
	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
		return ret;

	/* Ignore if event is not for Nearest Bridge */
	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
		return ret;

	/* Check MIB Type and return if event for Remote MIB update */
	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
	dev_dbg(&pf->pdev->dev,
		"LLDP event mib type %s\n", type ? "remote" : "local");
	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
		/* Update the remote cached instance and return */
		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				&hw->remote_dcbx_config);
		goto exit;
	}

	/* Store the old configuration */
	tmp_dcbx_cfg = hw->local_dcbx_config;

	/* Reset the old DCBx configuration data */
	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
	/* Get updated DCBX data from firmware */
	ret = i40e_get_dcb_config(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto exit;
	}

	/* No change detected in DCBX configs */
	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
		    sizeof(tmp_dcbx_cfg))) {
		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
		goto exit;
	}

	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
					       &hw->local_dcbx_config);

	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);

	if (!need_reconfig)
		goto exit;

	/* Enable DCB tagging only when more than one TC */
	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
		pf->flags |= I40E_FLAG_DCB_ENABLED;
	else
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;

	set_bit(__I40E_PORT_SUSPENDED, pf->state);
	/* Reconfiguration needed quiesce all VSIs */
	i40e_pf_quiesce_all_vsi(pf);

	/* Changes in configuration update VEB/VSI */
	i40e_dcb_reconfigure(pf);

	ret = i40e_resume_port_tx(pf);

	clear_bit(__I40E_PORT_SUSPENDED, pf->state);
	/* In case of error no point in resuming VSIs */
	if (ret)
		goto exit;

	/* Wait for the PF's queues to be disabled */
	ret = i40e_pf_wait_queues_disabled(pf);
	if (ret) {
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		i40e_service_event_schedule(pf);
	} else {
		i40e_pf_unquiesce_all_vsi(pf);
		pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
			      I40E_FLAG_CLIENT_L2_CHANGE);
	}

exit:
	return ret;
}
6122#endif
6123
6124
6125
6126
6127
6128
6129
/**
 * i40e_do_reset_safe - Protected reset path
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * Wraps i40e_do_reset() with the RTNL lock held.
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags, true);
	rtnl_unlock();
}
6136
6137
6138
6139
6140
6141
6142
6143
6144
/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Decodes the queue/ownership info from the AQ event; if the
 * overflowing Tx queue belongs to a VF, that VF is notified of a
 * pending reset and then reset.
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
		queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* give the VF a moment to process the reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}
6172
6173
6174
6175
6176
6177u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
6178{
6179 u32 val, fcnt_prog;
6180
6181 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
6182 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
6183 return fcnt_prog;
6184}
6185
6186
6187
6188
6189
6190u32 i40e_get_current_fd_count(struct i40e_pf *pf)
6191{
6192 u32 val, fcnt_prog;
6193
6194 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
6195 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
6196 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
6197 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
6198 return fcnt_prog;
6199}
6200
6201
6202
6203
6204
6205u32 i40e_get_global_fd_count(struct i40e_pf *pf)
6206{
6207 u32 val, fcnt_prog;
6208
6209 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
6210 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
6211 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
6212 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
6213 return fcnt_prog;
6214}
6215
6216
6217
6218
6219
/**
 * i40e_fdir_check_and_reenable - re-enable FD-SB/ATR if table space allows
 * @pf: board private structure
 *
 * Clears the sideband and/or ATR auto-disable flags once enough room
 * has opened up in the filter table.  Also deletes any filter whose
 * id matches pf->fd_inv.
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	u32 fcnt_prog, fcnt_avail;
	struct hlist_node *node;

	/* counters are in flux while a table flush is pending */
	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		return;

	/* Check if we have enough room to re-enable FD-SB */
	fcnt_prog = i40e_get_global_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
		if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
			pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    (I40E_DEBUG_FD & pf->hw.debug_mask))
				dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
		}
	}

	/* ATR needs even more head room, and cannot be re-enabled while
	 * TCP sideband rules are present (pf->fd_tcp4_filter_cnt != 0).
	 */
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
	    (pf->fd_tcp4_filter_cnt == 0)) {
		if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
			    (I40E_DEBUG_FD & pf->hw.debug_mask))
				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
		}
	}

	/* if hw had a problem adding a filter, delete it */
	if (pf->fd_inv > 0) {
		hlist_for_each_entry_safe(filter, node,
					  &pf->fdir_filter_list, fdir_node) {
			if (filter->fd_id == pf->fd_inv) {
				hlist_del(&filter->fdir_node);
				kfree(filter);
				pf->fdir_pf_active_filters--;
			}
		}
	}
}
6269
6270#define I40E_MIN_FD_FLUSH_INTERVAL 10
6271#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
6272
6273
6274
6275
/**
 * i40e_fdir_flush_and_replay - Flush the HW FD table and replay SB filters
 * @pf: board private structure
 *
 * Triggers a hardware flush of the Flow Director table (rate-limited
 * to once per I40E_MIN_FD_FLUSH_INTERVAL seconds), polls for the flush
 * to complete, and on success replays the software sideband filter
 * list back into hardware.
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	int reg;

	/* rate-limit: do nothing if we flushed too recently */
	if (!time_after(jiffies, pf->fd_flush_timestamp +
			(I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
		return;

	/* If flushes are happening in quick succession and the table is
	 * mostly sideband rules, keep ATR disabled after the flush.
	 */
	min_flush_time = pf->fd_flush_timestamp +
			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;

	if (!(time_after(jiffies, min_flush_time)) &&
	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
		disable_atr = true;
	}

	pf->fd_flush_timestamp = jiffies;
	pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
	/* flush all filters */
	wr32(&pf->hw, I40E_PFQF_CTL_1,
	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	i40e_flush(&pf->hw);
	pf->fd_flush_cnt++;
	pf->fd_add_err = 0;
	do {
		/* Check FD flush status every 5-6msec */
		usleep_range(5000, 6000);
		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	} while (flush_wait_retry--);
	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
	} else {
		/* replay sideband filters */
		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
		if (!disable_atr && !pf->fd_tcp4_filter_cnt)
			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
		clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
	}
}
6329
6330
6331
6332
6333
6334u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
6335{
6336 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
6337}
6338
6339
6340
6341
6342
6343
6344#define I40E_MAX_FD_PROGRAM_ERROR 256
6345
6346
6347
6348
6349
6350static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
6351{
6352
6353
6354 if (test_bit(__I40E_DOWN, pf->state))
6355 return;
6356
6357 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
6358 i40e_fdir_flush_and_replay(pf);
6359
6360 i40e_fdir_check_and_reenable(pf);
6361
6362}
6363
6364
6365
6366
6367
6368
/**
 * i40e_vsi_link_event - notify a VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 *
 * Only the main VSI's netdev carrier/queue state is touched; all other
 * VSI types take no action here.
 **/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
	if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		if (!vsi->netdev || !vsi->netdev_registered)
			break;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
		break;

	case I40E_VSI_SRIOV:
	case I40E_VSI_VMDQ2:
	case I40E_VSI_CTRL:
	case I40E_VSI_IWARP:
	case I40E_VSI_MIRROR:
	default:
		/* there is no notification for other VSIs */
		break;
	}
}
6398
6399
6400
6401
6402
6403
6404static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
6405{
6406 struct i40e_pf *pf;
6407 int i;
6408
6409 if (!veb || !veb->pf)
6410 return;
6411 pf = veb->pf;
6412
6413
6414 for (i = 0; i < I40E_MAX_VEB; i++)
6415 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
6416 i40e_veb_link_event(pf->veb[i], link_up);
6417
6418
6419 for (i = 0; i < pf->num_alloc_vsi; i++)
6420 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
6421 i40e_vsi_link_event(pf->vsi[i], link_up);
6422}
6423
6424
6425
6426
6427
/**
 * i40e_link_event - Update netif_carrier based on link state
 * @pf: board private structure
 *
 * Refreshes link state from the hardware, and if link or speed
 * changed, propagates the event down the switch tree, notifies VFs,
 * and updates the PTP increment when PTP is enabled.
 **/
static void i40e_link_event(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 new_link_speed, old_link_speed;
	i40e_status status;
	bool new_link, old_link;

	/* save off old link status information */
	pf->hw.phy.link_info_old = pf->hw.phy.link_info;

	/* force a fresh read of link state */
	pf->hw.phy.get_link_info = true;

	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);

	status = i40e_get_link_status(&pf->hw, &new_link);

	/* On success, disable temp link polling */
	if (status == I40E_SUCCESS) {
		if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)
			pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING;
	} else {
		/* Enable link polling temporarily until i40e_get_link_status
		 * returns I40E_SUCCESS
		 */
		pf->flags |= I40E_FLAG_TEMP_LINK_POLLING;
		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
			status);
		return;
	}

	old_link_speed = pf->hw.phy.link_info_old.link_speed;
	new_link_speed = pf->hw.phy.link_info.link_speed;

	/* nothing changed and the carrier already matches: done */
	if (new_link == old_link &&
	    new_link_speed == old_link_speed &&
	    (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	     new_link == netif_carrier_ok(vsi->netdev)))
		return;

	if (!test_bit(__I40E_VSI_DOWN, vsi->state))
		i40e_print_link_message(vsi, new_link);

	/* Notify the base of the switch tree connected to the link:
	 * either the LAN VEB (which fans out recursively) or, when no
	 * VEB is present, the main VSI directly.
	 */
	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
	else
		i40e_vsi_link_event(vsi, new_link);

	if (pf->vf)
		i40e_vc_notify_link_state(pf);

	if (pf->flags & I40E_FLAG_PTP)
		i40e_ptp_set_increment(pf);
}
6485
6486
6487
6488
6489
/**
 * i40e_watchdog_subtask - periodic checks not using event driven response
 * @pf: board private structure
 *
 * Rate-limited by pf->service_timer_period.  Polls link state when
 * polling is enabled, refreshes netdev VSI stats, refreshes VEB stats
 * when enabled, and runs the PTP Rx/Tx hang detectors.
 **/
static void i40e_watchdog_subtask(struct i40e_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies, (pf->service_timer_previous +
				  pf->service_timer_period)))
		return;
	pf->service_timer_previous = jiffies;

	if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
	    (pf->flags & I40E_FLAG_TEMP_LINK_POLLING))
		i40e_link_event(pf);

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			i40e_update_stats(pf->vsi[i]);

	if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
		/* Update the stats for the active switching components */
		for (i = 0; i < I40E_MAX_VEB; i++)
			if (pf->veb[i])
				i40e_update_veb_stats(pf->veb[i]);
	}

	i40e_ptp_rx_hang(pf);
	i40e_ptp_tx_hang(pf);
}
6526
6527
6528
6529
6530
/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 *
 * Collects (and clears) all pending reset request bits from pf->state,
 * first services a hardware-initiated reset interrupt if one is
 * pending, and then dispatches the collected requests to
 * i40e_do_reset() unless the device is down or busy.
 **/
static void i40e_reset_subtask(struct i40e_pf *pf)
{
	u32 reset_flags = 0;

	if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_REINIT_REQUESTED);
		clear_bit(__I40E_REINIT_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
		clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
		clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_DOWN_REQUESTED);
		clear_bit(__I40E_DOWN_REQUESTED, pf->state);
	}

	/* If there's a recovery already waiting, it takes
	 * precedence before starting a new reset sequence.
	 */
	if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
		i40e_prep_for_reset(pf, false);
		i40e_reset(pf);
		i40e_rebuild(pf, false, false);
	}

	/* If we're already down or resetting, just bail */
	if (reset_flags &&
	    !test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
		i40e_do_reset(pf, reset_flags, false);
	}
}
6572
6573
6574
6575
6576
6577
/**
 * i40e_handle_link_event - Handle link event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static void i40e_handle_link_event(struct i40e_pf *pf,
				   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_get_link_status *status =
		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;

	/* The event's link state is deliberately ignored; a fresh
	 * status request is always issued via i40e_link_event(), which
	 * re-reads the state directly from hardware.
	 */
	i40e_link_event(pf);

	/* check for unqualified module, if link is down */
	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
	    (!(status->link_info & I40E_AQ_LINK_UP)))
		dev_err(&pf->pdev->dev,
			"The driver failed to link because an unqualified module was detected.\n");
}
6599
6600
6601
6602
6603
/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 *
 * Checks and clears any error conditions latched in the ARQ/ASQ length
 * registers, then drains up to pf->adminq_work_limit events from the
 * admin receive queue, dispatching each by opcode.  Finally re-enables
 * the AdminQ interrupt cause.
 **/
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 oldval;
	u32 val;

	/* Do not run clean AQ when the PF reset has failed */
	if (test_bit(__I40E_RESET_FAILED, pf->state))
		return;

	/* check for and clear error indications on the receive queue */
	val = rd32(&pf->hw, pf->hw.aq.arq.len);
	oldval = val;
	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
		pf->arq_overflows++;
	}
	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.arq.len, val);

	/* same error checks for the send queue */
	val = rd32(&pf->hw, pf->hw.aq.asq.len);
	oldval = val;
	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.asq.len, val);

	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	/* drain events from the receive queue, bounded by the work limit */
	do {
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;
		else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {

		case i40e_aqc_opc_get_link_status:
			i40e_handle_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_len);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
			rtnl_lock();
			ret = i40e_handle_lldp_event(pf, &event);
			rtnl_unlock();
#endif
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_peer:
			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
			break;
		case i40e_aqc_opc_nvm_erase:
		case i40e_aqc_opc_nvm_update:
		case i40e_aqc_opc_oem_post_update:
			i40e_debug(&pf->hw, I40E_DEBUG_NVM,
				   "ARQ NVM operation 0x%04x completed\n",
				   opcode);
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ: Unknown event 0x%04x ignored\n",
				 opcode);
			break;
		}
	} while (i++ < pf->adminq_work_limit);

	/* only clear the pending state bit if the queue was fully drained */
	if (i < pf->adminq_work_limit)
		clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);

	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);
	i40e_flush(hw);

	kfree(event.msg_buf);
}
6729
6730
6731
6732
6733
6734static void i40e_verify_eeprom(struct i40e_pf *pf)
6735{
6736 int err;
6737
6738 err = i40e_diag_eeprom_test(&pf->hw);
6739 if (err) {
6740
6741 err = i40e_diag_eeprom_test(&pf->hw);
6742 if (err) {
6743 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6744 err);
6745 set_bit(__I40E_BAD_EEPROM, pf->state);
6746 }
6747 }
6748
6749 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
6750 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6751 clear_bit(__I40E_BAD_EEPROM, pf->state);
6752 }
6753}
6754
6755
6756
6757
6758
6759
6760
6761static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6762{
6763 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6764 struct i40e_vsi_context ctxt;
6765 int ret;
6766
6767 ctxt.seid = pf->main_vsi_seid;
6768 ctxt.pf_num = pf->hw.pf_id;
6769 ctxt.vf_num = 0;
6770 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6771 if (ret) {
6772 dev_info(&pf->pdev->dev,
6773 "couldn't get PF vsi config, err %s aq_err %s\n",
6774 i40e_stat_str(&pf->hw, ret),
6775 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6776 return;
6777 }
6778 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6779 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6780 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6781
6782 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6783 if (ret) {
6784 dev_info(&pf->pdev->dev,
6785 "update vsi switch failed, err %s aq_err %s\n",
6786 i40e_stat_str(&pf->hw, ret),
6787 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6788 }
6789}
6790
6791
6792
6793
6794
6795
6796
6797static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6798{
6799 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6800 struct i40e_vsi_context ctxt;
6801 int ret;
6802
6803 ctxt.seid = pf->main_vsi_seid;
6804 ctxt.pf_num = pf->hw.pf_id;
6805 ctxt.vf_num = 0;
6806 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6807 if (ret) {
6808 dev_info(&pf->pdev->dev,
6809 "couldn't get PF vsi config, err %s aq_err %s\n",
6810 i40e_stat_str(&pf->hw, ret),
6811 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6812 return;
6813 }
6814 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6815 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6816 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6817
6818 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6819 if (ret) {
6820 dev_info(&pf->pdev->dev,
6821 "update vsi switch failed, err %s aq_err %s\n",
6822 i40e_stat_str(&pf->hw, ret),
6823 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6824 }
6825}
6826
6827
6828
6829
6830
6831
6832
6833
6834
6835static void i40e_config_bridge_mode(struct i40e_veb *veb)
6836{
6837 struct i40e_pf *pf = veb->pf;
6838
6839 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
6840 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6841 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
6842 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6843 i40e_disable_pf_switch_lb(pf);
6844 else
6845 i40e_enable_pf_switch_lb(pf);
6846}
6847
6848
6849
6850
6851
6852
6853
6854
6855
6856
/**
 * i40e_reconstitute_veb - rebuild the VEB and VSIs connected to it
 * @veb: pointer to the VEB instance
 *
 * Recursive: rebuilds the owner (control) VSI, re-adds the VEB itself,
 * re-adds all other VSIs attached to it, then recurses into any VEBs
 * cascaded below.  Connections are tracked through the driver's own
 * index numbers (veb_idx/idx) rather than SEIDs, since SEIDs can
 * change across a reset.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* find the VSI that owns this VEB */
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	/* non-main owners are temporarily attached to the base uplink */
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of veb_idx %d owner VSI failed: %d\n",
			 veb->idx, ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		veb->bridge_mode = BRIDGE_MODE_VEB;
	else
		veb->bridge_mode = BRIDGE_MODE_VEPA;
	i40e_config_bridge_mode(veb);

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];

			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}
6934
6935
6936
6937
6938
/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 *
 * Queries the function-level capability list from firmware, retrying
 * with a larger buffer if firmware reports ENOMEM (buffer too small).
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENODEV if
 * firmware rejects the request.
 **/
static int i40e_get_capabilities(struct i40e_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
					    &data_size,
					    i40e_aqc_opc_list_func_capabilities,
					    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENODEV;
		}
	} while (err);

	if (pf->hw.debug_mask & I40E_DEBUG_USER)
		dev_info(&pf->pdev->dev,
			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
			 pf->hw.func_caps.num_msix_vectors,
			 pf->hw.func_caps.num_msix_vectors_vf,
			 pf->hw.func_caps.fd_filters_guaranteed,
			 pf->hw.func_caps.fd_filters_best_effort,
			 pf->hw.func_caps.num_tx_qp,
			 pf->hw.func_caps.num_vsis);

/* minimum VSIs needed: main + optional FCoE + one per VF */
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
		     + pf->hw.func_caps.num_vfs)
	if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
		dev_info(&pf->pdev->dev,
			 "got num_vsis %d, setting num_vsis to %d\n",
			 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
		pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
	}

	return 0;
}
6995
6996static int i40e_vsi_clear(struct i40e_vsi *vsi);
6997
6998
6999
7000
7001
/**
 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
 * @pf: board private structure
 **/
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;

	/* If the first hash key register reads back as zero (i.e. the key
	 * was never programmed), write a default hash key.
	 */
	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
		static const u32 hkey[] = {
			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
			0x95b3a76d};
		int i;

		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
	}

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	/* find existing VSI and see if it needs configuring */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);

	/* create a new VSI if none exists */
	if (!vsi) {
		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
				     pf->vsi[pf->lan_vsi]->seid, 0);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			return;
		}
	}

	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
}
7040
7041
7042
7043
7044
7045static void i40e_fdir_teardown(struct i40e_pf *pf)
7046{
7047 struct i40e_vsi *vsi;
7048
7049 i40e_fdir_filter_exit(pf);
7050 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
7051 if (vsi)
7052 i40e_vsi_release(vsi);
7053}
7054
7055
7056
7057
7058
7059
7060
7061
7062
/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called
 *
 * Quiesce the VSIs and shut down the AdminQ and HMC in preparation
 * for a PF reset.  No-op if a reset recovery is already pending.
 **/
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return;
	/* tell the VFs only if the AdminQ is still usable */
	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN;
	 * this modifies netdev structures, so take rtnl_lock unless the
	 * caller already holds it
	 */
	if (!lock_acquired)
		rtnl_lock();
	i40e_pf_quiesce_all_vsi(pf);
	if (!lock_acquired)
		rtnl_unlock();

	/* the switch elements will be rebuilt after reset; invalidate SEIDs */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC only if it was set up */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}
}
7100
7101
7102
7103
7104
7105static void i40e_send_version(struct i40e_pf *pf)
7106{
7107 struct i40e_driver_version dv;
7108
7109 dv.major_version = DRV_VERSION_MAJOR;
7110 dv.minor_version = DRV_VERSION_MINOR;
7111 dv.build_version = DRV_VERSION_BUILD;
7112 dv.subbuild_version = 0;
7113 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
7114 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
7115}
7116
7117
7118
7119
7120
/**
 * i40e_get_oem_version - get OEM specific version information
 * @hw: pointer to the hardware structure
 *
 * Reads the OEM version block out of the NVM (when one is present and
 * well-formed) and stores the combined gen/release value in
 * hw->nvm.oem_ver, marking eetrack with the OEM sentinel ID.
 **/
static void i40e_get_oem_version(struct i40e_hw *hw)
{
	u16 block_offset = 0xffff;
	u16 block_length = 0;
	u16 capabilities = 0;
	u16 gen_snap = 0;
	u16 release = 0;

/* NVM shadow-RAM word offsets for the OEM version block */
#define I40E_SR_NVM_OEM_VERSION_PTR		0x1B
#define I40E_NVM_OEM_LENGTH_OFFSET		0x00
#define I40E_NVM_OEM_CAPABILITIES_OFFSET	0x01
#define I40E_NVM_OEM_GEN_OFFSET			0x02
#define I40E_NVM_OEM_RELEASE_OFFSET		0x03
#define I40E_NVM_OEM_CAPABILITIES_MASK		0x000F
#define I40E_NVM_OEM_LENGTH			3

	/* Check if the pointer to the OEM version block is valid. */
	i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
	if (block_offset == 0xffff)
		return;

	/* Check if the OEM version block has the expected length. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
			   &block_length);
	if (block_length < I40E_NVM_OEM_LENGTH)
		return;

	/* Check if the OEM capabilities word has the expected format:
	 * any set bit in the low nibble means the block is not usable.
	 */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
			   &capabilities);
	if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
		return;

	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
			   &gen_snap);
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
			   &release);
	hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
	hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
}
7161
7162
7163
7164
7165
7166static int i40e_reset(struct i40e_pf *pf)
7167{
7168 struct i40e_hw *hw = &pf->hw;
7169 i40e_status ret;
7170
7171 ret = i40e_pf_reset(hw);
7172 if (ret) {
7173 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
7174 set_bit(__I40E_RESET_FAILED, pf->state);
7175 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
7176 } else {
7177 pf->pfr_count++;
7178 }
7179 return ret;
7180}
7181
7182
7183
7184
7185
7186
7187
7188
/**
 * i40e_rebuild - rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called
 *
 * Brings the AdminQ, HMC, internal switch and VSIs back up after a
 * PF reset, in that strict order.  Falls back to a minimal "PF VSI
 * only" switch configuration when VEB reconstitution fails.
 **/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
	struct i40e_hw *hw = &pf->hw;
	u8 set_fc_aq_fail = 0;
	i40e_status ret;
	u32 val;
	int v;

	if (test_bit(__I40E_DOWN, pf->state))
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto clear_recovery;
	}
	i40e_get_oem_version(&pf->hw);

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
		i40e_verify_eeprom(pf);

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf);
	if (ret)
		goto end_core_reset;

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* continue without DCB enabled */
	}
#endif
	/* do basic switch setup */
	if (!lock_acquired)
		rtnl_lock();
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_unlock;

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure our flow control settings are restored */
	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
	if (ret)
		dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					pf->vsi[pf->lan_vsi]->uplink_seid
								= pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_unlock;
		}
	}

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
#define I40E_REG_MSS 0x000E64DC
#define I40E_REG_MSS_MIN_MASK 0x3FF0000
#define I40E_64BYTE_MSS 0x400000
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
		msleep(75);
		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (ret)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	/* Release the rtnl lock before resetting the VFs */
	if (!lock_acquired)
		rtnl_unlock();

	i40e_reset_all_vfs(pf, true);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* lock already released above; skip the unlock path */
	goto end_core_reset;

end_unlock:
	if (!lock_acquired)
		rtnl_unlock();
end_core_reset:
	clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
}
7379
7380
7381
7382
7383
7384
7385
7386
7387static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
7388 bool lock_acquired)
7389{
7390 int ret;
7391
7392
7393
7394
7395 ret = i40e_reset(pf);
7396 if (!ret)
7397 i40e_rebuild(pf, reinit, lock_acquired);
7398}
7399
7400
7401
7402
7403
7404
7405
7406
7407
7408
/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
{
	i40e_prep_for_reset(pf, lock_acquired);
	i40e_reset_and_rebuild(pf, false, lock_acquired);
}
7414
7415
7416
7417
7418
7419
7420
/**
 * i40e_handle_mdd_event - handle a Malicious Driver Detection event
 * @pf: pointer to the PF structure
 *
 * Runs from the service task when __I40E_MDD_EVENT_PENDING is set.
 * Decodes the global TX/RX MDD registers, logs the offender, requests
 * a PF reset when the PF itself caused the event, disables VFs that
 * exceed the allowed event count, and re-enables the MDD interrupt.
 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
				I40E_GL_MDET_TX_VF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
				 event, queue, pf_num, vf_num);
		/* write-1-to-clear the whole register */
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
				 event, queue, func);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		/* was the event caused by the PF itself? */
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, request a PF reset */
		if (pf_mdd_detected) {
			set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
			i40e_service_event_schedule(pf);
		}
	}

	/* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
		}

		/* too many repeated offenses: disable the VF */
		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
			dev_info(&pf->pdev->dev,
				 "Too many MDD events on VF %d, disabled\n", i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}
7522
7523
7524
7525
7526
7527static void i40e_sync_udp_filters(struct i40e_pf *pf)
7528{
7529 int i;
7530
7531
7532 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7533 if (pf->udp_ports[i].port)
7534 pf->pending_udp_bitmap |= BIT_ULL(i);
7535 }
7536
7537 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
7538}
7539
7540
7541
7542
7543
/**
 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 *
 * Pushes pending UDP tunnel port changes to the firmware: a pending
 * slot with a non-zero port is added, a zero port means the index is
 * deleted from the hardware table.
 **/
static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u16 port;
	int i;

	if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
		return;

	pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->pending_udp_bitmap & BIT_ULL(i)) {
			pf->pending_udp_bitmap &= ~BIT_ULL(i);
			port = pf->udp_ports[i].port;
			if (port)
				ret = i40e_aq_add_udp_tunnel(hw, port,
							pf->udp_ports[i].type,
							NULL, NULL);
			else
				ret = i40e_aq_del_udp_tunnel(hw, i, NULL);

			if (ret) {
				dev_dbg(&pf->pdev->dev,
					"%s %s port %d, index %d failed, err %s aq_err %s\n",
					pf->udp_ports[i].type ? "vxlan" : "geneve",
					port ? "add" : "delete",
					port, i,
					i40e_stat_str(&pf->hw, ret),
					i40e_aq_str(&pf->hw,
						    pf->hw.aq.asq_last_status));
				/* NOTE(review): the stored port is dropped on
				 * ANY failure, including a failed delete —
				 * confirm this is intended and the hw slot
				 * can't be left stale.
				 */
				pf->udp_ports[i].port = 0;
			}
		}
	}
}
7581
7582
7583
7584
7585
/**
 * i40e_service_task - Run the driver's periodic/deferred work
 * @work: pointer to work_struct containing our data
 **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	/* don't bother with service tasks if a reset is in progress */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return;

	/* only one service task at a time */
	if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
		return;

	i40e_detect_recover_hung(pf);
	i40e_sync_filters_subtask(pf);
	i40e_reset_subtask(pf);
	i40e_handle_mdd_event(pf);
	i40e_vc_process_vflr_event(pf);
	i40e_watchdog_subtask(pf);
	i40e_fdir_reinit_subtask(pf);
	if (pf->flags & I40E_FLAG_CLIENT_RESET) {
		/* Client subtask will reopen next time through. */
		i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
		pf->flags &= ~I40E_FLAG_CLIENT_RESET;
	} else {
		i40e_client_subtask(pf);
		if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
			i40e_notify_client_of_l2_param_changes(
							pf->vsi[pf->lan_vsi]);
			pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
		}
	}
	i40e_sync_filters_subtask(pf);
	i40e_sync_udp_filters_subtask(pf);
	i40e_clean_adminq_subtask(pf);

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		i40e_service_event_schedule(pf);
}
7637
7638
7639
7640
7641
/**
 * i40e_service_timer - timer callback
 * @data: pointer to PF struct (cast from unsigned long, pre-4.15 timer API)
 *
 * Re-arms the timer for the next period, then kicks the service task.
 **/
static void i40e_service_timer(unsigned long data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}
7650
7651
7652
7653
7654
7655static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
7656{
7657 struct i40e_pf *pf = vsi->back;
7658
7659 switch (vsi->type) {
7660 case I40E_VSI_MAIN:
7661 vsi->alloc_queue_pairs = pf->num_lan_qps;
7662 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7663 I40E_REQ_DESCRIPTOR_MULTIPLE);
7664 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7665 vsi->num_q_vectors = pf->num_lan_msix;
7666 else
7667 vsi->num_q_vectors = 1;
7668
7669 break;
7670
7671 case I40E_VSI_FDIR:
7672 vsi->alloc_queue_pairs = 1;
7673 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
7674 I40E_REQ_DESCRIPTOR_MULTIPLE);
7675 vsi->num_q_vectors = pf->num_fdsb_msix;
7676 break;
7677
7678 case I40E_VSI_VMDQ2:
7679 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
7680 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7681 I40E_REQ_DESCRIPTOR_MULTIPLE);
7682 vsi->num_q_vectors = pf->num_vmdq_msix;
7683 break;
7684
7685 case I40E_VSI_SRIOV:
7686 vsi->alloc_queue_pairs = pf->num_vf_qps;
7687 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7688 I40E_REQ_DESCRIPTOR_MULTIPLE);
7689 break;
7690
7691 default:
7692 WARN_ON(1);
7693 return -ENODATA;
7694 }
7695
7696 return 0;
7697}
7698
7699
7700
7701
7702
7703
7704
7705
7706
7707static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
7708{
7709 struct i40e_ring **next_rings;
7710 int size;
7711 int ret = 0;
7712
7713
7714 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
7715 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
7716 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
7717 if (!vsi->tx_rings)
7718 return -ENOMEM;
7719 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
7720 if (i40e_enabled_xdp_vsi(vsi)) {
7721 vsi->xdp_rings = next_rings;
7722 next_rings += vsi->alloc_queue_pairs;
7723 }
7724 vsi->rx_rings = next_rings;
7725
7726 if (alloc_qvectors) {
7727
7728 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
7729 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
7730 if (!vsi->q_vectors) {
7731 ret = -ENOMEM;
7732 goto err_vectors;
7733 }
7734 }
7735 return ret;
7736
7737err_vectors:
7738 kfree(vsi->tx_rings);
7739 return ret;
7740}
7741
7742
7743
7744
7745
7746
7747
7748
7749
/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* found a free slot */
	} else {
		ret = -ENODEV;
		goto unlock_pf;
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_VSI_DOWN, vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->int_rate_limit = 0;
	/* only the main VSI uses the full RSS table */
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	hash_init(vsi->mac_filter_hash);
	vsi->irqs_ready = false;

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	/* Initialize VSI lock */
	spin_lock_init(&vsi->mac_filter_hash_lock);
	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
7825
7826
7827
7828
7829
7830
7831
7832
7833
7834static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
7835{
7836
7837 if (free_qvectors) {
7838 kfree(vsi->q_vectors);
7839 vsi->q_vectors = NULL;
7840 }
7841 kfree(vsi->tx_rings);
7842 vsi->tx_rings = NULL;
7843 vsi->rx_rings = NULL;
7844 vsi->xdp_rings = NULL;
7845}
7846
7847
7848
7849
7850
7851
7852static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
7853{
7854 if (!vsi)
7855 return;
7856
7857 kfree(vsi->rss_hkey_user);
7858 vsi->rss_hkey_user = NULL;
7859
7860 kfree(vsi->rss_lut_user);
7861 vsi->rss_lut_user = NULL;
7862}
7863
7864
7865
7866
7867
/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 *
 * Releases the VSI's queue/vector reservations and pointer arrays,
 * removes it from the PF's VSI table, and frees the struct.  Always
 * returns 0.
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	/* sanity check: the PF table slot should still point at this VSI */
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
			vsi->idx, vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx],
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* updates the PF for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	i40e_vsi_free_arrays(vsi, true);
	i40e_clear_rss_config_user(vsi);

	pf->vsi[vsi->idx] = NULL;
	/* let the freed slot be found first by the next allocation scan */
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}
7914
7915
7916
7917
7918
7919static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
7920{
7921 int i;
7922
7923 if (vsi->tx_rings && vsi->tx_rings[0]) {
7924 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7925 kfree_rcu(vsi->tx_rings[i], rcu);
7926 vsi->tx_rings[i] = NULL;
7927 vsi->rx_rings[i] = NULL;
7928 if (vsi->xdp_rings)
7929 vsi->xdp_rings[i] = NULL;
7930 }
7931 }
7932}
7933
7934
7935
7936
7937
/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 *
 * The Tx, (optional) XDP Tx, and Rx ring structs of one queue pair are
 * allocated together in a single kcalloc anchored at vsi->tx_rings[i];
 * the xdp/rx entries point into that same allocation.
 **/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
	int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	/* Set basic values in the rings to be used later during open() */
	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
		/* allocate space for Tx, XDP Tx and Rx in one shot */
		ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->queue_index = i;
		ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		ring->tx_itr_setting = pf->tx_itr_default;
		vsi->tx_rings[i] = ring++;

		if (!i40e_enabled_xdp_vsi(vsi))
			goto setup_rx;

		/* XDP Tx rings occupy the queue range after the LAN queues */
		ring->queue_index = vsi->alloc_queue_pairs + i;
		ring->reg_idx = vsi->base_queue + ring->queue_index;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = NULL;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		set_ring_xdp(ring);
		ring->tx_itr_setting = pf->tx_itr_default;
		vsi->xdp_rings[i] = ring++;

setup_rx:
		ring->queue_index = i;
		ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		ring->rx_itr_setting = pf->rx_itr_default;
		vsi->rx_rings[i] = ring;
	}

	return 0;

err_out:
	i40e_vsi_clear_rings(vsi);
	return -ENOMEM;
}
8003
8004
8005
8006
8007
8008
8009
8010
8011static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
8012{
8013 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
8014 I40E_MIN_MSIX, vectors);
8015 if (vectors < 0) {
8016 dev_info(&pf->pdev->dev,
8017 "MSI-X vector reservation failed: %d\n", vectors);
8018 vectors = 0;
8019 }
8020
8021 return vectors;
8022}
8023
8024
8025
8026
8027
8028
8029
8030
8031
/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Works with the OS to set up the MSIX vectors needed.
 *
 * Returns the number of vectors reserved or negative on failure
 **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int cpus, extra_vectors;
	int vectors_left;
	int v_budget, i;
	int v_actual;
	int iwarp_requested = 0;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request will be comprised of:
	 *   - one for the "other"/misc cause (AdminQ events etc.)
	 *   - LAN traffic queues (at most 50% of vectors at first)
	 *   - one for sideband flow director, if enabled
	 *   - iWARP vectors, if enabled
	 *   - VMDq vectors, if enabled
	 *   - remaining vectors go back to LAN, capped at CPU count
	 * Once counted up, try the reservation; if we can't get what
	 * we want, scale back and redistribute.
	 */
	vectors_left = hw->func_caps.num_msix_vectors;
	v_budget = 0;

	/* reserve one vector for miscellaneous handler */
	if (vectors_left) {
		v_budget++;
		vectors_left--;
	}

	/* reserve vectors for the main PF traffic queues. Initially we
	 * only reserve at most 50% of the available vectors, in the case that
	 * the number of online CPUs is large. This ensures that we can enable
	 * extra features as well. Once we've enabled the other features, we
	 * will use any remaining vectors to reach as close as we can to the
	 * number of online CPUs.
	 */
	cpus = num_online_cpus();
	pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
	vectors_left -= pf->num_lan_msix;

	/* reserve one vector for sideband flow director */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (vectors_left) {
			pf->num_fdsb_msix = 1;
			v_budget++;
			vectors_left--;
		} else {
			pf->num_fdsb_msix = 0;
		}
	}

	/* can we reserve enough for iWARP? */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		iwarp_requested = pf->num_iwarp_msix;

		if (!vectors_left)
			pf->num_iwarp_msix = 0;
		else if (vectors_left < pf->num_iwarp_msix)
			pf->num_iwarp_msix = 1;
		v_budget += pf->num_iwarp_msix;
		vectors_left -= pf->num_iwarp_msix;
	}

	/* any vectors left over go for VMDq support */
	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
		int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
		int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);

		if (!vectors_left) {
			pf->num_vmdq_msix = 0;
			pf->num_vmdq_qps = 0;
		} else {
			/* if we're short on vectors for what's desired, limit
			 * the queues per vmdq; the user can later change the
			 * number of queues/vectors used by the PF with the
			 * ethtool channels command
			 */
			if (vmdq_vecs < vmdq_vecs_wanted)
				pf->num_vmdq_qps = 1;
			pf->num_vmdq_msix = pf->num_vmdq_qps;

			v_budget += vmdq_vecs;
			vectors_left -= vmdq_vecs;
		}
	}

	/* On systems with a large number of SMP cores, we previously limited
	 * the number of vectors for num_lan_msix to be at most 50% of the
	 * available vectors, to allow for other features. Now, we add back
	 * the remaining vectors. However, we ensure that the total
	 * num_lan_msix will not exceed num_online_cpus(). To do this, we
	 * calculate the number of vectors we can add without going over the
	 * cap of CPUs. For systems with a small number of CPUs this will be
	 * zero.
	 */
	extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
	pf->num_lan_msix += extra_vectors;
	vectors_left -= extra_vectors;

	WARN(vectors_left < 0,
	     "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");

	v_budget += pf->num_lan_msix;
	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	v_actual = i40e_reserve_msix_vectors(pf, v_budget);

	if (v_actual < I40E_MIN_MSIX) {
		/* not even the minimum was granted; fall back to non-MSIX */
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		pci_disable_msix(pf->pdev);
		return -ENODEV;

	} else if (v_actual == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (!vectors_left) {
		/* If we have limited resources, we will start with no vectors
		 * for the special features and then allocate vectors to some
		 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
		int vec;

		dev_info(&pf->pdev->dev,
			 "MSI-X vector limit reached, attempting to redistribute vectors\n");
		/* reserve the misc vector */
		vec = v_actual - 1;

		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		pf->num_vmdq_vsis = 1;
		pf->num_vmdq_qps = 1;

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_lan_msix = 1;
			break;
		case 3:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_lan_msix = 1;
				pf->num_iwarp_msix = 1;
			} else {
				pf->num_lan_msix = 2;
			}
			break;
		default:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_iwarp_msix = min_t(int, (vec / 3),
							   iwarp_requested);
				pf->num_vmdq_vsis = min_t(int, (vec / 3),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			} else {
				pf->num_vmdq_vsis = min_t(int, (vec / 2),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			}
			if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
				pf->num_fdsb_msix = 1;
				vec--;
			}
			pf->num_lan_msix = min_t(int,
			       (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
							      pf->num_lan_msix);
			pf->num_lan_qps = pf->num_lan_msix;
			break;
		}
	}

	/* disable any features that ended up without a vector */
	if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
	    (pf->num_fdsb_msix == 0)) {
		dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
	}
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    (pf->num_vmdq_msix == 0)) {
		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
	}

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (pf->num_iwarp_msix == 0)) {
		dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
	}
	i40e_debug(&pf->hw, I40E_DEBUG_INIT,
		   "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
		   pf->num_lan_msix,
		   pf->num_vmdq_msix * pf->num_vmdq_vsis,
		   pf->num_fdsb_msix,
		   pf->num_iwarp_msix);

	return v_actual;
}
8244
8245
8246
8247
8248
8249
8250
8251
8252
8253static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
8254{
8255 struct i40e_q_vector *q_vector;
8256
8257
8258 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
8259 if (!q_vector)
8260 return -ENOMEM;
8261
8262 q_vector->vsi = vsi;
8263 q_vector->v_idx = v_idx;
8264 cpumask_set_cpu(cpu, &q_vector->affinity_mask);
8265
8266 if (vsi->netdev)
8267 netif_napi_add(vsi->netdev, &q_vector->napi,
8268 i40e_napi_poll, NAPI_POLL_WEIGHT);
8269
8270 q_vector->rx.latency_range = I40E_LOW_LATENCY;
8271 q_vector->tx.latency_range = I40E_LOW_LATENCY;
8272
8273
8274 vsi->q_vectors[v_idx] = q_vector;
8275
8276 return 0;
8277}
8278
8279
8280
8281
8282
8283
8284
8285
8286static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
8287{
8288 struct i40e_pf *pf = vsi->back;
8289 int err, v_idx, num_q_vectors, current_cpu;
8290
8291
8292 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
8293 num_q_vectors = vsi->num_q_vectors;
8294 else if (vsi == pf->vsi[pf->lan_vsi])
8295 num_q_vectors = 1;
8296 else
8297 return -EINVAL;
8298
8299 current_cpu = cpumask_first(cpu_online_mask);
8300
8301 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
8302 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
8303 if (err)
8304 goto err_out;
8305 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
8306 if (unlikely(current_cpu >= nr_cpu_ids))
8307 current_cpu = cpumask_first(cpu_online_mask);
8308 }
8309
8310 return 0;
8311
8312err_out:
8313 while (v_idx--)
8314 i40e_free_q_vector(vsi, v_idx);
8315
8316 return err;
8317}
8318
8319
8320
8321
8322
/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 *
 * Tries MSI-X first, then MSI, then falls back to legacy interrupts,
 * and sets up the irq tracking pile for the vectors obtained.
 **/
static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int vectors = 0;
	ssize_t size;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		vectors = i40e_init_msix(pf);
		if (vectors < 0) {
			/* MSI-X failed; turn off the features that need it */
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
				       I40E_FLAG_IWARP_ENABLED |
				       I40E_FLAG_RSS_ENABLED |
				       I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_DCB_ENABLED |
				       I40E_FLAG_SRIOV_ENABLED |
				       I40E_FLAG_FD_SB_ENABLED |
				       I40E_FLAG_FD_ATR_ENABLED |
				       I40E_FLAG_VMDQ_ENABLED);

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		vectors = pci_enable_msi(pf->pdev);
		if (vectors < 0) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
				 vectors);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
		/* one vector either way: MSI on success, legacy otherwise */
		vectors = 1;
	}

	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");

	/* set up vector assignment tracking */
	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile) {
		dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
		return -ENOMEM;
	}
	pf->irq_pile->num_entries = vectors;
	pf->irq_pile->search_hint = 0;

	/* track first vector for misc interrupts, ignore return */
	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);

	return 0;
}
8376
8377
8378
8379
8380
8381
8382
8383
8384
/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * Only used when MSI-X is enabled: requests vector 0 for the "other"
 * (admin queue, link, etc.) causes, then programs the hardware so that
 * ICR0 interrupts are enabled.  Returns 0 on success or the request_irq
 * failure mapped to -EFAULT.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the IRQ once; during a reset-rebuild the handler
	 * is still registered from the original setup
	 */
	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf, true);

	return err;
}
8416
8417
8418
8419
8420
8421
8422static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
8423 u8 *lut, u16 lut_size)
8424{
8425 struct i40e_pf *pf = vsi->back;
8426 struct i40e_hw *hw = &pf->hw;
8427 int ret = 0;
8428
8429 if (seed) {
8430 struct i40e_aqc_get_set_rss_key_data *seed_dw =
8431 (struct i40e_aqc_get_set_rss_key_data *)seed;
8432 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
8433 if (ret) {
8434 dev_info(&pf->pdev->dev,
8435 "Cannot set RSS key, err %s aq_err %s\n",
8436 i40e_stat_str(hw, ret),
8437 i40e_aq_str(hw, hw->aq.asq_last_status));
8438 return ret;
8439 }
8440 }
8441 if (lut) {
8442 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
8443
8444 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
8445 if (ret) {
8446 dev_info(&pf->pdev->dev,
8447 "Cannot set RSS lut, err %s aq_err %s\n",
8448 i40e_stat_str(hw, ret),
8449 i40e_aq_str(hw, hw->aq.asq_last_status));
8450 return ret;
8451 }
8452 }
8453 return ret;
8454}
8455
8456
8457
8458
8459
8460
8461
8462
8463
8464
8465static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
8466 u8 *lut, u16 lut_size)
8467{
8468 struct i40e_pf *pf = vsi->back;
8469 struct i40e_hw *hw = &pf->hw;
8470 int ret = 0;
8471
8472 if (seed) {
8473 ret = i40e_aq_get_rss_key(hw, vsi->id,
8474 (struct i40e_aqc_get_set_rss_key_data *)seed);
8475 if (ret) {
8476 dev_info(&pf->pdev->dev,
8477 "Cannot get RSS key, err %s aq_err %s\n",
8478 i40e_stat_str(&pf->hw, ret),
8479 i40e_aq_str(&pf->hw,
8480 pf->hw.aq.asq_last_status));
8481 return ret;
8482 }
8483 }
8484
8485 if (lut) {
8486 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
8487
8488 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
8489 if (ret) {
8490 dev_info(&pf->pdev->dev,
8491 "Cannot get RSS lut, err %s aq_err %s\n",
8492 i40e_stat_str(&pf->hw, ret),
8493 i40e_aq_str(&pf->hw,
8494 pf->hw.aq.asq_last_status));
8495 return ret;
8496 }
8497 }
8498
8499 return ret;
8500}
8501
8502
8503
8504
8505
8506static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
8507{
8508 u8 seed[I40E_HKEY_ARRAY_SIZE];
8509 struct i40e_pf *pf = vsi->back;
8510 u8 *lut;
8511 int ret;
8512
8513 if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
8514 return 0;
8515
8516 if (!vsi->rss_size)
8517 vsi->rss_size = min_t(int, pf->alloc_rss_size,
8518 vsi->num_queue_pairs);
8519 if (!vsi->rss_size)
8520 return -EINVAL;
8521
8522 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8523 if (!lut)
8524 return -ENOMEM;
8525
8526
8527
8528 if (vsi->rss_lut_user)
8529 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8530 else
8531 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8532 if (vsi->rss_hkey_user)
8533 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8534 else
8535 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8536 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
8537 kfree(lut);
8538
8539 return ret;
8540}
8541
8542
8543
8544
8545
8546
8547
8548
8549
8550
/**
 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
 * @vsi: Pointer to the vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 *
 * The seed and lut buffers are written to the hardware four bytes at a
 * time via u32 casts — assumes both buffers are at least 4-byte
 * aligned (TODO confirm; callers use kzalloc/stack arrays here).
 **/
static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
			       const u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	/* Fill out hash function seed */
	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		if (vsi->type == I40E_VSI_MAIN) {
			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			/* per-VF key registers, indexed by the VF id */
			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
		}
	}

	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (vsi->type == I40E_VSI_MAIN) {
			/* PF LUT registers expect exactly this many bytes */
			if (lut_size != I40E_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			/* the VF LUT is smaller than the PF LUT */
			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
		}
	}
	i40e_flush(hw);

	return 0;
}
8595
8596
8597
8598
8599
8600
8601
8602
8603
8604
8605static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
8606 u8 *lut, u16 lut_size)
8607{
8608 struct i40e_pf *pf = vsi->back;
8609 struct i40e_hw *hw = &pf->hw;
8610 u16 i;
8611
8612 if (seed) {
8613 u32 *seed_dw = (u32 *)seed;
8614
8615 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8616 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
8617 }
8618 if (lut) {
8619 u32 *lut_dw = (u32 *)lut;
8620
8621 if (lut_size != I40E_HLUT_ARRAY_SIZE)
8622 return -EINVAL;
8623 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8624 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
8625 }
8626
8627 return 0;
8628}
8629
8630
8631
8632
8633
8634
8635
8636
8637
8638
8639int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8640{
8641 struct i40e_pf *pf = vsi->back;
8642
8643 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8644 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
8645 else
8646 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
8647}
8648
8649
8650
8651
8652
8653
8654
8655
8656
8657
8658int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8659{
8660 struct i40e_pf *pf = vsi->back;
8661
8662 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8663 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
8664 else
8665 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
8666}
8667
8668
8669
8670
8671
8672
8673
8674
8675void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
8676 u16 rss_table_size, u16 rss_size)
8677{
8678 u16 i;
8679
8680 for (i = 0; i < rss_table_size; i++)
8681 lut[i] = i % rss_size;
8682}
8683
8684
8685
8686
8687
/**
 * i40e_pf_config_rss - Prepare for RSS if used
 * @pf: board private structure
 *
 * Enables the default hash types, sizes the hash LUT, then programs a
 * key and LUT (user-supplied values win) for the main LAN VSI.
 * Returns 0 on success, negative on failure.
 **/
static int i40e_pf_config_rss(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_val;
	u64 hena;
	int ret;

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
		((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= i40e_pf_get_default_rss_hena(pf);

	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Determine the RSS table size based on the hardware capabilities */
	reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
	reg_val = (pf->rss_table_size == 512) ?
			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);

	/* Determine the RSS size of the VSI from the per-TC queue count;
	 * numtc is presumably always >= 1 here — TODO confirm
	 */
	if (!vsi->rss_size) {
		u16 qcount;

		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
	}
	if (!vsi->rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use user configured lut if there is one, otherwise use default */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}
8745
8746
8747
8748
8749
8750
8751
8752
8753
8754
/**
 * i40e_reconfig_rss_queues - change number of queues used for RSS
 * @pf: board private structure
 * @queue_count: the requested queue count for rss
 *
 * Performs a PF reset when the queue count changes so that queues can
 * be re-provisioned, then reprograms RSS for the new spread.  Returns
 * the number (possibly 0) of queues changed to.
 **/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	int new_rss_size;

	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
		return 0;

	new_rss_size = min_t(int, queue_count, pf->rss_size_max);

	if (queue_count != vsi->num_queue_pairs) {
		u16 qcount;

		vsi->req_queue_pairs = queue_count;
		i40e_prep_for_reset(pf, true);

		pf->alloc_rss_size = new_rss_size;

		i40e_reset_and_rebuild(pf, true, true);

		/* Discard the user configured hash keys and lut, if less
		 * queues are enabled.
		 */
		if (queue_count < vsi->rss_size) {
			i40e_clear_rss_config_user(vsi);
			dev_dbg(&pf->pdev->dev,
				"discard user configured hash keys and lut\n");
		}

		/* Reset vsi->rss_size, as number of enabled queues changed */
		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);

		i40e_pf_config_rss(pf);
	}
	dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
		 vsi->req_queue_pairs, pf->rss_size_max);
	return pf->alloc_rss_size;
}
8794
8795
8796
8797
8798
8799i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
8800{
8801 i40e_status status;
8802 bool min_valid, max_valid;
8803 u32 max_bw, min_bw;
8804
8805 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
8806 &min_valid, &max_valid);
8807
8808 if (!status) {
8809 if (min_valid)
8810 pf->min_bw = min_bw;
8811 if (max_valid)
8812 pf->max_bw = max_bw;
8813 }
8814
8815 return status;
8816}
8817
8818
8819
8820
8821
8822i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
8823{
8824 struct i40e_aqc_configure_partition_bw_data bw_data;
8825 i40e_status status;
8826
8827
8828 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
8829 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
8830 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
8831
8832
8833 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
8834
8835 return status;
8836}
8837
8838
8839
8840
8841
/**
 * i40e_commit_partition_bw_setting - Commit BW settings to NVM
 * @pf: board private structure
 *
 * Persists the partition bandwidth settings by reading the NVM control
 * word and writing it back, which triggers the firmware to commit the
 * alternate-RAM values.  Only legal on partition 1.  The NVM semaphore
 * is acquired separately for the read and the write phases and always
 * released, even on error.
 **/
i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
{
	/* Commit temporary BW setting to permanent NVM image */
	enum i40e_admin_queue_err last_aq_status;
	i40e_status ret;
	u16 nvm_word;

	if (pf->hw.partition_id != 1) {
		dev_info(&pf->pdev->dev,
			 "Commit BW only works on partition 1! This is partition %d",
			 pf->hw.partition_id);
		ret = I40E_NOT_SUPPORTED;
		goto bw_commit_out;
	}

	/* Acquire NVM for read access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Read word 0x10 of NVM - SW compatibility word 1 */
	ret = i40e_aq_read_nvm(&pf->hw,
			       I40E_SR_NVM_CONTROL_WORD,
			       0x10, sizeof(nvm_word), &nvm_word,
			       false, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Wait a bit for NVM release to complete */
	msleep(50);

	/* Acquire NVM for write access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}
	/* Write the same word back (last_command flag set) to trigger
	 * the firmware to commit the alternate RAM contents to NVM
	 */
	ret = i40e_aq_update_nvm(&pf->hw,
				 I40E_SR_NVM_CONTROL_WORD,
				 0x10, sizeof(nvm_word),
				 &nvm_word, true, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "BW settings NOT SAVED, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:

	return ret;
}
8920
8921
8922
8923
8924
8925
8926
8927
8928
/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int i40e_sw_init(struct i40e_pf *pf)
{
	int err = 0;
	int size;

	/* Set default capability flags */
	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
		    I40E_FLAG_MSI_ENABLED |
		    I40E_FLAG_MSIX_ENABLED;

	/* Set default ITR */
	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;

	/* Depending on PF configurations, it is possible that the RSS
	 * maximum might end up larger than the available queues
	 */
	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
	pf->alloc_rss_size = 1;
	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
	pf->rss_size_max = min_t(int, pf->rss_size_max,
				 pf->hw.func_caps.num_tx_qp);
	if (pf->hw.func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS_ENABLED;
		pf->alloc_rss_size = min_t(int, pf->rss_size_max,
					   num_online_cpus());
	}

	/* MFP mode enabled */
	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
		pf->flags |= I40E_FLAG_MFP_ENABLED;
		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
		if (i40e_get_partition_bw_setting(pf)) {
			dev_warn(&pf->pdev->dev,
				 "Could not get partition bw settings\n");
		} else {
			dev_info(&pf->pdev->dev,
				 "Partition BW Min = %8.8x, Max = %8.8x\n",
				 pf->min_bw, pf->max_bw);

			/* nudge the Tx scheduler */
			i40e_set_partition_bw_setting(pf);
		}
	}

	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
		/* Flow Director sideband is unavailable in multi-partition
		 * MFP mode
		 */
		if (pf->flags & I40E_FLAG_MFP_ENABLED &&
		    pf->hw.num_partitions > 1)
			dev_info(&pf->pdev->dev,
				 "Flow Director Sideband mode Disabled in MFP mode\n");
		else
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
		pf->fdir_pf_filter_count =
				 pf->hw.func_caps.fd_filters_guaranteed;
		pf->hw.fdir_shared_filter_count =
				 pf->hw.func_caps.fd_filters_best_effort;
	}

	/* XL710 FW < 4.33 needs autoneg restarted and has no DCB support */
	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
	    (pf->hw.aq.fw_maj_ver < 4))) {
		pf->flags |= I40E_FLAG_RESTART_AUTONEG;
		/* No DCB support  for FW < v4.33 */
		pf->flags |= I40E_FLAG_NO_DCB_SUPPORT;
	}

	/* Disable FW LLDP if FW < v4.3 */
	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)))
		pf->flags |= I40E_FLAG_STOP_FW_LLDP;

	/* Use the FW Set LLDP MIB API if FW > v4.40 */
	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
	    (pf->hw.aq.fw_maj_ver >= 5)))
		pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB;

	if (pf->hw.func_caps.vmdq) {
		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
	}

	if (pf->hw.func_caps.iwarp) {
		pf->flags |= I40E_FLAG_IWARP_ENABLED;
		/* IWARP needs one extra vector for CQP just like MISC.*/
		pf->num_iwarp_msix = (int)num_online_cpus() + 1;
	}

#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
		pf->num_req_vfs = min_t(int,
					pf->hw.func_caps.num_vfs,
					I40E_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */
	/* X722 hardware grows several extra capabilities over XL710 */
	if (pf->hw.mac.type == I40E_MAC_X722) {
		pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE
			     | I40E_FLAG_128_QP_RSS_CAPABLE
			     | I40E_FLAG_HW_ATR_EVICT_CAPABLE
			     | I40E_FLAG_OUTER_UDP_CSUM_CAPABLE
			     | I40E_FLAG_WB_ON_ITR_CAPABLE
			     | I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE
			     | I40E_FLAG_NO_PCI_LINK_CHECK
			     | I40E_FLAG_USE_SET_LLDP_MIB
			     | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE
			     | I40E_FLAG_PTP_L4_CAPABLE
			     | I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE;
	} else if ((pf->hw.aq.api_maj_ver > 1) ||
		   ((pf->hw.aq.api_maj_ver == 1) &&
		    (pf->hw.aq.api_min_ver > 4))) {
		/* Supported in FW API version higher than 1.4 */
		pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
	}

	/* Enable HW ATR eviction if possible */
	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
		pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;

	pf->eeprom_version = 0xDEAD;
	pf->lan_veb = I40E_NO_VEB;
	pf->lan_vsi = I40E_NO_VSI;

	/* By default FW has this off for performance reasons */
	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;

	/* set up queue assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
	pf->qp_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->qp_pile) {
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
	pf->qp_pile->search_hint = 0;

	pf->tx_timeout_recovery_level = 1;

	mutex_init(&pf->switch_mutex);

sw_init_done:
	return err;
}
9079
9080
9081
9082
9083
9084
9085
9086
9087bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
9088{
9089 bool need_reset = false;
9090
9091
9092
9093
9094 if (features & NETIF_F_NTUPLE) {
9095
9096 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
9097 need_reset = true;
9098
9099 if (pf->num_fdsb_msix > 0)
9100 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
9101 } else {
9102
9103 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9104 need_reset = true;
9105 i40e_fdir_filter_exit(pf);
9106 }
9107 pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
9108 I40E_FLAG_FD_SB_AUTO_DISABLED);
9109
9110 pf->fd_add_err = 0;
9111 pf->fd_atr_cnt = 0;
9112
9113 if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
9114 pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
9115 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
9116 (I40E_DEBUG_FD & pf->hw.debug_mask))
9117 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
9118 }
9119 }
9120 return need_reset;
9121}
9122
9123
9124
9125
9126
9127static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
9128{
9129 struct i40e_pf *pf = vsi->back;
9130 struct i40e_hw *hw = &pf->hw;
9131 u16 vf_id = vsi->vf_id;
9132 u8 i;
9133
9134 if (vsi->type == I40E_VSI_MAIN) {
9135 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
9136 wr32(hw, I40E_PFQF_HLUT(i), 0);
9137 } else if (vsi->type == I40E_VSI_SRIOV) {
9138 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
9139 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
9140 } else {
9141 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
9142 }
9143}
9144
9145
9146
9147
9148
9149
9150
9151static int i40e_set_features(struct net_device *netdev,
9152 netdev_features_t features)
9153{
9154 struct i40e_netdev_priv *np = netdev_priv(netdev);
9155 struct i40e_vsi *vsi = np->vsi;
9156 struct i40e_pf *pf = vsi->back;
9157 bool need_reset;
9158
9159 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
9160 i40e_pf_config_rss(pf);
9161 else if (!(features & NETIF_F_RXHASH) &&
9162 netdev->features & NETIF_F_RXHASH)
9163 i40e_clear_rss_lut(vsi);
9164
9165 if (features & NETIF_F_HW_VLAN_CTAG_RX)
9166 i40e_vlan_stripping_enable(vsi);
9167 else
9168 i40e_vlan_stripping_disable(vsi);
9169
9170 need_reset = i40e_set_ntuple(pf, features);
9171
9172 if (need_reset)
9173 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
9174
9175 return 0;
9176}
9177
9178
9179
9180
9181
9182
9183
9184
9185static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
9186{
9187 u8 i;
9188
9189 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9190 if (pf->udp_ports[i].port == port)
9191 return i;
9192 }
9193
9194 return i;
9195}
9196
9197
9198
9199
9200
9201
9202static void i40e_udp_tunnel_add(struct net_device *netdev,
9203 struct udp_tunnel_info *ti)
9204{
9205 struct i40e_netdev_priv *np = netdev_priv(netdev);
9206 struct i40e_vsi *vsi = np->vsi;
9207 struct i40e_pf *pf = vsi->back;
9208 u16 port = ntohs(ti->port);
9209 u8 next_idx;
9210 u8 idx;
9211
9212 idx = i40e_get_udp_port_idx(pf, port);
9213
9214
9215 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
9216 netdev_info(netdev, "port %d already offloaded\n", port);
9217 return;
9218 }
9219
9220
9221 next_idx = i40e_get_udp_port_idx(pf, 0);
9222
9223 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
9224 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
9225 port);
9226 return;
9227 }
9228
9229 switch (ti->type) {
9230 case UDP_TUNNEL_TYPE_VXLAN:
9231 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
9232 break;
9233 case UDP_TUNNEL_TYPE_GENEVE:
9234 if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
9235 return;
9236 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
9237 break;
9238 default:
9239 return;
9240 }
9241
9242
9243 pf->udp_ports[next_idx].port = port;
9244 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
9245 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
9246}
9247
9248
9249
9250
9251
9252
9253static void i40e_udp_tunnel_del(struct net_device *netdev,
9254 struct udp_tunnel_info *ti)
9255{
9256 struct i40e_netdev_priv *np = netdev_priv(netdev);
9257 struct i40e_vsi *vsi = np->vsi;
9258 struct i40e_pf *pf = vsi->back;
9259 u16 port = ntohs(ti->port);
9260 u8 idx;
9261
9262 idx = i40e_get_udp_port_idx(pf, port);
9263
9264
9265 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
9266 goto not_found;
9267
9268 switch (ti->type) {
9269 case UDP_TUNNEL_TYPE_VXLAN:
9270 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
9271 goto not_found;
9272 break;
9273 case UDP_TUNNEL_TYPE_GENEVE:
9274 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
9275 goto not_found;
9276 break;
9277 default:
9278 goto not_found;
9279 }
9280
9281
9282
9283
9284 pf->udp_ports[idx].port = 0;
9285 pf->pending_udp_bitmap |= BIT_ULL(idx);
9286 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
9287
9288 return;
9289not_found:
9290 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
9291 port);
9292}
9293
9294static int i40e_get_phys_port_id(struct net_device *netdev,
9295 struct netdev_phys_item_id *ppid)
9296{
9297 struct i40e_netdev_priv *np = netdev_priv(netdev);
9298 struct i40e_pf *pf = np->vsi->back;
9299 struct i40e_hw *hw = &pf->hw;
9300
9301 if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
9302 return -EOPNOTSUPP;
9303
9304 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
9305 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
9306
9307 return 0;
9308}
9309
9310
9311
9312
9313
9314
9315
9316
9317
9318static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
9319 struct net_device *dev,
9320 const unsigned char *addr, u16 vid,
9321 u16 flags)
9322{
9323 struct i40e_netdev_priv *np = netdev_priv(dev);
9324 struct i40e_pf *pf = np->vsi->back;
9325 int err = 0;
9326
9327 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
9328 return -EOPNOTSUPP;
9329
9330 if (vid) {
9331 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
9332 return -EINVAL;
9333 }
9334
9335
9336
9337
9338 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
9339 netdev_info(dev, "FDB only supports static addresses\n");
9340 return -EINVAL;
9341 }
9342
9343 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
9344 err = dev_uc_add_excl(dev, addr);
9345 else if (is_multicast_ether_addr(addr))
9346 err = dev_mc_add_excl(dev, addr);
9347 else
9348 err = -EINVAL;
9349
9350
9351 if (err == -EEXIST && !(flags & NLM_F_EXCL))
9352 err = 0;
9353
9354 return err;
9355}
9356
9357
9358
9359
9360
9361
9362
9363
9364
9365
9366
9367
9368
9369
9370
9371static int i40e_ndo_bridge_setlink(struct net_device *dev,
9372 struct nlmsghdr *nlh,
9373 u16 flags)
9374{
9375 struct i40e_netdev_priv *np = netdev_priv(dev);
9376 struct i40e_vsi *vsi = np->vsi;
9377 struct i40e_pf *pf = vsi->back;
9378 struct i40e_veb *veb = NULL;
9379 struct nlattr *attr, *br_spec;
9380 int i, rem;
9381
9382
9383 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
9384 return -EOPNOTSUPP;
9385
9386
9387 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9388 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9389 veb = pf->veb[i];
9390 }
9391
9392 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
9393
9394 nla_for_each_nested(attr, br_spec, rem) {
9395 __u16 mode;
9396
9397 if (nla_type(attr) != IFLA_BRIDGE_MODE)
9398 continue;
9399
9400 mode = nla_get_u16(attr);
9401 if ((mode != BRIDGE_MODE_VEPA) &&
9402 (mode != BRIDGE_MODE_VEB))
9403 return -EINVAL;
9404
9405
9406 if (!veb) {
9407 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
9408 vsi->tc_config.enabled_tc);
9409 if (veb) {
9410 veb->bridge_mode = mode;
9411 i40e_config_bridge_mode(veb);
9412 } else {
9413
9414 return -ENOENT;
9415 }
9416 break;
9417 } else if (mode != veb->bridge_mode) {
9418
9419 veb->bridge_mode = mode;
9420
9421 if (mode == BRIDGE_MODE_VEB)
9422 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
9423 else
9424 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
9425 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED),
9426 true);
9427 break;
9428 }
9429 }
9430
9431 return 0;
9432}
9433
9434
9435
9436
9437
9438
9439
9440
9441
9442
9443
9444
9445
9446static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9447 struct net_device *dev,
9448 u32 __always_unused filter_mask,
9449 int nlflags)
9450{
9451 struct i40e_netdev_priv *np = netdev_priv(dev);
9452 struct i40e_vsi *vsi = np->vsi;
9453 struct i40e_pf *pf = vsi->back;
9454 struct i40e_veb *veb = NULL;
9455 int i;
9456
9457
9458 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
9459 return -EOPNOTSUPP;
9460
9461
9462 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9463 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9464 veb = pf->veb[i];
9465 }
9466
9467 if (!veb)
9468 return 0;
9469
9470 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
9471 0, 0, nlflags, filter_mask, NULL);
9472}
9473
9474
9475
9476
9477
9478
9479
/**
 * i40e_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 *
 * Strips GSO/checksum offload features from packets whose header
 * layout the hardware cannot describe in its Tx descriptors.
 **/
static netdev_features_t i40e_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame.  We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes.  If it is then we need to drop support for GSO.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;

	/* MACLEN check: mask passes only even lengths up to 126 bytes —
	 * presumably the descriptor field counts 2-byte words with a
	 * 63-word maximum (TODO confirm against datasheet)
	 */
	len = skb_network_header(skb) - skb->data;
	if (len & ~(63 * 2))
		goto out_err;

	/* IPLEN/EIPLEN check: multiples of 4 up to 508 bytes */
	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len & ~(127 * 4))
		goto out_err;

	if (skb->encapsulation) {
		/* L4TUNLEN check: even lengths up to 254 bytes */
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len & ~(127 * 2))
			goto out_err;

		/* inner IPLEN/EIPLEN check: multiples of 4 up to 508 bytes */
		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len & ~(127 * 4))
			goto out_err;
	}

	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value and we support all possible values supported
	 * by TCP, which is at most 15 dwords
	 */

	return features;
out_err:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
9531
9532
9533
9534
9535
9536
/**
 * i40e_xdp_setup - add/remove an XDP program
 * @vsi: VSI to changed
 * @prog: XDP program (NULL to remove)
 *
 * Swapping between "XDP on" and "XDP off" requires a full queue
 * reconfiguration, hence the prep/reset bracket around the xchg.
 * Returns 0 on success, -EINVAL if the current MTU exceeds what a
 * single Rx buffer can hold (XDP does not support multi-buffer frames).
 **/
static int i40e_xdp_setup(struct i40e_vsi *vsi,
			  struct bpf_prog *prog)
{
	int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	struct i40e_pf *pf = vsi->back;
	struct bpf_prog *old_prog;
	bool need_reset;
	int i;

	/* Don't allow frames that span over multiple buffers */
	if (frame_size > vsi->rx_buf_len)
		return -EINVAL;

	/* removing a program that was never installed is a no-op */
	if (!i40e_enabled_xdp_vsi(vsi) && !prog)
		return 0;

	/* When turning XDP on->off/off->on we reset and rebuild the rings. */
	need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);

	if (need_reset)
		i40e_prep_for_reset(pf, true);

	/* atomically install the new program; we now own old_prog's ref */
	old_prog = xchg(&vsi->xdp_prog, prog);

	if (need_reset)
		i40e_reset_and_rebuild(pf, true, true);

	/* propagate the program to every Rx ring */
	for (i = 0; i < vsi->num_queue_pairs; i++)
		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);

	/* drop the reference previously held by the VSI */
	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}
9572
9573
9574
9575
9576
9577
9578static int i40e_xdp(struct net_device *dev,
9579 struct netdev_xdp *xdp)
9580{
9581 struct i40e_netdev_priv *np = netdev_priv(dev);
9582 struct i40e_vsi *vsi = np->vsi;
9583
9584 if (vsi->type != I40E_VSI_MAIN)
9585 return -EINVAL;
9586
9587 switch (xdp->command) {
9588 case XDP_SETUP_PROG:
9589 return i40e_xdp_setup(vsi, xdp->prog);
9590 case XDP_QUERY_PROG:
9591 xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
9592 return 0;
9593 default:
9594 return -EINVAL;
9595 }
9596}
9597
/* Netdev operations table for the i40e LAN VSI netdevs; wired up in
 * i40e_config_netdev().
 */
static const struct net_device_ops i40e_netdev_ops = {
	.ndo_open		= i40e_open,
	.ndo_stop		= i40e_close,
	.ndo_start_xmit		= i40e_lan_xmit_frame,
	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
	.ndo_set_rx_mode	= i40e_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= i40e_set_mac,
	.ndo_change_mtu		= i40e_change_mtu,
	.ndo_do_ioctl		= i40e_ioctl,
	.ndo_tx_timeout		= i40e_tx_timeout,
	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i40e_netpoll,
#endif
	.ndo_setup_tc		= __i40e_setup_tc,
	.ndo_set_features	= i40e_set_features,
	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
	.ndo_set_vf_trust	= i40e_ndo_set_vf_trust,
	.ndo_udp_tunnel_add	= i40e_udp_tunnel_add,
	.ndo_udp_tunnel_del	= i40e_udp_tunnel_del,
	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
	.ndo_fdb_add		= i40e_ndo_fdb_add,
	.ndo_features_check	= i40e_features_check,
	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
	.ndo_xdp		= i40e_xdp,
};
9632
9633
9634
9635
9636
9637
9638
/**
 * i40e_config_netdev - allocate and set up the net_device for a VSI
 * @vsi: the VSI being configured
 *
 * Allocates a multiqueue etherdev, wires up its private data, advertises
 * the offload feature sets supported by the hardware, and installs the
 * MAC/broadcast filters the VSI starts out with.
 *
 * Returns 0 on success, -ENOMEM if netdev allocation fails.
 **/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_netdev_priv *np;
	struct net_device *netdev;
	u8 broadcast[ETH_ALEN];
	u8 mac_addr[ETH_ALEN];
	int etherdev_size;
	netdev_features_t hw_enc_features;
	netdev_features_t hw_features;

	etherdev_size = sizeof(struct i40e_netdev_priv);
	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
	if (!netdev)
		return -ENOMEM;

	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	/* Checksum, TSO and tunnel offloads the hardware can perform on
	 * encapsulated (and plain) traffic.
	 */
	hw_enc_features = NETIF_F_SG			|
			  NETIF_F_IP_CSUM		|
			  NETIF_F_IPV6_CSUM		|
			  NETIF_F_HIGHDMA		|
			  NETIF_F_SOFT_FEATURES		|
			  NETIF_F_TSO			|
			  NETIF_F_TSO_ECN		|
			  NETIF_F_TSO6			|
			  NETIF_F_GSO_GRE		|
			  NETIF_F_GSO_GRE_CSUM		|
			  NETIF_F_GSO_PARTIAL		|
			  NETIF_F_GSO_UDP_TUNNEL	|
			  NETIF_F_GSO_UDP_TUNNEL_CSUM	|
			  NETIF_F_SCTP_CRC		|
			  NETIF_F_RXHASH		|
			  NETIF_F_RXCSUM		|
			  0;

	/* If the HW can't do the outer UDP checksum itself, fall back to
	 * GSO partial segmentation for tunnels needing it.
	 */
	if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

	netdev->hw_enc_features |= hw_enc_features;

	/* record features VLANs can make use of */
	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;

	/* Flow Director (ntuple) is not supported in MFP mode */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		netdev->hw_features |= NETIF_F_NTUPLE;
	hw_features = hw_enc_features		|
		      NETIF_F_HW_VLAN_CTAG_TX	|
		      NETIF_F_HW_VLAN_CTAG_RX;

	netdev->hw_features |= hw_features;

	netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	if (vsi->type == I40E_VSI_MAIN) {
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		ether_addr_copy(mac_addr, hw->mac.perm_addr);
		/* Some older NVM configurations load a default MAC-VLAN
		 * filter that accepts any tagged packet; remove it so it
		 * can be replaced with the VLAN-aware filters the driver
		 * manages itself in the filter hash.
		 */
		i40e_rm_default_mac_filter(vsi, mac_addr);
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		i40e_add_mac_filter(vsi, mac_addr);
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	} else {
		/* relate the VSI netdev name to the VSI_MAIN netdev name */
		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
			 pf->vsi[pf->lan_vsi]->netdev->name);
		random_ether_addr(mac_addr);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		i40e_add_mac_filter(vsi, mac_addr);
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}

	/* Add a broadcast filter so broadcast frames are received from the
	 * start.  Broadcast filters are handled specially by the filter
	 * sync subtask, which sets the broadcast promiscuous bit instead
	 * of programming a plain MAC/VLAN filter.
	 */
	eth_broadcast_addr(broadcast);
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_add_mac_filter(vsi, broadcast);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	ether_addr_copy(netdev->dev_addr, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;

	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);

	/* MTU range: 68 - max frame minus L2/FCS/VLAN overhead */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = I40E_MAX_RXBUFFER -
			  (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	return 0;
}
9764
9765
9766
9767
9768
9769
9770
9771static void i40e_vsi_delete(struct i40e_vsi *vsi)
9772{
9773
9774 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
9775 return;
9776
9777 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
9778}
9779
9780
9781
9782
9783
9784
9785
9786int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
9787{
9788 struct i40e_veb *veb;
9789 struct i40e_pf *pf = vsi->back;
9790
9791
9792 if (vsi->veb_idx == I40E_NO_VEB)
9793 return 1;
9794
9795 veb = pf->veb[vsi->veb_idx];
9796 if (!veb) {
9797 dev_info(&pf->pdev->dev,
9798 "There is no veb associated with the bridge\n");
9799 return -ENOENT;
9800 }
9801
9802
9803 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
9804 return 0;
9805 } else {
9806
9807 return 1;
9808 }
9809
9810
9811 return 0;
9812}
9813
9814
9815
9816
9817
9818
9819
9820
/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * Initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi AQ command (except for the main VSI,
 * whose context is only fetched/updated since the FW created it at init).
 *
 * Returns 0 on success, negative errno on failure.
 **/
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
	int ret = -ENODEV;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;

	u8 enabled_tc = 0x1; /* TC0 enabled */
	int f_count = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	switch (vsi->type) {
	case I40E_VSI_MAIN:
		/* The PF's main VSI is already setup as part of the
		 * device initialization, so we'll not bother with
		 * the add_vsi call, but we will retrieve the current
		 * VSI context.
		 */
		ctxt.seid = pf->main_vsi_seid;
		ctxt.pf_num = pf->hw.pf_id;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "couldn't get PF vsi config, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENOENT;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;

		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;

		enabled_tc = i40e_pf_get_tc_map(pf);

		/* MFP mode setup queue map and update VSI */
		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				ret = -ENOENT;
				goto err;
			}
			/* update the local VSI info queue map */
			i40e_vsi_update_queue_map(vsi, &ctxt);
			vsi->info.valid_sections = 0;
		} else {
			/* Default/Main VSI is only enabled for TC0;
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.  For the
			 * MFP case the iSCSI PF would use this flow to
			 * enable LAN+iSCSI TCs.
			 */
			ret = i40e_vsi_config_tc(vsi, enabled_tc);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
					 enabled_tc,
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				ret = -ENOENT;
			}
		}
		break;

	case I40E_VSI_FDIR:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		/* allow loopback when the uplink bridge is in VEB mode */
		if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
		    (i40e_is_vsi_uplink_mode_veb(vsi))) {
			ctxt.info.valid_sections |=
			     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
			   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_VMDQ2:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

		/* This VSI is connected to a VEB, so enable loopback so
		 * the VEB can switch traffic between local VSIs.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_SRIOV:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VF;

		/* This VSI is connected to a VEB, so enable loopback so
		 * the VEB can switch traffic between local VSIs.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
			ctxt.info.queueing_opt_flags |=
				(I40E_AQ_VSI_QUE_OPT_TCP_ENA |
				 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
		}

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
		if (pf->vf[vsi->vf_id].spoofchk) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
			ctxt.info.sec_flags |=
				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
		}
		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_IWARP:
		/* send down message to iWARP */
		break;

	default:
		return -ENODEV;
	}

	if (vsi->type != I40E_VSI_MAIN) {
		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add vsi failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			ret = -ENOENT;
			goto err;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;
		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;
	}

	vsi->active_filters = 0;
	clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	/* If macvlan filters already exist, force them to get loaded */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		f->state = I40E_FILTER_NEW;
		f_count++;
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (f_count) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		pf->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* Update VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* VSI is already added so not tearing that up */
		ret = 0;
	}

err:
	return ret;
}
10033
10034
10035
10036
10037
10038
10039
/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 **/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	struct i40e_veb *veb = NULL;
	struct i40e_pf *pf;
	u16 uplink_seid;
	int i, n, bkt;

	pf = vsi->back;

	/* release of a VEB-owner or last VSI is not allowed */
	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
			 vsi->seid, vsi->uplink_seid);
		return -ENODEV;
	}
	if (vsi == pf->vsi[pf->lan_vsi] &&
	    !test_bit(__I40E_DOWN, pf->state)) {
		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
		return -ENODEV;
	}

	uplink_seid = vsi->uplink_seid;
	if (vsi->type != I40E_VSI_SRIOV) {
		if (vsi->netdev_registered) {
			vsi->netdev_registered = false;
			if (vsi->netdev) {
				/* results in a call to i40e_close() */
				unregister_netdev(vsi->netdev);
			}
		} else {
			i40e_vsi_close(vsi);
		}
		i40e_vsi_disable_irq(vsi);
	}

	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* clear the sync flag on all filters */
	if (vsi->netdev) {
		__dev_uc_unsync(vsi->netdev, NULL);
		__dev_mc_unsync(vsi->netdev, NULL);
	}

	/* make sure any remaining filters are marked for deletion */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	i40e_sync_vsi_filters(vsi);

	i40e_vsi_delete(vsi);
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_vsi_clear_rings(vsi);
	i40e_vsi_clear(vsi);

	/* If this was the last thing on the VEB, except for the
	 * controlling VSI, remove the VEB, which puts the controlling
	 * VSI onto the next level down in the switch.
	 *
	 * Well, okay, there's one more exception here: don't remove
	 * the orphan VEBs yet.  We'll wait for an explicit remove request
	 * from up the network stack.
	 */
	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] &&
		    pf->vsi[i]->uplink_seid == uplink_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			n++;      /* count the VSIs */
		}
	}
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == uplink_seid)
			n++;     /* count the VEBs */
		if (pf->veb[i]->seid == uplink_seid)
			veb = pf->veb[i];
	}
	if (n == 0 && veb && veb->uplink_seid != 0)
		i40e_veb_release(veb);

	return 0;
}
10130
10131
10132
10133
10134
10135
10136
10137
10138
10139
10140
/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after i40e_vsi_mem_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 **/
static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
{
	int ret = -ENOENT;
	struct i40e_pf *pf = vsi->back;

	if (vsi->q_vectors[0]) {
		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
			 vsi->seid);
		return -EEXIST;
	}

	if (vsi->base_vector) {
		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
			 vsi->seid, vsi->base_vector);
		return -EEXIST;
	}

	ret = i40e_vsi_alloc_q_vectors(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
			 vsi->num_q_vectors, vsi->seid, ret);
		vsi->num_q_vectors = 0;
		goto vector_setup_out;
	}

	/* In Legacy/MSI mode there is no per-queue vector to reserve; all
	 * queue interrupts piggyback on the misc/ICR0 vector, so we are
	 * done here (ret is 0 from the successful allocation above).
	 */
	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return ret;
	if (vsi->num_q_vectors)
		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
						 vsi->num_q_vectors, vsi->idx);
	/* i40e_get_lump() returns a negative errno on failure, which was
	 * stored into base_vector above; detect it here and unwind.
	 */
	if (vsi->base_vector < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
		i40e_vsi_free_q_vectors(vsi);
		ret = -ENOENT;
		goto vector_setup_out;
	}

vector_setup_out:
	return ret;
}
10187
10188
10189
10190
10191
10192
10193
10194
10195
10196
/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi.
 *
 * This re-allocates a vsi's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
	u16 alloc_queue_pairs;
	struct i40e_pf *pf;
	u8 enabled_tc;
	int ret;

	if (!vsi)
		return NULL;

	pf = vsi->back;

	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_vsi_clear_rings(vsi);

	i40e_vsi_free_arrays(vsi, false);
	i40e_set_num_rings_in_vsi(vsi);
	ret = i40e_vsi_alloc_arrays(vsi, false);
	if (ret)
		goto err_vsi;

	/* XDP doubles the queue pair count: one extra Tx ring per pair */
	alloc_queue_pairs = vsi->alloc_queue_pairs *
			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);

	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err %d\n",
			 alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	if (vsi->type == I40E_VSI_MAIN)
		i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);

	/* assign it some queues */
	ret = i40e_alloc_rings(vsi);
	if (ret)
		goto err_rings;

	/* map all of the rings to the q_vectors */
	i40e_vsi_map_rings_to_vectors(vsi);
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
	return NULL;
}
10262
10263
10264
10265
10266
10267
10268
10269
10270
10271
10272
10273
10274
10275
/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then add a
 * VSI to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configured VSI sw
 * struct on success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	u16 alloc_queue_pairs;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid may be either an existing VEB, in
	 * which case the new VSI hangs off that, or an existing VSI, in
	 * which case a VEB must first be inserted between that VSI and
	 * its current uplink and the new VSI attached to the new VEB.
	 * First look for a matching VEB.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {
		/* no VEB found; the uplink must be an existing VSI */
		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		if (veb) {
			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
				dev_info(&vsi->back->pdev->dev,
					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
				return NULL;
			}
			/* We come up by default in VEPA mode if SRIOV is not
			 * already enabled, in which case we can't force VEPA
			 * mode.
			 */
			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
				veb->bridge_mode = BRIDGE_MODE_VEPA;
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			}
			i40e_config_bridge_mode(veb);
		}
		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues; XDP doubles the pair count */
	alloc_queue_pairs = vsi->alloc_queue_pairs *
			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);

	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
		/* Apply relevant filters if a platform-specific mac
		 * address was requested via device tree or similar.
		 */
		if (!!(pf->flags & I40E_FLAG_PF_MAC)) {
			ret = i40e_macaddr_init(vsi, pf->hw.mac.addr);
			if (ret) {
				dev_warn(&pf->pdev->dev,
					 "could not set up macaddr; err %d\n",
					 ret);
			}
		}
		/* fall through */
	case I40E_VSI_VMDQ2:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
		/* fall through */
	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
	    (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_vsi_config_rss(vsi);
	}
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}
10459
10460
10461
10462
10463
10464
10465
/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB
 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret = 0;
	int i;

	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw ets config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
	veb->bw_max_quanta = ets_data.tc_bw_max;
	veb->is_abs_credits = bw_data.absolute_credits_enable;
	veb->enabled_tc = ets_data.tc_valid_bits;
	/* tc_bw_max packs two le16 words into one u32; each TC then owns
	 * a 3-bit max-quanta field at bit offset i*4.
	 */
	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
		veb->bw_tc_limit_credits[i] =
			le16_to_cpu(bw_data.tc_bw_limits[i]);
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
	}

out:
	return ret;
}
10512
10513
10514
10515
10516
10517
10518
10519
10520static int i40e_veb_mem_alloc(struct i40e_pf *pf)
10521{
10522 int ret = -ENOENT;
10523 struct i40e_veb *veb;
10524 int i;
10525
10526
10527 mutex_lock(&pf->switch_mutex);
10528
10529
10530
10531
10532
10533
10534
10535 i = 0;
10536 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
10537 i++;
10538 if (i >= I40E_MAX_VEB) {
10539 ret = -ENOMEM;
10540 goto err_alloc_veb;
10541 }
10542
10543 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
10544 if (!veb) {
10545 ret = -ENOMEM;
10546 goto err_alloc_veb;
10547 }
10548 veb->pf = pf;
10549 veb->idx = i;
10550 veb->enabled_tc = 1;
10551
10552 pf->veb[i] = veb;
10553 ret = i;
10554err_alloc_veb:
10555 mutex_unlock(&pf->switch_mutex);
10556 return ret;
10557}
10558
10559
10560
10561
10562
10563
10564
10565
/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	u16 branch_seid = branch->seid;
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of
	 * removing the VEB itself, so don't use (*branch) after this loop.
	 * We use branch_seid/veb_idx copies taken above instead.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
	if (pf->veb[veb_idx])
		i40e_veb_release(pf->veb[veb_idx]);
}
10603
10604
10605
10606
10607
10608static void i40e_veb_clear(struct i40e_veb *veb)
10609{
10610 if (!veb)
10611 return;
10612
10613 if (veb->pf) {
10614 struct i40e_pf *pf = veb->pf;
10615
10616 mutex_lock(&pf->switch_mutex);
10617 if (pf->veb[veb->idx] == veb)
10618 pf->veb[veb->idx] = NULL;
10619 mutex_unlock(&pf->switch_mutex);
10620 }
10621
10622 kfree(veb);
10623}
10624
10625
10626
10627
10628
/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 *
 * Only legal when exactly one VSI (the owner) is left on the VEB; that
 * VSI is re-linked to the VEB's uplink before the VEB is deleted from
 * the HW switch and its SW struct freed.
 **/
void i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_pf *pf;
	int i, n = 0;

	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
		}
	}
	if (n != 1) {
		dev_info(&pf->pdev->dev,
			 "can't remove VEB %d with %d VSIs left\n",
			 veb->seid, n);
		return;
	}

	/* move the remaining VSI to uplink veb */
	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
	if (veb->uplink_seid) {
		vsi->uplink_seid = veb->uplink_seid;
		if (veb->uplink_seid == pf->mac_seid)
			vsi->veb_idx = I40E_NO_VEB;
		else
			vsi->veb_idx = veb->veb_idx;
	} else {
		/* floating VEB: relink the VSI under the LAN VSI's uplink */
		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
	}

	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
	i40e_veb_clear(veb);
}
10668
10669
10670
10671
10672
10673
/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 *
 * Issues the add-VEB AQ command, then fetches the statistics index and
 * BW configuration for the new VEB.  On BW-query failure the VEB is
 * deleted from the switch again.  On success, links @vsi under the VEB.
 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = veb->pf;
	bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
	int ret;

	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, false,
			      &veb->seid, enable_stats, NULL);

	/* get a VEB from the hardware */
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't add VEB, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* roll back: remove the half-configured VEB from the switch */
		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}
10719
10720
10721
10722
10723
10724
10725
10726
10727
10728
10729
10730
10731
10732
10733
10734
10735
/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch.
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB.  It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both seid's are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}

	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}
10802
10803
10804
10805
10806
10807
10808
10809
10810
10811
/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb == I40E_NO_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb == I40E_NO_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}

		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}
10885
10886
10887
10888
10889
10890
10891
10892
10893
10894int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
10895{
10896 struct i40e_aqc_get_switch_config_resp *sw_config;
10897 u16 next_seid = 0;
10898 int ret = 0;
10899 u8 *aq_buf;
10900 int i;
10901
10902 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
10903 if (!aq_buf)
10904 return -ENOMEM;
10905
10906 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
10907 do {
10908 u16 num_reported, num_total;
10909
10910 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
10911 I40E_AQ_LARGE_BUF,
10912 &next_seid, NULL);
10913 if (ret) {
10914 dev_info(&pf->pdev->dev,
10915 "get switch config failed err %s aq_err %s\n",
10916 i40e_stat_str(&pf->hw, ret),
10917 i40e_aq_str(&pf->hw,
10918 pf->hw.aq.asq_last_status));
10919 kfree(aq_buf);
10920 return -ENOENT;
10921 }
10922
10923 num_reported = le16_to_cpu(sw_config->header.num_reported);
10924 num_total = le16_to_cpu(sw_config->header.num_total);
10925
10926 if (printconfig)
10927 dev_info(&pf->pdev->dev,
10928 "header: %d reported %d total\n",
10929 num_reported, num_total);
10930
10931 for (i = 0; i < num_reported; i++) {
10932 struct i40e_aqc_switch_config_element_resp *ele =
10933 &sw_config->element[i];
10934
10935 i40e_setup_pf_switch_element(pf, ele, num_reported,
10936 printconfig);
10937 }
10938 } while (next_seid != 0);
10939
10940 kfree(aq_buf);
10941 return ret;
10942}
10943
10944
10945
10946
10947
10948
10949
10950
/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	u16 flags = 0;
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when user requests promisc. The default is limited
	 * promisc.
	 */
	if ((pf->hw.pf_id == 0) &&
	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;

	if (pf->hw.pf_id == 0) {
		u16 valid_flags;

		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
						NULL);
		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
			dev_info(&pf->pdev->dev,
				 "couldn't set switch config bits, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			/* not a fatal problem, just keep going */
		}
	}

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI in switch mode 0, attached to
		 * the MAC or a VEB if one exists
		 */
		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can
	 * use the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_pf_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	i40e_ptp_init(pf);

	/* repopulate tunnel port filters */
	i40e_sync_udp_filters(pf);

	return ret;
}
11054
11055
11056
11057
11058
/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 *
 * Splits the HW function's Tx queue pairs between the main LAN VSI,
 * the Flow Director sideband VSI, SR-IOV VFs and VMDq VSIs, clearing
 * the feature flags of anything there are no queues left for.
 */
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;

	pf->num_lan_qps = 0;

	/* The number of Tx queue pairs the HW function exposes is the
	 * budget everything below carves from.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for the PF, no queues for anything else */
		queues_left = 0;
		pf->alloc_rss_size = pf->num_lan_qps = 1;

		/* make sure all the queue-consuming features are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
			       I40E_FLAG_IWARP_ENABLED |
			       I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_CAPABLE |
			       I40E_FLAG_DCB_ENABLED |
			       I40E_FLAG_SRIOV_ENABLED |
			       I40E_FLAG_VMDQ_ENABLED);
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* no advanced features requested: one qp for the PF */
		pf->alloc_rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		/* note: SR-IOV and DCB_CAPABLE are left intact here */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
			       I40E_FLAG_IWARP_ENABLED |
			       I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_ENABLED |
			       I40E_FLAG_VMDQ_ENABLED);
	} else {
		/* DCB needs at least one queue per traffic class */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_DCB_ENABLED);
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}
		/* LAN gets as many queues as RSS wants (at least one per
		 * online CPU), capped by what the HW function provides.
		 */
		pf->num_lan_qps = max_t(int, pf->rss_size_max,
					num_online_cpus());
		pf->num_lan_qps = min_t(int, pf->num_lan_qps,
					pf->hw.func_caps.num_tx_qp);

		queues_left -= pf->num_lan_qps;
	}

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* reserve one queue for FD SB */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	/* shrink the requested VF count to fit the remaining budget */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	/* shrink the VMDq VSI count the same way */
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
	dev_dbg(&pf->pdev->dev,
		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
		pf->hw.func_caps.num_tx_qp,
		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
		queues_left);
}
11148
11149
11150
11151
11152
11153
11154
11155
11156
11157
11158
11159
11160static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
11161{
11162 struct i40e_filter_control_settings *settings = &pf->filter_settings;
11163
11164 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
11165
11166
11167 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
11168 settings->enable_fdir = true;
11169
11170
11171 settings->enable_ethtype = true;
11172 settings->enable_macvlan = true;
11173
11174 if (i40e_set_filter_control(&pf->hw, settings))
11175 return -ENOENT;
11176
11177 return 0;
11178}
11179
11180#define INFO_STRING_LEN 255
11181#define REMAIN(__x) (INFO_STRING_LEN - (__x))
11182static void i40e_print_features(struct i40e_pf *pf)
11183{
11184 struct i40e_hw *hw = &pf->hw;
11185 char *buf;
11186 int i;
11187
11188 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
11189 if (!buf)
11190 return;
11191
11192 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
11193#ifdef CONFIG_PCI_IOV
11194 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
11195#endif
11196 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
11197 pf->hw.func_caps.num_vsis,
11198 pf->vsi[pf->lan_vsi]->num_queue_pairs);
11199 if (pf->flags & I40E_FLAG_RSS_ENABLED)
11200 i += snprintf(&buf[i], REMAIN(i), " RSS");
11201 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
11202 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
11203 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11204 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
11205 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
11206 }
11207 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
11208 i += snprintf(&buf[i], REMAIN(i), " DCB");
11209 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
11210 i += snprintf(&buf[i], REMAIN(i), " Geneve");
11211 if (pf->flags & I40E_FLAG_PTP)
11212 i += snprintf(&buf[i], REMAIN(i), " PTP");
11213 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
11214 i += snprintf(&buf[i], REMAIN(i), " VEB");
11215 else
11216 i += snprintf(&buf[i], REMAIN(i), " VEPA");
11217
11218 dev_info(&pf->pdev->dev, "%s\n", buf);
11219 kfree(buf);
11220 WARN_ON(i > INFO_STRING_LEN);
11221}
11222
11223
11224
11225
11226
11227
11228
11229
11230
11231
11232
11233static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
11234{
11235 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
11236 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
11237}
11238
11239
11240
11241
11242
11243
11244
11245
11246
11247
11248
11249
/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * Called by the PCI subsystem for each device matching i40e_pci_tbl.
 * Brings the adapter from reset to a configured PF: maps BAR0, resets
 * the HW, initializes the admin queue and HMC, reads the MAC address,
 * sets up the switch/VSIs, interrupts, WoL and the service task.
 *
 * Returns 0 on success, negative on failure
 */
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	static u16 pfs_found;	/* persists across probes of multiple PFs */
	u16 wol_nvm_bits;
	u16 link_status;
	int err;
	u32 val;
	u32 i;
	u8 set_fc_aq_fail;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma; prefer 64-bit, fall back to 32 */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	/* claim the device's memory BARs */
	err = pci_request_mem_regions(pdev, i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_selected_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup.  This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf) {
		err = -ENOMEM;
		goto err_pf_alloc;
	}
	pf->next_vsi = 0;
	pf->pdev = pdev;
	/* mark down until the switch/VSIs are fully set up below */
	set_bit(__I40E_DOWN, pf->state);

	hw = &pf->hw;
	hw->back = pf;

	/* only map as much CSR space as the driver actually uses */
	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
				I40E_MAX_CSR_SPACE);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
	if (!hw->hw_addr) {
		err = -EIO;
		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
			 (unsigned int)pci_resource_start(pdev, 0),
			 pf->ioremap_len, err);
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;
	pf->instance = pfs_found;

	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
	INIT_LIST_HEAD(&pf->l4_flex_pit_list);

	/* set up the AQ locks once here; they are destroyed only once,
	 * in i40e_remove()
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	pf->msg_enable = netif_msg_init(debug,
					NETIF_MSG_DRV |
					NETIF_MSG_PROBE |
					NETIF_MSG_LINK);
	/* NOTE(review): values below -1 are treated as a raw HW debug
	 * mask — confirm against the module parameter documentation.
	 */
	if (debug < -1)
		pf->hw.debug_mask = debug;

	/* do a special CORER for clearing PXE mode once at init */
	if (hw->revision_id == 0 &&
	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
		i40e_flush(hw);
		msleep(200);
		pf->corer_count++;

		i40e_clear_pxe_mode(hw);
	}

	/* Reset here to make sure all is clean and to define PF 'n' */
	i40e_clear_hw(hw);
	err = i40e_pf_reset(hw);
	if (err) {
		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
		goto err_pf_reset;
	}
	pf->pfr_count++;

	/* admin queue geometry */
	hw->aq.num_arq_entries = I40E_AQ_LEN;
	hw->aq.num_asq_entries = I40E_AQ_LEN;
	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;

	/* name used when requesting the misc interrupt vector */
	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
		 "%s-%s:misc",
		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));

	err = i40e_init_shared_code(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	/* set up a default setting for link flow control */
	pf->hw.fc.requested_mode = I40E_FC_NONE;

	err = i40e_init_adminq(hw);
	if (err) {
		if (err == I40E_ERR_FIRMWARE_API_VERSION)
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		else
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");

		goto err_pf_reset;
	}
	i40e_get_oem_version(hw);

	/* provide nvm, fw, api versions */
	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
		 i40e_nvm_version_str(hw));

	/* warn on firmware/driver API version skew in either direction */
	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
		dev_info(&pdev->dev,
			 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
		 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
		dev_info(&pdev->dev,
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");

	i40e_verify_eeprom(pf);

	/* Rev 0 hardware was never productized */
	if (hw->revision_id < 1)
		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");

	i40e_clear_pxe_mode(hw);
	err = i40e_get_capabilities(pf);
	if (err)
		goto err_adminq_setup;

	err = i40e_sw_init(pf);
	if (err) {
		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
		goto err_sw_init;
	}

	/* set up the Host Memory Cache backing the HW contexts */
	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (err) {
		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
		goto err_init_lan_hmc;
	}

	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (err) {
		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
		err = -ENOENT;
		goto err_configure_lan_hmc;
	}

	/* Disable the FW LLDP agent when requested so software DCB
	 * configuration can take over.
	 */
	if (pf->flags & I40E_FLAG_STOP_FW_LLDP) {
		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
		i40e_aq_stop_lldp(hw, true, NULL);
	}

	/* allow a platform-specific mac address to supersede h/w */
	i40e_get_platform_mac_addr(pdev, pf);

	if (!is_valid_ether_addr(hw->mac.addr)) {
		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
		err = -EIO;
		goto err_mac_addr;
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
	if (is_valid_ether_addr(hw->mac.port_addr))
		pf->flags |= I40E_FLAG_PORT_ID_VALID;

	pci_set_drvdata(pdev, pf);
	pci_save_state(pdev);
#ifdef CONFIG_I40E_DCB
	err = i40e_init_pf_dcb(pf);
	if (err) {
		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
		/* continue without DCB — not fatal */
	}
#endif

	/* set up periodic service task; it is armed near the end of probe */
	setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1)
		pf->wol_en = false;
	else
		pf->wol_en = true;
	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);

	/* set up the main switch operations */
	i40e_determine_queue_usage(pf);
	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; allocate at least I40E_MIN_VSI_ALLOC descriptors so the
	 * driver's bookkeeping has room.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
			  GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

#ifdef CONFIG_PCI_IOV
	/* if VFs are already active (e.g. after a kexec), switch to VEB
	 * mode before setting up the switch
	 */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		if (pci_num_vf(pdev))
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
	}
#endif
	err = i40e_setup_pf_switch(pf, false);
	if (err) {
		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
		goto err_vsis;
	}

	/* Make sure flow control is set according to current settings */
	err = i40e_set_fc(hw, &set_fc_aq_fail, true);
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_phy_cap\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on set_phy_config\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_link_info\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));

	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
		}
	}

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.
	 */
	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* NOTE(review): lowering the minimum MSS below 64 bytes when the
	 * register reports a larger minimum — presumably a HW workaround;
	 * verify against the device errata.
	 */
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
		msleep(75);
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}

	/* The main driver is (mostly) up and happy. We need to set this
	 * state before setting up the misc vector or we get a race.
	 */
	clear_bit(__I40E_DOWN, pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets set up elsewhere.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			goto err_vsis;
		}
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif

	/* reserve MSI-X vectors for the iWARP client, disabling iWARP if
	 * the reservation fails
	 */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
						      pf->num_iwarp_msix,
						      I40E_IWARP_IRQ_PILE_ID);
		if (pf->iwarp_base_vector < 0) {
			dev_info(&pdev->dev,
				 "failed to get tracking for %d vectors for IWARP err=%d\n",
				 pf->num_iwarp_msix, pf->iwarp_base_vector);
			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
		}
	}

	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything is good, start the service timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	/* add this PF to client device list and launch a client service task */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		err = i40e_lan_add_device(pf);
		if (err)
			dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
				 err);
	}

#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
	/* Report the negotiated PCIe link speed/width and warn when it is
	 * below what the adapter needs for full throughput.
	 */
	if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
		char speed[PCI_SPEED_SIZE] = "Unknown";
		char width[PCI_WIDTH_SIZE] = "Unknown";

		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
					  &link_status);

		i40e_set_pci_config_data(hw, link_status);

		switch (hw->bus.speed) {
		case i40e_bus_speed_8000:
			strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_5000:
			strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_2500:
			strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
		default:
			break;
		}
		switch (hw->bus.width) {
		case i40e_bus_width_pcie_x8:
			strncpy(width, "8", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x4:
			strncpy(width, "4", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x2:
			strncpy(width, "2", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x1:
			strncpy(width, "1", PCI_WIDTH_SIZE); break;
		default:
			break;
		}

		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
			 speed, width);

		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
		    hw->bus.speed < i40e_bus_speed_8000) {
			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
		}
	}

	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
		pf->flags |= I40E_FLAG_PHY_CONTROLS_LEDS;
	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
		pf->flags |= I40E_FLAG_HAVE_CRT_RETIMER;

	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;

	/* Unwind what's done so far in reverse order */
err_vsis:
	set_bit(__I40E_DOWN, pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	/* HMC was initialized by this point, so shut it down */
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
11772
11773
11774
11775
11776
11777
11778
11779
11780
11781
/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * Called by the PCI subsystem to release the device — either on
 * hot-unplug or module unload.  Unwinds everything i40e_probe() set
 * up: stops the service task, frees VFs, tears down the switch
 * hierarchy and VSIs, shuts down the HMC and admin queue, and finally
 * releases PCI resources.
 */
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);

	/* no more scheduling of any task; timer/work may not have been
	 * set up if probe failed early, hence the guards
	 */
	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	if (pf->service_timer.data)
		del_timer_sync(&pf->service_timer);
	if (pf->service_task.func)
		cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * that would normally drive it has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	/* remove attached clients */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		ret_code = i40e_lan_del_device(pf);
		if (ret_code)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 ret_code);
	}

	/* shutdown and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

	/* shutdown the adminq */
	i40e_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(hw->hw_addr);
	kfree(pf);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
11884
11885
11886
11887
11888
11889
11890
11891
11892
/**
 * i40e_pci_error_detected - PCI AER callback for a detected bus error
 * @pdev: PCI device information struct
 * @error: the channel state at the time of the error
 *
 * Quiesces the device (unless a suspend already did) and asks the PCI
 * core to perform a slot reset.
 */
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	/* drvdata not set yet means the error hit during probe — there is
	 * no driver state to recover
	 */
	if (!pf) {
		dev_info(&pdev->dev,
			 "Cannot recover - error happened during device probe\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* shutdown all operations here without a full cleanup */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		i40e_prep_for_reset(pf, false);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
11913
11914
11915
11916
11917
11918
11919
11920
11921
11922
11923static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
11924{
11925 struct i40e_pf *pf = pci_get_drvdata(pdev);
11926 pci_ers_result_t result;
11927 int err;
11928 u32 reg;
11929
11930 dev_dbg(&pdev->dev, "%s\n", __func__);
11931 if (pci_enable_device_mem(pdev)) {
11932 dev_info(&pdev->dev,
11933 "Cannot re-enable PCI device after reset.\n");
11934 result = PCI_ERS_RESULT_DISCONNECT;
11935 } else {
11936 pci_set_master(pdev);
11937 pci_restore_state(pdev);
11938 pci_save_state(pdev);
11939 pci_wake_from_d3(pdev, false);
11940
11941 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
11942 if (reg == 0)
11943 result = PCI_ERS_RESULT_RECOVERED;
11944 else
11945 result = PCI_ERS_RESULT_DISCONNECT;
11946 }
11947
11948 err = pci_cleanup_aer_uncorrect_error_status(pdev);
11949 if (err) {
11950 dev_info(&pdev->dev,
11951 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
11952 err);
11953
11954 }
11955
11956 return result;
11957}
11958
11959
11960
11961
11962
11963
11964
11965
11966static void i40e_pci_error_resume(struct pci_dev *pdev)
11967{
11968 struct i40e_pf *pf = pci_get_drvdata(pdev);
11969
11970 dev_dbg(&pdev->dev, "%s\n", __func__);
11971 if (test_bit(__I40E_SUSPENDED, pf->state))
11972 return;
11973
11974 i40e_handle_reset_warning(pf, false);
11975}
11976
11977
11978
11979
11980
11981
11982static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
11983{
11984 struct i40e_hw *hw = &pf->hw;
11985 i40e_status ret;
11986 u8 mac_addr[6];
11987 u16 flags = 0;
11988
11989
11990 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
11991 ether_addr_copy(mac_addr,
11992 pf->vsi[pf->lan_vsi]->netdev->dev_addr);
11993 } else {
11994 dev_err(&pf->pdev->dev,
11995 "Failed to retrieve MAC address; using default\n");
11996 ether_addr_copy(mac_addr, hw->mac.addr);
11997 }
11998
11999
12000
12001
12002
12003 flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
12004
12005 if (hw->func_caps.flex10_enable && hw->partition_id != 1)
12006 flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
12007
12008 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
12009 if (ret) {
12010 dev_err(&pf->pdev->dev,
12011 "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
12012 return;
12013 }
12014
12015 flags = I40E_AQC_MC_MAG_EN
12016 | I40E_AQC_WOL_PRESERVE_ON_PFR
12017 | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
12018 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
12019 if (ret)
12020 dev_err(&pf->pdev->dev,
12021 "Failed to enable Multicast Magic Packet wake up\n");
12022}
12023
12024
12025
12026
12027
/**
 * i40e_shutdown - PCI callback for shutting down (reboot/power-off)
 * @pdev: PCI device information struct
 *
 * Quiesces the device, arms the wake-up registers according to the
 * WoL setting, and — only when the system is actually powering off —
 * puts the device into D3hot.
 */
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf, true);
	rtnl_unlock();

	/* arm the wake-up registers per the WoL setting */
	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_fdir_teardown(pf);

	/* Client close must be called explicitly here because the timer
	 * that would normally drive it has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	/* NOTE(review): prep-for-reset and the PFPM writes are repeated
	 * after the MC magic-wake AQ traffic above — presumably to
	 * re-quiesce after that admin-queue activity; confirm against
	 * driver history before changing.
	 */
	i40e_prep_for_reset(pf, false);

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	i40e_clear_interrupt_scheme(pf);

	/* only drop to D3hot on a real power-off, not on reboot */
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
12068
12069#ifdef CONFIG_PM
12070
12071
12072
12073
/**
 * i40e_suspend - PCI callback for moving to D3
 * @pdev: PCI device information struct
 * @state: target power state requested by the PM core (not inspected)
 *
 * Quiesces the device, arms the wake-up registers per the WoL setting,
 * saves PCI config space, and drops the device into D3hot.
 *
 * Returns 0 on success, or the pci_save_state() error code.
 */
static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	int retval = 0;

	/* block the service task and mark the device down */
	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);

	if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf, false);

	/* arm the wake-up registers per the WoL setting */
	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	i40e_stop_misc_vector(pf);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	pci_wake_from_d3(pdev, pf->wol_en);
	pci_set_power_state(pdev, PCI_D3hot);

	return retval;
}
12102
12103
12104
12105
12106
12107static int i40e_resume(struct pci_dev *pdev)
12108{
12109 struct i40e_pf *pf = pci_get_drvdata(pdev);
12110 u32 err;
12111
12112 pci_set_power_state(pdev, PCI_D0);
12113 pci_restore_state(pdev);
12114
12115
12116
12117 pci_save_state(pdev);
12118
12119 err = pci_enable_device_mem(pdev);
12120 if (err) {
12121 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
12122 return err;
12123 }
12124 pci_set_master(pdev);
12125
12126
12127 pci_wake_from_d3(pdev, false);
12128
12129
12130 if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
12131 clear_bit(__I40E_DOWN, pf->state);
12132 i40e_reset_and_rebuild(pf, false, false);
12133 }
12134
12135 return 0;
12136}
12137
12138#endif
/* PCI Advanced Error Reporting (AER) recovery callbacks */
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};
12144
/* PCI driver registration covering every device ID in i40e_pci_tbl */
static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
#ifdef CONFIG_PM
	.suspend = i40e_suspend,
	.resume = i40e_resume,
#endif
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
12158
12159
12160
12161
12162
12163
12164
12165static int __init i40e_init_module(void)
12166{
12167 pr_info("%s: %s - version %s\n", i40e_driver_name,
12168 i40e_driver_string, i40e_driver_version_str);
12169 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
12170
12171
12172
12173
12174
12175 i40e_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
12176 i40e_driver_name);
12177 if (!i40e_wq) {
12178 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
12179 return -ENOMEM;
12180 }
12181
12182 i40e_dbg_init();
12183 return pci_register_driver(&i40e_driver);
12184}
12185module_init(i40e_init_module);
12186
12187
12188
12189
12190
12191
12192
/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * Called just before the driver is removed from memory: unregisters
 * from the PCI subsystem (which invokes i40e_remove() for each bound
 * device), then destroys the service workqueue and debugfs entries.
 */
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);
12200