1
2
3
4#include <linux/etherdevice.h>
5#include <linux/of_net.h>
6#include <linux/pci.h>
7#include <linux/bpf.h>
8#include <generated/utsrelease.h>
9
10
11#include "i40e.h"
12#include "i40e_diag.h"
13#include "i40e_xsk.h"
14#include <net/udp_tunnel.h>
15#include <net/xdp_sock_drv.h>
16
17
18
19
20#define CREATE_TRACE_POINTS
21#include "i40e_trace.h"
22
23const char i40e_driver_name[] = "i40e";
24static const char i40e_driver_string[] =
25 "Intel(R) Ethernet Connection XL710 Network Driver";
26
27static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
28
29
30static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
31static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
32static int i40e_add_vsi(struct i40e_vsi *vsi);
33static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
34static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
35static int i40e_setup_misc_vector(struct i40e_pf *pf);
36static void i40e_determine_queue_usage(struct i40e_pf *pf);
37static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
38static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
39static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
40 bool lock_acquired);
41static int i40e_reset(struct i40e_pf *pf);
42static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
43static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
44static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
45static bool i40e_check_recovery_mode(struct i40e_pf *pf);
46static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
47static void i40e_fdir_sb_setup(struct i40e_pf *pf);
48static int i40e_veb_get_bw_info(struct i40e_veb *veb);
49static int i40e_get_capabilities(struct i40e_pf *pf,
50 enum i40e_admin_queue_opc list_type);
51static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);
52
53
54
55
56
57
58
59
60static const struct pci_device_id i40e_pci_tbl[] = {
61 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
62 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
63 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
64 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
65 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
66 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
67 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
68 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
69 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
70 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
71 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
72 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
73 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
74 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
75 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
76 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
77 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
78 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
79 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
80 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
81 {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
82 {PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
83 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
84 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
85
86 {0, }
87};
88MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
89
90#define I40E_MAX_VF_COUNT 128
91static int debug = -1;
92module_param(debug, uint, 0);
93MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
94
95MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
96MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
97MODULE_LICENSE("GPL v2");
98
99static struct workqueue_struct *i40e_wq;
100
101
102
103
104
105
106
107
108int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
109 u64 size, u32 alignment)
110{
111 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
112
113 mem->size = ALIGN(size, alignment);
114 mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
115 GFP_KERNEL);
116 if (!mem->va)
117 return -ENOMEM;
118
119 return 0;
120}
121
122
123
124
125
126
127int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
128{
129 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
130
131 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
132 mem->va = NULL;
133 mem->pa = 0;
134 mem->size = 0;
135
136 return 0;
137}
138
139
140
141
142
143
144
145int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
146 u32 size)
147{
148 mem->size = size;
149 mem->va = kzalloc(size, GFP_KERNEL);
150
151 if (!mem->va)
152 return -ENOMEM;
153
154 return 0;
155}
156
157
158
159
160
161
162int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
163{
164
165 kfree(mem->va);
166 mem->va = NULL;
167 mem->size = 0;
168
169 return 0;
170}
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
186 u16 needed, u16 id)
187{
188 int ret = -ENOMEM;
189 int i, j;
190
191 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
192 dev_info(&pf->pdev->dev,
193 "param err: pile=%s needed=%d id=0x%04x\n",
194 pile ? "<valid>" : "<null>", needed, id);
195 return -EINVAL;
196 }
197
198
199 i = pile->search_hint;
200 while (i < pile->num_entries) {
201
202 if (pile->list[i] & I40E_PILE_VALID_BIT) {
203 i++;
204 continue;
205 }
206
207
208 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
209 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
210 break;
211 }
212
213 if (j == needed) {
214
215 for (j = 0; j < needed; j++)
216 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
217 ret = i;
218 pile->search_hint = i + j;
219 break;
220 }
221
222
223 i += j;
224 }
225
226 return ret;
227}
228
229
230
231
232
233
234
235
236
237static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
238{
239 int valid_id = (id | I40E_PILE_VALID_BIT);
240 int count = 0;
241 int i;
242
243 if (!pile || index >= pile->num_entries)
244 return -EINVAL;
245
246 for (i = index;
247 i < pile->num_entries && pile->list[i] == valid_id;
248 i++) {
249 pile->list[i] = 0;
250 count++;
251 }
252
253 if (count && index < pile->search_hint)
254 pile->search_hint = index;
255
256 return count;
257}
258
259
260
261
262
263
264struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
265{
266 int i;
267
268 for (i = 0; i < pf->num_alloc_vsi; i++)
269 if (pf->vsi[i] && (pf->vsi[i]->id == id))
270 return pf->vsi[i];
271
272 return NULL;
273}
274
275
276
277
278
279
280
281void i40e_service_event_schedule(struct i40e_pf *pf)
282{
283 if ((!test_bit(__I40E_DOWN, pf->state) &&
284 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
285 test_bit(__I40E_RECOVERY_MODE, pf->state))
286 queue_work(i40e_wq, &pf->service_task);
287}
288
289
290
291
292
293
294
295
296
297
298static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
299{
300 struct i40e_netdev_priv *np = netdev_priv(netdev);
301 struct i40e_vsi *vsi = np->vsi;
302 struct i40e_pf *pf = vsi->back;
303 struct i40e_ring *tx_ring = NULL;
304 unsigned int i;
305 u32 head, val;
306
307 pf->tx_timeout_count++;
308
309
310 for (i = 0; i < vsi->num_queue_pairs; i++) {
311 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
312 if (txqueue ==
313 vsi->tx_rings[i]->queue_index) {
314 tx_ring = vsi->tx_rings[i];
315 break;
316 }
317 }
318 }
319
320 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
321 pf->tx_timeout_recovery_level = 1;
322 else if (time_before(jiffies,
323 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
324 return;
325
326
327 if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
328 return;
329
330 if (tx_ring) {
331 head = i40e_get_head(tx_ring);
332
333 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
334 val = rd32(&pf->hw,
335 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
336 tx_ring->vsi->base_vector - 1));
337 else
338 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
339
340 netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
341 vsi->seid, txqueue, tx_ring->next_to_clean,
342 head, tx_ring->next_to_use,
343 readl(tx_ring->tail), val);
344 }
345
346 pf->tx_timeout_last_recovery = jiffies;
347 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
348 pf->tx_timeout_recovery_level, txqueue);
349
350 switch (pf->tx_timeout_recovery_level) {
351 case 1:
352 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
353 break;
354 case 2:
355 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
356 break;
357 case 3:
358 set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
359 break;
360 default:
361 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
362 break;
363 }
364
365 i40e_service_event_schedule(pf);
366 pf->tx_timeout_recovery_level++;
367}
368
369
370
371
372
373
374
375
376struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
377{
378 return &vsi->net_stats;
379}
380
381
382
383
384
385
386static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
387 struct rtnl_link_stats64 *stats)
388{
389 u64 bytes, packets;
390 unsigned int start;
391
392 do {
393 start = u64_stats_fetch_begin_irq(&ring->syncp);
394 packets = ring->stats.packets;
395 bytes = ring->stats.bytes;
396 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
397
398 stats->tx_packets += packets;
399 stats->tx_bytes += bytes;
400}
401
402
403
404
405
406
407
408
409
410static void i40e_get_netdev_stats_struct(struct net_device *netdev,
411 struct rtnl_link_stats64 *stats)
412{
413 struct i40e_netdev_priv *np = netdev_priv(netdev);
414 struct i40e_vsi *vsi = np->vsi;
415 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
416 struct i40e_ring *ring;
417 int i;
418
419 if (test_bit(__I40E_VSI_DOWN, vsi->state))
420 return;
421
422 if (!vsi->tx_rings)
423 return;
424
425 rcu_read_lock();
426 for (i = 0; i < vsi->num_queue_pairs; i++) {
427 u64 bytes, packets;
428 unsigned int start;
429
430 ring = READ_ONCE(vsi->tx_rings[i]);
431 if (!ring)
432 continue;
433 i40e_get_netdev_stats_struct_tx(ring, stats);
434
435 if (i40e_enabled_xdp_vsi(vsi)) {
436 ring = READ_ONCE(vsi->xdp_rings[i]);
437 if (!ring)
438 continue;
439 i40e_get_netdev_stats_struct_tx(ring, stats);
440 }
441
442 ring = READ_ONCE(vsi->rx_rings[i]);
443 if (!ring)
444 continue;
445 do {
446 start = u64_stats_fetch_begin_irq(&ring->syncp);
447 packets = ring->stats.packets;
448 bytes = ring->stats.bytes;
449 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
450
451 stats->rx_packets += packets;
452 stats->rx_bytes += bytes;
453
454 }
455 rcu_read_unlock();
456
457
458 stats->multicast = vsi_stats->multicast;
459 stats->tx_errors = vsi_stats->tx_errors;
460 stats->tx_dropped = vsi_stats->tx_dropped;
461 stats->rx_errors = vsi_stats->rx_errors;
462 stats->rx_dropped = vsi_stats->rx_dropped;
463 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
464 stats->rx_length_errors = vsi_stats->rx_length_errors;
465}
466
467
468
469
470
471void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
472{
473 struct rtnl_link_stats64 *ns;
474 int i;
475
476 if (!vsi)
477 return;
478
479 ns = i40e_get_vsi_stats_struct(vsi);
480 memset(ns, 0, sizeof(*ns));
481 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
482 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
483 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
484 if (vsi->rx_rings && vsi->rx_rings[0]) {
485 for (i = 0; i < vsi->num_queue_pairs; i++) {
486 memset(&vsi->rx_rings[i]->stats, 0,
487 sizeof(vsi->rx_rings[i]->stats));
488 memset(&vsi->rx_rings[i]->rx_stats, 0,
489 sizeof(vsi->rx_rings[i]->rx_stats));
490 memset(&vsi->tx_rings[i]->stats, 0,
491 sizeof(vsi->tx_rings[i]->stats));
492 memset(&vsi->tx_rings[i]->tx_stats, 0,
493 sizeof(vsi->tx_rings[i]->tx_stats));
494 }
495 }
496 vsi->stat_offsets_loaded = false;
497}
498
499
500
501
502
503void i40e_pf_reset_stats(struct i40e_pf *pf)
504{
505 int i;
506
507 memset(&pf->stats, 0, sizeof(pf->stats));
508 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
509 pf->stat_offsets_loaded = false;
510
511 for (i = 0; i < I40E_MAX_VEB; i++) {
512 if (pf->veb[i]) {
513 memset(&pf->veb[i]->stats, 0,
514 sizeof(pf->veb[i]->stats));
515 memset(&pf->veb[i]->stats_offsets, 0,
516 sizeof(pf->veb[i]->stats_offsets));
517 memset(&pf->veb[i]->tc_stats, 0,
518 sizeof(pf->veb[i]->tc_stats));
519 memset(&pf->veb[i]->tc_stats_offsets, 0,
520 sizeof(pf->veb[i]->tc_stats_offsets));
521 pf->veb[i]->stat_offsets_loaded = false;
522 }
523 }
524 pf->hw_csum_rx_error = 0;
525}
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
543 bool offset_loaded, u64 *offset, u64 *stat)
544{
545 u64 new_data;
546
547 if (hw->device_id == I40E_DEV_ID_QEMU) {
548 new_data = rd32(hw, loreg);
549 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
550 } else {
551 new_data = rd64(hw, loreg);
552 }
553 if (!offset_loaded)
554 *offset = new_data;
555 if (likely(new_data >= *offset))
556 *stat = new_data - *offset;
557 else
558 *stat = (new_data + BIT_ULL(48)) - *offset;
559 *stat &= 0xFFFFFFFFFFFFULL;
560}
561
562
563
564
565
566
567
568
569
570static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
571 bool offset_loaded, u64 *offset, u64 *stat)
572{
573 u32 new_data;
574
575 new_data = rd32(hw, reg);
576 if (!offset_loaded)
577 *offset = new_data;
578 if (likely(new_data >= *offset))
579 *stat = (u32)(new_data - *offset);
580 else
581 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
582}
583
584
585
586
587
588
589
590static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
591{
592 u32 new_data = rd32(hw, reg);
593
594 wr32(hw, reg, 1);
595 *stat += new_data;
596}
597
598
599
600
601
602void i40e_update_eth_stats(struct i40e_vsi *vsi)
603{
604 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
605 struct i40e_pf *pf = vsi->back;
606 struct i40e_hw *hw = &pf->hw;
607 struct i40e_eth_stats *oes;
608 struct i40e_eth_stats *es;
609
610 es = &vsi->eth_stats;
611 oes = &vsi->eth_stats_offsets;
612
613
614 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
615 vsi->stat_offsets_loaded,
616 &oes->tx_errors, &es->tx_errors);
617 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
618 vsi->stat_offsets_loaded,
619 &oes->rx_discards, &es->rx_discards);
620 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
621 vsi->stat_offsets_loaded,
622 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
623
624 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
625 I40E_GLV_GORCL(stat_idx),
626 vsi->stat_offsets_loaded,
627 &oes->rx_bytes, &es->rx_bytes);
628 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
629 I40E_GLV_UPRCL(stat_idx),
630 vsi->stat_offsets_loaded,
631 &oes->rx_unicast, &es->rx_unicast);
632 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
633 I40E_GLV_MPRCL(stat_idx),
634 vsi->stat_offsets_loaded,
635 &oes->rx_multicast, &es->rx_multicast);
636 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
637 I40E_GLV_BPRCL(stat_idx),
638 vsi->stat_offsets_loaded,
639 &oes->rx_broadcast, &es->rx_broadcast);
640
641 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
642 I40E_GLV_GOTCL(stat_idx),
643 vsi->stat_offsets_loaded,
644 &oes->tx_bytes, &es->tx_bytes);
645 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
646 I40E_GLV_UPTCL(stat_idx),
647 vsi->stat_offsets_loaded,
648 &oes->tx_unicast, &es->tx_unicast);
649 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
650 I40E_GLV_MPTCL(stat_idx),
651 vsi->stat_offsets_loaded,
652 &oes->tx_multicast, &es->tx_multicast);
653 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
654 I40E_GLV_BPTCL(stat_idx),
655 vsi->stat_offsets_loaded,
656 &oes->tx_broadcast, &es->tx_broadcast);
657 vsi->stat_offsets_loaded = true;
658}
659
660
661
662
663
664void i40e_update_veb_stats(struct i40e_veb *veb)
665{
666 struct i40e_pf *pf = veb->pf;
667 struct i40e_hw *hw = &pf->hw;
668 struct i40e_eth_stats *oes;
669 struct i40e_eth_stats *es;
670 struct i40e_veb_tc_stats *veb_oes;
671 struct i40e_veb_tc_stats *veb_es;
672 int i, idx = 0;
673
674 idx = veb->stats_idx;
675 es = &veb->stats;
676 oes = &veb->stats_offsets;
677 veb_es = &veb->tc_stats;
678 veb_oes = &veb->tc_stats_offsets;
679
680
681 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
682 veb->stat_offsets_loaded,
683 &oes->tx_discards, &es->tx_discards);
684 if (hw->revision_id > 0)
685 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
686 veb->stat_offsets_loaded,
687 &oes->rx_unknown_protocol,
688 &es->rx_unknown_protocol);
689 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
690 veb->stat_offsets_loaded,
691 &oes->rx_bytes, &es->rx_bytes);
692 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
693 veb->stat_offsets_loaded,
694 &oes->rx_unicast, &es->rx_unicast);
695 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
696 veb->stat_offsets_loaded,
697 &oes->rx_multicast, &es->rx_multicast);
698 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
699 veb->stat_offsets_loaded,
700 &oes->rx_broadcast, &es->rx_broadcast);
701
702 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
703 veb->stat_offsets_loaded,
704 &oes->tx_bytes, &es->tx_bytes);
705 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
706 veb->stat_offsets_loaded,
707 &oes->tx_unicast, &es->tx_unicast);
708 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
709 veb->stat_offsets_loaded,
710 &oes->tx_multicast, &es->tx_multicast);
711 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
712 veb->stat_offsets_loaded,
713 &oes->tx_broadcast, &es->tx_broadcast);
714 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
715 i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
716 I40E_GLVEBTC_RPCL(i, idx),
717 veb->stat_offsets_loaded,
718 &veb_oes->tc_rx_packets[i],
719 &veb_es->tc_rx_packets[i]);
720 i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
721 I40E_GLVEBTC_RBCL(i, idx),
722 veb->stat_offsets_loaded,
723 &veb_oes->tc_rx_bytes[i],
724 &veb_es->tc_rx_bytes[i]);
725 i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
726 I40E_GLVEBTC_TPCL(i, idx),
727 veb->stat_offsets_loaded,
728 &veb_oes->tc_tx_packets[i],
729 &veb_es->tc_tx_packets[i]);
730 i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
731 I40E_GLVEBTC_TBCL(i, idx),
732 veb->stat_offsets_loaded,
733 &veb_oes->tc_tx_bytes[i],
734 &veb_es->tc_tx_bytes[i]);
735 }
736 veb->stat_offsets_loaded = true;
737}
738
739
740
741
742
743
744
745
746
747
748
749static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
750{
751 struct i40e_pf *pf = vsi->back;
752 struct rtnl_link_stats64 *ons;
753 struct rtnl_link_stats64 *ns;
754 struct i40e_eth_stats *oes;
755 struct i40e_eth_stats *es;
756 u32 tx_restart, tx_busy;
757 struct i40e_ring *p;
758 u32 rx_page, rx_buf;
759 u64 bytes, packets;
760 unsigned int start;
761 u64 tx_linearize;
762 u64 tx_force_wb;
763 u64 rx_p, rx_b;
764 u64 tx_p, tx_b;
765 u16 q;
766
767 if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
768 test_bit(__I40E_CONFIG_BUSY, pf->state))
769 return;
770
771 ns = i40e_get_vsi_stats_struct(vsi);
772 ons = &vsi->net_stats_offsets;
773 es = &vsi->eth_stats;
774 oes = &vsi->eth_stats_offsets;
775
776
777
778
779 rx_b = rx_p = 0;
780 tx_b = tx_p = 0;
781 tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
782 rx_page = 0;
783 rx_buf = 0;
784 rcu_read_lock();
785 for (q = 0; q < vsi->num_queue_pairs; q++) {
786
787 p = READ_ONCE(vsi->tx_rings[q]);
788 if (!p)
789 continue;
790
791 do {
792 start = u64_stats_fetch_begin_irq(&p->syncp);
793 packets = p->stats.packets;
794 bytes = p->stats.bytes;
795 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
796 tx_b += bytes;
797 tx_p += packets;
798 tx_restart += p->tx_stats.restart_queue;
799 tx_busy += p->tx_stats.tx_busy;
800 tx_linearize += p->tx_stats.tx_linearize;
801 tx_force_wb += p->tx_stats.tx_force_wb;
802
803
804 p = READ_ONCE(vsi->rx_rings[q]);
805 if (!p)
806 continue;
807
808 do {
809 start = u64_stats_fetch_begin_irq(&p->syncp);
810 packets = p->stats.packets;
811 bytes = p->stats.bytes;
812 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
813 rx_b += bytes;
814 rx_p += packets;
815 rx_buf += p->rx_stats.alloc_buff_failed;
816 rx_page += p->rx_stats.alloc_page_failed;
817
818 if (i40e_enabled_xdp_vsi(vsi)) {
819
820 p = READ_ONCE(vsi->xdp_rings[q]);
821 if (!p)
822 continue;
823
824 do {
825 start = u64_stats_fetch_begin_irq(&p->syncp);
826 packets = p->stats.packets;
827 bytes = p->stats.bytes;
828 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
829 tx_b += bytes;
830 tx_p += packets;
831 tx_restart += p->tx_stats.restart_queue;
832 tx_busy += p->tx_stats.tx_busy;
833 tx_linearize += p->tx_stats.tx_linearize;
834 tx_force_wb += p->tx_stats.tx_force_wb;
835 }
836 }
837 rcu_read_unlock();
838 vsi->tx_restart = tx_restart;
839 vsi->tx_busy = tx_busy;
840 vsi->tx_linearize = tx_linearize;
841 vsi->tx_force_wb = tx_force_wb;
842 vsi->rx_page_failed = rx_page;
843 vsi->rx_buf_failed = rx_buf;
844
845 ns->rx_packets = rx_p;
846 ns->rx_bytes = rx_b;
847 ns->tx_packets = tx_p;
848 ns->tx_bytes = tx_b;
849
850
851 i40e_update_eth_stats(vsi);
852 ons->tx_errors = oes->tx_errors;
853 ns->tx_errors = es->tx_errors;
854 ons->multicast = oes->rx_multicast;
855 ns->multicast = es->rx_multicast;
856 ons->rx_dropped = oes->rx_discards;
857 ns->rx_dropped = es->rx_discards;
858 ons->tx_dropped = oes->tx_discards;
859 ns->tx_dropped = es->tx_discards;
860
861
862 if (vsi == pf->vsi[pf->lan_vsi]) {
863 ns->rx_crc_errors = pf->stats.crc_errors;
864 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
865 ns->rx_length_errors = pf->stats.rx_length_errors;
866 }
867}
868
869
870
871
872
873static void i40e_update_pf_stats(struct i40e_pf *pf)
874{
875 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
876 struct i40e_hw_port_stats *nsd = &pf->stats;
877 struct i40e_hw *hw = &pf->hw;
878 u32 val;
879 int i;
880
881 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
882 I40E_GLPRT_GORCL(hw->port),
883 pf->stat_offsets_loaded,
884 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
885 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
886 I40E_GLPRT_GOTCL(hw->port),
887 pf->stat_offsets_loaded,
888 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
889 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
890 pf->stat_offsets_loaded,
891 &osd->eth.rx_discards,
892 &nsd->eth.rx_discards);
893 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
894 I40E_GLPRT_UPRCL(hw->port),
895 pf->stat_offsets_loaded,
896 &osd->eth.rx_unicast,
897 &nsd->eth.rx_unicast);
898 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
899 I40E_GLPRT_MPRCL(hw->port),
900 pf->stat_offsets_loaded,
901 &osd->eth.rx_multicast,
902 &nsd->eth.rx_multicast);
903 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
904 I40E_GLPRT_BPRCL(hw->port),
905 pf->stat_offsets_loaded,
906 &osd->eth.rx_broadcast,
907 &nsd->eth.rx_broadcast);
908 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
909 I40E_GLPRT_UPTCL(hw->port),
910 pf->stat_offsets_loaded,
911 &osd->eth.tx_unicast,
912 &nsd->eth.tx_unicast);
913 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
914 I40E_GLPRT_MPTCL(hw->port),
915 pf->stat_offsets_loaded,
916 &osd->eth.tx_multicast,
917 &nsd->eth.tx_multicast);
918 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
919 I40E_GLPRT_BPTCL(hw->port),
920 pf->stat_offsets_loaded,
921 &osd->eth.tx_broadcast,
922 &nsd->eth.tx_broadcast);
923
924 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
925 pf->stat_offsets_loaded,
926 &osd->tx_dropped_link_down,
927 &nsd->tx_dropped_link_down);
928
929 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
930 pf->stat_offsets_loaded,
931 &osd->crc_errors, &nsd->crc_errors);
932
933 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
934 pf->stat_offsets_loaded,
935 &osd->illegal_bytes, &nsd->illegal_bytes);
936
937 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
938 pf->stat_offsets_loaded,
939 &osd->mac_local_faults,
940 &nsd->mac_local_faults);
941 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
942 pf->stat_offsets_loaded,
943 &osd->mac_remote_faults,
944 &nsd->mac_remote_faults);
945
946 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
947 pf->stat_offsets_loaded,
948 &osd->rx_length_errors,
949 &nsd->rx_length_errors);
950
951 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
952 pf->stat_offsets_loaded,
953 &osd->link_xon_rx, &nsd->link_xon_rx);
954 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
955 pf->stat_offsets_loaded,
956 &osd->link_xon_tx, &nsd->link_xon_tx);
957 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
958 pf->stat_offsets_loaded,
959 &osd->link_xoff_rx, &nsd->link_xoff_rx);
960 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
961 pf->stat_offsets_loaded,
962 &osd->link_xoff_tx, &nsd->link_xoff_tx);
963
964 for (i = 0; i < 8; i++) {
965 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
966 pf->stat_offsets_loaded,
967 &osd->priority_xoff_rx[i],
968 &nsd->priority_xoff_rx[i]);
969 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
970 pf->stat_offsets_loaded,
971 &osd->priority_xon_rx[i],
972 &nsd->priority_xon_rx[i]);
973 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
974 pf->stat_offsets_loaded,
975 &osd->priority_xon_tx[i],
976 &nsd->priority_xon_tx[i]);
977 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
978 pf->stat_offsets_loaded,
979 &osd->priority_xoff_tx[i],
980 &nsd->priority_xoff_tx[i]);
981 i40e_stat_update32(hw,
982 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
983 pf->stat_offsets_loaded,
984 &osd->priority_xon_2_xoff[i],
985 &nsd->priority_xon_2_xoff[i]);
986 }
987
988 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
989 I40E_GLPRT_PRC64L(hw->port),
990 pf->stat_offsets_loaded,
991 &osd->rx_size_64, &nsd->rx_size_64);
992 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
993 I40E_GLPRT_PRC127L(hw->port),
994 pf->stat_offsets_loaded,
995 &osd->rx_size_127, &nsd->rx_size_127);
996 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
997 I40E_GLPRT_PRC255L(hw->port),
998 pf->stat_offsets_loaded,
999 &osd->rx_size_255, &nsd->rx_size_255);
1000 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1001 I40E_GLPRT_PRC511L(hw->port),
1002 pf->stat_offsets_loaded,
1003 &osd->rx_size_511, &nsd->rx_size_511);
1004 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1005 I40E_GLPRT_PRC1023L(hw->port),
1006 pf->stat_offsets_loaded,
1007 &osd->rx_size_1023, &nsd->rx_size_1023);
1008 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1009 I40E_GLPRT_PRC1522L(hw->port),
1010 pf->stat_offsets_loaded,
1011 &osd->rx_size_1522, &nsd->rx_size_1522);
1012 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1013 I40E_GLPRT_PRC9522L(hw->port),
1014 pf->stat_offsets_loaded,
1015 &osd->rx_size_big, &nsd->rx_size_big);
1016
1017 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1018 I40E_GLPRT_PTC64L(hw->port),
1019 pf->stat_offsets_loaded,
1020 &osd->tx_size_64, &nsd->tx_size_64);
1021 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1022 I40E_GLPRT_PTC127L(hw->port),
1023 pf->stat_offsets_loaded,
1024 &osd->tx_size_127, &nsd->tx_size_127);
1025 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1026 I40E_GLPRT_PTC255L(hw->port),
1027 pf->stat_offsets_loaded,
1028 &osd->tx_size_255, &nsd->tx_size_255);
1029 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1030 I40E_GLPRT_PTC511L(hw->port),
1031 pf->stat_offsets_loaded,
1032 &osd->tx_size_511, &nsd->tx_size_511);
1033 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1034 I40E_GLPRT_PTC1023L(hw->port),
1035 pf->stat_offsets_loaded,
1036 &osd->tx_size_1023, &nsd->tx_size_1023);
1037 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1038 I40E_GLPRT_PTC1522L(hw->port),
1039 pf->stat_offsets_loaded,
1040 &osd->tx_size_1522, &nsd->tx_size_1522);
1041 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1042 I40E_GLPRT_PTC9522L(hw->port),
1043 pf->stat_offsets_loaded,
1044 &osd->tx_size_big, &nsd->tx_size_big);
1045
1046 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1047 pf->stat_offsets_loaded,
1048 &osd->rx_undersize, &nsd->rx_undersize);
1049 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1050 pf->stat_offsets_loaded,
1051 &osd->rx_fragments, &nsd->rx_fragments);
1052 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1053 pf->stat_offsets_loaded,
1054 &osd->rx_oversize, &nsd->rx_oversize);
1055 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1056 pf->stat_offsets_loaded,
1057 &osd->rx_jabber, &nsd->rx_jabber);
1058
1059
1060 i40e_stat_update_and_clear32(hw,
1061 I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
1062 &nsd->fd_atr_match);
1063 i40e_stat_update_and_clear32(hw,
1064 I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
1065 &nsd->fd_sb_match);
1066 i40e_stat_update_and_clear32(hw,
1067 I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
1068 &nsd->fd_atr_tunnel_match);
1069
1070 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1071 nsd->tx_lpi_status =
1072 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1073 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1074 nsd->rx_lpi_status =
1075 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1076 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1077 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1078 pf->stat_offsets_loaded,
1079 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1080 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1081 pf->stat_offsets_loaded,
1082 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1083
1084 if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1085 !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
1086 nsd->fd_sb_status = true;
1087 else
1088 nsd->fd_sb_status = false;
1089
1090 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1091 !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
1092 nsd->fd_atr_status = true;
1093 else
1094 nsd->fd_atr_status = false;
1095
1096 pf->stat_offsets_loaded = true;
1097}
1098
1099
1100
1101
1102
1103
1104
1105void i40e_update_stats(struct i40e_vsi *vsi)
1106{
1107 struct i40e_pf *pf = vsi->back;
1108
1109 if (vsi == pf->vsi[pf->lan_vsi])
1110 i40e_update_pf_stats(pf);
1111
1112 i40e_update_vsi_stats(vsi);
1113}
1114
1115
1116
1117
1118
1119
1120
1121int i40e_count_filters(struct i40e_vsi *vsi)
1122{
1123 struct i40e_mac_filter *f;
1124 struct hlist_node *h;
1125 int bkt;
1126 int cnt = 0;
1127
1128 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
1129 ++cnt;
1130
1131 return cnt;
1132}
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1143 const u8 *macaddr, s16 vlan)
1144{
1145 struct i40e_mac_filter *f;
1146 u64 key;
1147
1148 if (!vsi || !macaddr)
1149 return NULL;
1150
1151 key = i40e_addr_to_hkey(macaddr);
1152 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1153 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1154 (vlan == f->vlan))
1155 return f;
1156 }
1157 return NULL;
1158}
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
1169{
1170 struct i40e_mac_filter *f;
1171 u64 key;
1172
1173 if (!vsi || !macaddr)
1174 return NULL;
1175
1176 key = i40e_addr_to_hkey(macaddr);
1177 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1178 if ((ether_addr_equal(macaddr, f->macaddr)))
1179 return f;
1180 }
1181 return NULL;
1182}
1183
1184
1185
1186
1187
1188
1189
1190bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1191{
1192
1193 if (vsi->info.pvid)
1194 return true;
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216 return vsi->has_vlan_filter;
1217}
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
1249 struct hlist_head *tmp_add_list,
1250 struct hlist_head *tmp_del_list,
1251 int vlan_filters)
1252{
1253 s16 pvid = le16_to_cpu(vsi->info.pvid);
1254 struct i40e_mac_filter *f, *add_head;
1255 struct i40e_new_mac_filter *new;
1256 struct hlist_node *h;
1257 int bkt, new_vlan;
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274 hlist_for_each_entry(new, tmp_add_list, hlist) {
1275 if (pvid && new->f->vlan != pvid)
1276 new->f->vlan = pvid;
1277 else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
1278 new->f->vlan = 0;
1279 else if (!vlan_filters && new->f->vlan == 0)
1280 new->f->vlan = I40E_VLAN_ANY;
1281 }
1282
1283
1284 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1285
1286
1287
1288
1289
1290 if ((pvid && f->vlan != pvid) ||
1291 (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1292 (!vlan_filters && f->vlan == 0)) {
1293
1294 if (pvid)
1295 new_vlan = pvid;
1296 else if (vlan_filters)
1297 new_vlan = 0;
1298 else
1299 new_vlan = I40E_VLAN_ANY;
1300
1301
1302 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
1303 if (!add_head)
1304 return -ENOMEM;
1305
1306
1307 new = kzalloc(sizeof(*new), GFP_ATOMIC);
1308 if (!new)
1309 return -ENOMEM;
1310
1311 new->f = add_head;
1312 new->state = add_head->state;
1313
1314
1315 hlist_add_head(&new->hlist, tmp_add_list);
1316
1317
1318 f->state = I40E_FILTER_REMOVE;
1319 hash_del(&f->hlist);
1320 hlist_add_head(&f->hlist, tmp_del_list);
1321 }
1322 }
1323
1324 vsi->has_vlan_filter = !!vlan_filters;
1325
1326 return 0;
1327}
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1338{
1339 struct i40e_aqc_remove_macvlan_element_data element;
1340 struct i40e_pf *pf = vsi->back;
1341
1342
1343 if (vsi->type != I40E_VSI_MAIN)
1344 return;
1345
1346 memset(&element, 0, sizeof(element));
1347 ether_addr_copy(element.mac_addr, macaddr);
1348 element.vlan_tag = 0;
1349
1350 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1351 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1352
1353 memset(&element, 0, sizeof(element));
1354 ether_addr_copy(element.mac_addr, macaddr);
1355 element.vlan_tag = 0;
1356
1357 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1358 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1359 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1360}
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1374 const u8 *macaddr, s16 vlan)
1375{
1376 struct i40e_mac_filter *f;
1377 u64 key;
1378
1379 if (!vsi || !macaddr)
1380 return NULL;
1381
1382 f = i40e_find_filter(vsi, macaddr, vlan);
1383 if (!f) {
1384 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1385 if (!f)
1386 return NULL;
1387
1388
1389
1390
1391 if (vlan >= 0)
1392 vsi->has_vlan_filter = true;
1393
1394 ether_addr_copy(f->macaddr, macaddr);
1395 f->vlan = vlan;
1396 f->state = I40E_FILTER_NEW;
1397 INIT_HLIST_NODE(&f->hlist);
1398
1399 key = i40e_addr_to_hkey(macaddr);
1400 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1401
1402 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1403 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1404 }
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414 if (f->state == I40E_FILTER_REMOVE)
1415 f->state = I40E_FILTER_ACTIVE;
1416
1417 return f;
1418}
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
1436{
1437 if (!f)
1438 return;
1439
1440
1441
1442
1443
1444 if ((f->state == I40E_FILTER_FAILED) ||
1445 (f->state == I40E_FILTER_NEW)) {
1446 hash_del(&f->hlist);
1447 kfree(f);
1448 } else {
1449 f->state = I40E_FILTER_REMOVE;
1450 }
1451
1452 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1453 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1454}
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
1469{
1470 struct i40e_mac_filter *f;
1471
1472 if (!vsi || !macaddr)
1473 return;
1474
1475 f = i40e_find_filter(vsi, macaddr, vlan);
1476 __i40e_del_filter(vsi, f);
1477}
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
1492 const u8 *macaddr)
1493{
1494 struct i40e_mac_filter *f, *add = NULL;
1495 struct hlist_node *h;
1496 int bkt;
1497
1498 if (vsi->info.pvid)
1499 return i40e_add_filter(vsi, macaddr,
1500 le16_to_cpu(vsi->info.pvid));
1501
1502 if (!i40e_is_vsi_in_vlan(vsi))
1503 return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
1504
1505 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1506 if (f->state == I40E_FILTER_REMOVE)
1507 continue;
1508 add = i40e_add_filter(vsi, macaddr, f->vlan);
1509 if (!add)
1510 return NULL;
1511 }
1512
1513 return add;
1514}
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
1527{
1528 struct i40e_mac_filter *f;
1529 struct hlist_node *h;
1530 bool found = false;
1531 int bkt;
1532
1533 lockdep_assert_held(&vsi->mac_filter_hash_lock);
1534 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1535 if (ether_addr_equal(macaddr, f->macaddr)) {
1536 __i40e_del_filter(vsi, f);
1537 found = true;
1538 }
1539 }
1540
1541 if (found)
1542 return 0;
1543 else
1544 return -ENOENT;
1545}
1546
1547
1548
1549
1550
1551
1552
1553
1554static int i40e_set_mac(struct net_device *netdev, void *p)
1555{
1556 struct i40e_netdev_priv *np = netdev_priv(netdev);
1557 struct i40e_vsi *vsi = np->vsi;
1558 struct i40e_pf *pf = vsi->back;
1559 struct i40e_hw *hw = &pf->hw;
1560 struct sockaddr *addr = p;
1561
1562 if (!is_valid_ether_addr(addr->sa_data))
1563 return -EADDRNOTAVAIL;
1564
1565 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1566 netdev_info(netdev, "already using mac address %pM\n",
1567 addr->sa_data);
1568 return 0;
1569 }
1570
1571 if (test_bit(__I40E_DOWN, pf->state) ||
1572 test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
1573 return -EADDRNOTAVAIL;
1574
1575 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1576 netdev_info(netdev, "returning to hw mac address %pM\n",
1577 hw->mac.addr);
1578 else
1579 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1580
1581
1582
1583
1584
1585
1586
1587 spin_lock_bh(&vsi->mac_filter_hash_lock);
1588 i40e_del_mac_filter(vsi, netdev->dev_addr);
1589 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1590 i40e_add_mac_filter(vsi, netdev->dev_addr);
1591 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1592
1593 if (vsi->type == I40E_VSI_MAIN) {
1594 i40e_status ret;
1595
1596 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
1597 addr->sa_data, NULL);
1598 if (ret)
1599 netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
1600 i40e_stat_str(hw, ret),
1601 i40e_aq_str(hw, hw->aq.asq_last_status));
1602 }
1603
1604
1605
1606
1607 i40e_service_event_schedule(pf);
1608 return 0;
1609}
1610
1611
1612
1613
1614
1615
1616
1617
1618static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
1619 u8 *lut, u16 lut_size)
1620{
1621 struct i40e_pf *pf = vsi->back;
1622 struct i40e_hw *hw = &pf->hw;
1623 int ret = 0;
1624
1625 if (seed) {
1626 struct i40e_aqc_get_set_rss_key_data *seed_dw =
1627 (struct i40e_aqc_get_set_rss_key_data *)seed;
1628 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
1629 if (ret) {
1630 dev_info(&pf->pdev->dev,
1631 "Cannot set RSS key, err %s aq_err %s\n",
1632 i40e_stat_str(hw, ret),
1633 i40e_aq_str(hw, hw->aq.asq_last_status));
1634 return ret;
1635 }
1636 }
1637 if (lut) {
1638 bool pf_lut = vsi->type == I40E_VSI_MAIN;
1639
1640 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
1641 if (ret) {
1642 dev_info(&pf->pdev->dev,
1643 "Cannot set RSS lut, err %s aq_err %s\n",
1644 i40e_stat_str(hw, ret),
1645 i40e_aq_str(hw, hw->aq.asq_last_status));
1646 return ret;
1647 }
1648 }
1649 return ret;
1650}
1651
1652
1653
1654
1655
1656static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
1657{
1658 struct i40e_pf *pf = vsi->back;
1659 u8 seed[I40E_HKEY_ARRAY_SIZE];
1660 u8 *lut;
1661 int ret;
1662
1663 if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
1664 return 0;
1665 if (!vsi->rss_size)
1666 vsi->rss_size = min_t(int, pf->alloc_rss_size,
1667 vsi->num_queue_pairs);
1668 if (!vsi->rss_size)
1669 return -EINVAL;
1670 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1671 if (!lut)
1672 return -ENOMEM;
1673
1674
1675
1676
1677 if (vsi->rss_lut_user)
1678 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1679 else
1680 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
1681 if (vsi->rss_hkey_user)
1682 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
1683 else
1684 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
1685 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
1686 kfree(lut);
1687 return ret;
1688}
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
1699 struct i40e_vsi_context *ctxt,
1700 u8 enabled_tc)
1701{
1702 u16 qcount = 0, max_qcount, qmap, sections = 0;
1703 int i, override_q, pow, num_qps, ret;
1704 u8 netdev_tc = 0, offset = 0;
1705
1706 if (vsi->type != I40E_VSI_MAIN)
1707 return -EINVAL;
1708 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1709 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1710 vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
1711 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1712 num_qps = vsi->mqprio_qopt.qopt.count[0];
1713
1714
1715 pow = ilog2(num_qps);
1716 if (!is_power_of_2(num_qps))
1717 pow++;
1718 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1719 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1720
1721
1722 max_qcount = vsi->mqprio_qopt.qopt.count[0];
1723 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1724
1725 if (vsi->tc_config.enabled_tc & BIT(i)) {
1726 offset = vsi->mqprio_qopt.qopt.offset[i];
1727 qcount = vsi->mqprio_qopt.qopt.count[i];
1728 if (qcount > max_qcount)
1729 max_qcount = qcount;
1730 vsi->tc_config.tc_info[i].qoffset = offset;
1731 vsi->tc_config.tc_info[i].qcount = qcount;
1732 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1733 } else {
1734
1735
1736
1737
1738 vsi->tc_config.tc_info[i].qoffset = 0;
1739 vsi->tc_config.tc_info[i].qcount = 1;
1740 vsi->tc_config.tc_info[i].netdev_tc = 0;
1741 }
1742 }
1743
1744
1745 vsi->num_queue_pairs = offset + qcount;
1746
1747
1748 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1749 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1750 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1751 ctxt->info.valid_sections |= cpu_to_le16(sections);
1752
1753
1754 vsi->rss_size = max_qcount;
1755 ret = i40e_vsi_config_rss(vsi);
1756 if (ret) {
1757 dev_info(&vsi->back->pdev->dev,
1758 "Failed to reconfig rss for num_queues (%u)\n",
1759 max_qcount);
1760 return ret;
1761 }
1762 vsi->reconfig_rss = true;
1763 dev_dbg(&vsi->back->pdev->dev,
1764 "Reconfigured rss with num_queues (%u)\n", max_qcount);
1765
1766
1767
1768
1769 override_q = vsi->mqprio_qopt.qopt.count[0];
1770 if (override_q && override_q < vsi->num_queue_pairs) {
1771 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
1772 vsi->next_base_queue = override_q;
1773 }
1774 return 0;
1775}
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1787 struct i40e_vsi_context *ctxt,
1788 u8 enabled_tc,
1789 bool is_add)
1790{
1791 struct i40e_pf *pf = vsi->back;
1792 u16 sections = 0;
1793 u8 netdev_tc = 0;
1794 u16 numtc = 1;
1795 u16 qcount;
1796 u8 offset;
1797 u16 qmap;
1798 int i;
1799 u16 num_tc_qps = 0;
1800
1801 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1802 offset = 0;
1803
1804
1805 num_tc_qps = vsi->alloc_queue_pairs;
1806 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1807
1808 for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1809 if (enabled_tc & BIT(i))
1810 numtc++;
1811 }
1812 if (!numtc) {
1813 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1814 numtc = 1;
1815 }
1816 num_tc_qps = num_tc_qps / numtc;
1817 num_tc_qps = min_t(int, num_tc_qps,
1818 i40e_pf_get_max_q_per_tc(pf));
1819 }
1820
1821 vsi->tc_config.numtc = numtc;
1822 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1823
1824
1825 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1826 num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
1827
1828
1829 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1830
1831 if (vsi->tc_config.enabled_tc & BIT(i)) {
1832
1833 int pow, num_qps;
1834
1835 switch (vsi->type) {
1836 case I40E_VSI_MAIN:
1837 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
1838 I40E_FLAG_FD_ATR_ENABLED)) ||
1839 vsi->tc_config.enabled_tc != 1) {
1840 qcount = min_t(int, pf->alloc_rss_size,
1841 num_tc_qps);
1842 break;
1843 }
1844 fallthrough;
1845 case I40E_VSI_FDIR:
1846 case I40E_VSI_SRIOV:
1847 case I40E_VSI_VMDQ2:
1848 default:
1849 qcount = num_tc_qps;
1850 WARN_ON(i != 0);
1851 break;
1852 }
1853 vsi->tc_config.tc_info[i].qoffset = offset;
1854 vsi->tc_config.tc_info[i].qcount = qcount;
1855
1856
1857 num_qps = qcount;
1858 pow = 0;
1859 while (num_qps && (BIT_ULL(pow) < qcount)) {
1860 pow++;
1861 num_qps >>= 1;
1862 }
1863
1864 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1865 qmap =
1866 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1867 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1868
1869 offset += qcount;
1870 } else {
1871
1872
1873
1874
1875 vsi->tc_config.tc_info[i].qoffset = 0;
1876 vsi->tc_config.tc_info[i].qcount = 1;
1877 vsi->tc_config.tc_info[i].netdev_tc = 0;
1878
1879 qmap = 0;
1880 }
1881 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1882 }
1883
1884
1885 vsi->num_queue_pairs = offset;
1886 if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
1887 if (vsi->req_queue_pairs > 0)
1888 vsi->num_queue_pairs = vsi->req_queue_pairs;
1889 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1890 vsi->num_queue_pairs = pf->num_lan_msix;
1891 }
1892
1893
1894 if (is_add) {
1895 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1896
1897 ctxt->info.up_enable_bits = enabled_tc;
1898 }
1899 if (vsi->type == I40E_VSI_SRIOV) {
1900 ctxt->info.mapping_flags |=
1901 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1902 for (i = 0; i < vsi->num_queue_pairs; i++)
1903 ctxt->info.queue_mapping[i] =
1904 cpu_to_le16(vsi->base_queue + i);
1905 } else {
1906 ctxt->info.mapping_flags |=
1907 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1908 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1909 }
1910 ctxt->info.valid_sections |= cpu_to_le16(sections);
1911}
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1922{
1923 struct i40e_netdev_priv *np = netdev_priv(netdev);
1924 struct i40e_vsi *vsi = np->vsi;
1925
1926 if (i40e_add_mac_filter(vsi, addr))
1927 return 0;
1928 else
1929 return -ENOMEM;
1930}
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1941{
1942 struct i40e_netdev_priv *np = netdev_priv(netdev);
1943 struct i40e_vsi *vsi = np->vsi;
1944
1945
1946
1947
1948
1949
1950 if (ether_addr_equal(addr, netdev->dev_addr))
1951 return 0;
1952
1953 i40e_del_mac_filter(vsi, addr);
1954
1955 return 0;
1956}
1957
1958
1959
1960
1961
1962static void i40e_set_rx_mode(struct net_device *netdev)
1963{
1964 struct i40e_netdev_priv *np = netdev_priv(netdev);
1965 struct i40e_vsi *vsi = np->vsi;
1966
1967 spin_lock_bh(&vsi->mac_filter_hash_lock);
1968
1969 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1970 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1971
1972 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1973
1974
1975 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1976 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1977 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1978 }
1979}
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1990 struct hlist_head *from)
1991{
1992 struct i40e_mac_filter *f;
1993 struct hlist_node *h;
1994
1995 hlist_for_each_entry_safe(f, h, from, hlist) {
1996 u64 key = i40e_addr_to_hkey(f->macaddr);
1997
1998
1999 hlist_del(&f->hlist);
2000 hash_add(vsi->mac_filter_hash, &f->hlist, key);
2001 }
2002}
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
2013 struct hlist_head *from)
2014{
2015 struct i40e_new_mac_filter *new;
2016 struct hlist_node *h;
2017
2018 hlist_for_each_entry_safe(new, h, from, hlist) {
2019
2020 hlist_del(&new->hlist);
2021 kfree(new);
2022 }
2023}
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033static
2034struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2035{
2036 hlist_for_each_entry_continue(next, hlist) {
2037 if (!is_broadcast_ether_addr(next->f->macaddr))
2038 return next;
2039 }
2040
2041 return NULL;
2042}
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054static int
2055i40e_update_filter_state(int count,
2056 struct i40e_aqc_add_macvlan_element_data *add_list,
2057 struct i40e_new_mac_filter *add_head)
2058{
2059 int retval = 0;
2060 int i;
2061
2062 for (i = 0; i < count; i++) {
2063
2064
2065
2066
2067
2068
2069 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2070 add_head->state = I40E_FILTER_FAILED;
2071 } else {
2072 add_head->state = I40E_FILTER_ACTIVE;
2073 retval++;
2074 }
2075
2076 add_head = i40e_next_filter(add_head);
2077 if (!add_head)
2078 break;
2079 }
2080
2081 return retval;
2082}
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097static
2098void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2099 struct i40e_aqc_remove_macvlan_element_data *list,
2100 int num_del, int *retval)
2101{
2102 struct i40e_hw *hw = &vsi->back->hw;
2103 i40e_status aq_ret;
2104 int aq_err;
2105
2106 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
2107 aq_err = hw->aq.asq_last_status;
2108
2109
2110 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
2111 *retval = -EIO;
2112 dev_info(&vsi->back->pdev->dev,
2113 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2114 vsi_name, i40e_stat_str(hw, aq_ret),
2115 i40e_aq_str(hw, aq_err));
2116 }
2117}
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131static
2132void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2133 struct i40e_aqc_add_macvlan_element_data *list,
2134 struct i40e_new_mac_filter *add_head,
2135 int num_add)
2136{
2137 struct i40e_hw *hw = &vsi->back->hw;
2138 int aq_err, fcnt;
2139
2140 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
2141 aq_err = hw->aq.asq_last_status;
2142 fcnt = i40e_update_filter_state(num_add, list, add_head);
2143
2144 if (fcnt != num_add) {
2145 if (vsi->type == I40E_VSI_MAIN) {
2146 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2147 dev_warn(&vsi->back->pdev->dev,
2148 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2149 i40e_aq_str(hw, aq_err), vsi_name);
2150 } else if (vsi->type == I40E_VSI_SRIOV ||
2151 vsi->type == I40E_VSI_VMDQ1 ||
2152 vsi->type == I40E_VSI_VMDQ2) {
2153 dev_warn(&vsi->back->pdev->dev,
2154 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
2155 i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
2156 } else {
2157 dev_warn(&vsi->back->pdev->dev,
2158 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
2159 i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
2160 }
2161 }
2162}
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176static i40e_status
2177i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2178 struct i40e_mac_filter *f)
2179{
2180 bool enable = f->state == I40E_FILTER_NEW;
2181 struct i40e_hw *hw = &vsi->back->hw;
2182 i40e_status aq_ret;
2183
2184 if (f->vlan == I40E_VLAN_ANY) {
2185 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2186 vsi->seid,
2187 enable,
2188 NULL);
2189 } else {
2190 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2191 vsi->seid,
2192 enable,
2193 f->vlan,
2194 NULL);
2195 }
2196
2197 if (aq_ret) {
2198 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2199 dev_warn(&vsi->back->pdev->dev,
2200 "Error %s, forcing overflow promiscuous on %s\n",
2201 i40e_aq_str(hw, hw->aq.asq_last_status),
2202 vsi_name);
2203 }
2204
2205 return aq_ret;
2206}
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2218{
2219 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2220 struct i40e_hw *hw = &pf->hw;
2221 i40e_status aq_ret;
2222
2223 if (vsi->type == I40E_VSI_MAIN &&
2224 pf->lan_veb != I40E_NO_VEB &&
2225 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2226
2227
2228
2229
2230
2231 if (promisc)
2232 aq_ret = i40e_aq_set_default_vsi(hw,
2233 vsi->seid,
2234 NULL);
2235 else
2236 aq_ret = i40e_aq_clear_default_vsi(hw,
2237 vsi->seid,
2238 NULL);
2239 if (aq_ret) {
2240 dev_info(&pf->pdev->dev,
2241 "Set default VSI failed, err %s, aq_err %s\n",
2242 i40e_stat_str(hw, aq_ret),
2243 i40e_aq_str(hw, hw->aq.asq_last_status));
2244 }
2245 } else {
2246 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2247 hw,
2248 vsi->seid,
2249 promisc, NULL,
2250 true);
2251 if (aq_ret) {
2252 dev_info(&pf->pdev->dev,
2253 "set unicast promisc failed, err %s, aq_err %s\n",
2254 i40e_stat_str(hw, aq_ret),
2255 i40e_aq_str(hw, hw->aq.asq_last_status));
2256 }
2257 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2258 hw,
2259 vsi->seid,
2260 promisc, NULL);
2261 if (aq_ret) {
2262 dev_info(&pf->pdev->dev,
2263 "set multicast promisc failed, err %s, aq_err %s\n",
2264 i40e_stat_str(hw, aq_ret),
2265 i40e_aq_str(hw, hw->aq.asq_last_status));
2266 }
2267 }
2268
2269 if (!aq_ret)
2270 pf->cur_promisc = promisc;
2271
2272 return aq_ret;
2273}
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2284{
2285 struct hlist_head tmp_add_list, tmp_del_list;
2286 struct i40e_mac_filter *f;
2287 struct i40e_new_mac_filter *new, *add_head = NULL;
2288 struct i40e_hw *hw = &vsi->back->hw;
2289 bool old_overflow, new_overflow;
2290 unsigned int failed_filters = 0;
2291 unsigned int vlan_filters = 0;
2292 char vsi_name[16] = "PF";
2293 int filter_list_len = 0;
2294 i40e_status aq_ret = 0;
2295 u32 changed_flags = 0;
2296 struct hlist_node *h;
2297 struct i40e_pf *pf;
2298 int num_add = 0;
2299 int num_del = 0;
2300 int retval = 0;
2301 u16 cmd_flags;
2302 int list_size;
2303 int bkt;
2304
2305
2306 struct i40e_aqc_add_macvlan_element_data *add_list;
2307 struct i40e_aqc_remove_macvlan_element_data *del_list;
2308
2309 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2310 usleep_range(1000, 2000);
2311 pf = vsi->back;
2312
2313 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2314
2315 if (vsi->netdev) {
2316 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2317 vsi->current_netdev_flags = vsi->netdev->flags;
2318 }
2319
2320 INIT_HLIST_HEAD(&tmp_add_list);
2321 INIT_HLIST_HEAD(&tmp_del_list);
2322
2323 if (vsi->type == I40E_VSI_SRIOV)
2324 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2325 else if (vsi->type != I40E_VSI_MAIN)
2326 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2327
2328 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2329 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2330
2331 spin_lock_bh(&vsi->mac_filter_hash_lock);
2332
2333 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2334 if (f->state == I40E_FILTER_REMOVE) {
2335
2336 hash_del(&f->hlist);
2337 hlist_add_head(&f->hlist, &tmp_del_list);
2338
2339
2340 continue;
2341 }
2342 if (f->state == I40E_FILTER_NEW) {
2343
2344 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2345 if (!new)
2346 goto err_no_memory_locked;
2347
2348
2349 new->f = f;
2350 new->state = f->state;
2351
2352
2353 hlist_add_head(&new->hlist, &tmp_add_list);
2354 }
2355
2356
2357
2358
2359
2360 if (f->vlan > 0)
2361 vlan_filters++;
2362 }
2363
2364 retval = i40e_correct_mac_vlan_filters(vsi,
2365 &tmp_add_list,
2366 &tmp_del_list,
2367 vlan_filters);
2368 if (retval)
2369 goto err_no_memory_locked;
2370
2371 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2372 }
2373
2374
2375 if (!hlist_empty(&tmp_del_list)) {
2376 filter_list_len = hw->aq.asq_buf_size /
2377 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2378 list_size = filter_list_len *
2379 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2380 del_list = kzalloc(list_size, GFP_ATOMIC);
2381 if (!del_list)
2382 goto err_no_memory;
2383
2384 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2385 cmd_flags = 0;
2386
2387
2388
2389
2390 if (is_broadcast_ether_addr(f->macaddr)) {
2391 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2392
2393 hlist_del(&f->hlist);
2394 kfree(f);
2395 continue;
2396 }
2397
2398
2399 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2400 if (f->vlan == I40E_VLAN_ANY) {
2401 del_list[num_del].vlan_tag = 0;
2402 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2403 } else {
2404 del_list[num_del].vlan_tag =
2405 cpu_to_le16((u16)(f->vlan));
2406 }
2407
2408 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2409 del_list[num_del].flags = cmd_flags;
2410 num_del++;
2411
2412
2413 if (num_del == filter_list_len) {
2414 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2415 num_del, &retval);
2416 memset(del_list, 0, list_size);
2417 num_del = 0;
2418 }
2419
2420
2421
2422 hlist_del(&f->hlist);
2423 kfree(f);
2424 }
2425
2426 if (num_del) {
2427 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2428 num_del, &retval);
2429 }
2430
2431 kfree(del_list);
2432 del_list = NULL;
2433 }
2434
2435 if (!hlist_empty(&tmp_add_list)) {
2436
2437 filter_list_len = hw->aq.asq_buf_size /
2438 sizeof(struct i40e_aqc_add_macvlan_element_data);
2439 list_size = filter_list_len *
2440 sizeof(struct i40e_aqc_add_macvlan_element_data);
2441 add_list = kzalloc(list_size, GFP_ATOMIC);
2442 if (!add_list)
2443 goto err_no_memory;
2444
2445 num_add = 0;
2446 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2447
2448
2449
2450 if (is_broadcast_ether_addr(new->f->macaddr)) {
2451 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2452 new->f))
2453 new->state = I40E_FILTER_FAILED;
2454 else
2455 new->state = I40E_FILTER_ACTIVE;
2456 continue;
2457 }
2458
2459
2460 if (num_add == 0)
2461 add_head = new;
2462 cmd_flags = 0;
2463 ether_addr_copy(add_list[num_add].mac_addr,
2464 new->f->macaddr);
2465 if (new->f->vlan == I40E_VLAN_ANY) {
2466 add_list[num_add].vlan_tag = 0;
2467 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2468 } else {
2469 add_list[num_add].vlan_tag =
2470 cpu_to_le16((u16)(new->f->vlan));
2471 }
2472 add_list[num_add].queue_number = 0;
2473
2474 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2475 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2476 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2477 num_add++;
2478
2479
2480 if (num_add == filter_list_len) {
2481 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2482 add_head, num_add);
2483 memset(add_list, 0, list_size);
2484 num_add = 0;
2485 }
2486 }
2487 if (num_add) {
2488 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2489 num_add);
2490 }
2491
2492
2493
2494 spin_lock_bh(&vsi->mac_filter_hash_lock);
2495 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2496
2497 if (new->f->state == I40E_FILTER_NEW)
2498 new->f->state = new->state;
2499 hlist_del(&new->hlist);
2500 kfree(new);
2501 }
2502 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2503 kfree(add_list);
2504 add_list = NULL;
2505 }
2506
2507
2508 spin_lock_bh(&vsi->mac_filter_hash_lock);
2509 vsi->active_filters = 0;
2510 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2511 if (f->state == I40E_FILTER_ACTIVE)
2512 vsi->active_filters++;
2513 else if (f->state == I40E_FILTER_FAILED)
2514 failed_filters++;
2515 }
2516 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2517
2518
2519
2520
2521
2522 if (old_overflow && !failed_filters &&
2523 vsi->active_filters < vsi->promisc_threshold) {
2524 dev_info(&pf->pdev->dev,
2525 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2526 vsi_name);
2527 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2528 vsi->promisc_threshold = 0;
2529 }
2530
2531
2532 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2533 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2534 goto out;
2535 }
2536
2537 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2538
2539
2540
2541
2542 if (!old_overflow && new_overflow)
2543 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
2544
2545
2546 if (changed_flags & IFF_ALLMULTI) {
2547 bool cur_multipromisc;
2548
2549 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2550 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2551 vsi->seid,
2552 cur_multipromisc,
2553 NULL);
2554 if (aq_ret) {
2555 retval = i40e_aq_rc_to_posix(aq_ret,
2556 hw->aq.asq_last_status);
2557 dev_info(&pf->pdev->dev,
2558 "set multi promisc failed on %s, err %s aq_err %s\n",
2559 vsi_name,
2560 i40e_stat_str(hw, aq_ret),
2561 i40e_aq_str(hw, hw->aq.asq_last_status));
2562 } else {
2563 dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
2564 vsi->netdev->name,
2565 cur_multipromisc ? "entering" : "leaving");
2566 }
2567 }
2568
2569 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2570 bool cur_promisc;
2571
2572 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2573 new_overflow);
2574 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2575 if (aq_ret) {
2576 retval = i40e_aq_rc_to_posix(aq_ret,
2577 hw->aq.asq_last_status);
2578 dev_info(&pf->pdev->dev,
2579 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2580 cur_promisc ? "on" : "off",
2581 vsi_name,
2582 i40e_stat_str(hw, aq_ret),
2583 i40e_aq_str(hw, hw->aq.asq_last_status));
2584 }
2585 }
2586out:
2587
2588 if (retval)
2589 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2590
2591 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2592 return retval;
2593
2594err_no_memory:
2595
2596 spin_lock_bh(&vsi->mac_filter_hash_lock);
2597err_no_memory_locked:
2598 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2599 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2600 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2601
2602 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2603 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2604 return -ENOMEM;
2605}
2606
2607
2608
2609
2610
2611static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2612{
2613 int v;
2614
2615 if (!pf)
2616 return;
2617 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2618 return;
2619 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
2620 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2621 return;
2622 }
2623
2624 for (v = 0; v < pf->num_alloc_vsi; v++) {
2625 if (pf->vsi[v] &&
2626 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2627 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2628
2629 if (ret) {
2630
2631 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2632 pf->state);
2633 break;
2634 }
2635 }
2636 }
2637 clear_bit(__I40E_VF_DISABLE, pf->state);
2638}
2639
2640
2641
2642
2643
2644static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2645{
2646 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2647 return I40E_RXBUFFER_2048;
2648 else
2649 return I40E_RXBUFFER_3072;
2650}
2651
2652
2653
2654
2655
2656
2657
2658
2659static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2660{
2661 struct i40e_netdev_priv *np = netdev_priv(netdev);
2662 struct i40e_vsi *vsi = np->vsi;
2663 struct i40e_pf *pf = vsi->back;
2664
2665 if (i40e_enabled_xdp_vsi(vsi)) {
2666 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2667
2668 if (frame_size > i40e_max_xdp_frame_size(vsi))
2669 return -EINVAL;
2670 }
2671
2672 netdev_dbg(netdev, "changing MTU from %d to %d\n",
2673 netdev->mtu, new_mtu);
2674 netdev->mtu = new_mtu;
2675 if (netif_running(netdev))
2676 i40e_vsi_reinit_locked(vsi);
2677 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2678 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2679 return 0;
2680}
2681
2682
2683
2684
2685
2686
2687
2688int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2689{
2690 struct i40e_netdev_priv *np = netdev_priv(netdev);
2691 struct i40e_pf *pf = np->vsi->back;
2692
2693 switch (cmd) {
2694 case SIOCGHWTSTAMP:
2695 return i40e_ptp_get_ts_config(pf, ifr);
2696 case SIOCSHWTSTAMP:
2697 return i40e_ptp_set_ts_config(pf, ifr);
2698 default:
2699 return -EOPNOTSUPP;
2700 }
2701}
2702
2703
2704
2705
2706
2707void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2708{
2709 struct i40e_vsi_context ctxt;
2710 i40e_status ret;
2711
2712
2713 if (vsi->info.pvid)
2714 return;
2715
2716 if ((vsi->info.valid_sections &
2717 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2718 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2719 return;
2720
2721 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2722 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2723 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2724
2725 ctxt.seid = vsi->seid;
2726 ctxt.info = vsi->info;
2727 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2728 if (ret) {
2729 dev_info(&vsi->back->pdev->dev,
2730 "update vlan stripping failed, err %s aq_err %s\n",
2731 i40e_stat_str(&vsi->back->hw, ret),
2732 i40e_aq_str(&vsi->back->hw,
2733 vsi->back->hw.aq.asq_last_status));
2734 }
2735}
2736
2737
2738
2739
2740
2741void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2742{
2743 struct i40e_vsi_context ctxt;
2744 i40e_status ret;
2745
2746
2747 if (vsi->info.pvid)
2748 return;
2749
2750 if ((vsi->info.valid_sections &
2751 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2752 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2753 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2754 return;
2755
2756 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2757 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2758 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2759
2760 ctxt.seid = vsi->seid;
2761 ctxt.info = vsi->info;
2762 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2763 if (ret) {
2764 dev_info(&vsi->back->pdev->dev,
2765 "update vlan stripping failed, err %s aq_err %s\n",
2766 i40e_stat_str(&vsi->back->hw, ret),
2767 i40e_aq_str(&vsi->back->hw,
2768 vsi->back->hw.aq.asq_last_status));
2769 }
2770}
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2786{
2787 struct i40e_mac_filter *f, *add_f;
2788 struct hlist_node *h;
2789 int bkt;
2790
2791 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2792 if (f->state == I40E_FILTER_REMOVE)
2793 continue;
2794 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2795 if (!add_f) {
2796 dev_info(&vsi->back->pdev->dev,
2797 "Could not add vlan filter %d for %pM\n",
2798 vid, f->macaddr);
2799 return -ENOMEM;
2800 }
2801 }
2802
2803 return 0;
2804}
2805
2806
2807
2808
2809
2810
2811int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2812{
2813 int err;
2814
2815 if (vsi->info.pvid)
2816 return -EINVAL;
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826 if (!vid)
2827 return 0;
2828
2829
2830 spin_lock_bh(&vsi->mac_filter_hash_lock);
2831 err = i40e_add_vlan_all_mac(vsi, vid);
2832 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2833 if (err)
2834 return err;
2835
2836
2837
2838
2839 i40e_service_event_schedule(vsi->back);
2840 return 0;
2841}
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2857{
2858 struct i40e_mac_filter *f;
2859 struct hlist_node *h;
2860 int bkt;
2861
2862 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2863 if (f->vlan == vid)
2864 __i40e_del_filter(vsi, f);
2865 }
2866}
2867
2868
2869
2870
2871
2872
2873void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2874{
2875 if (!vid || vsi->info.pvid)
2876 return;
2877
2878 spin_lock_bh(&vsi->mac_filter_hash_lock);
2879 i40e_rm_vlan_all_mac(vsi, vid);
2880 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2881
2882
2883
2884
2885 i40e_service_event_schedule(vsi->back);
2886}
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2897 __always_unused __be16 proto, u16 vid)
2898{
2899 struct i40e_netdev_priv *np = netdev_priv(netdev);
2900 struct i40e_vsi *vsi = np->vsi;
2901 int ret = 0;
2902
2903 if (vid >= VLAN_N_VID)
2904 return -EINVAL;
2905
2906 ret = i40e_vsi_add_vlan(vsi, vid);
2907 if (!ret)
2908 set_bit(vid, vsi->active_vlans);
2909
2910 return ret;
2911}
2912
2913
2914
2915
2916
2917
2918
2919static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
2920 __always_unused __be16 proto, u16 vid)
2921{
2922 struct i40e_netdev_priv *np = netdev_priv(netdev);
2923 struct i40e_vsi *vsi = np->vsi;
2924
2925 if (vid >= VLAN_N_VID)
2926 return;
2927 set_bit(vid, vsi->active_vlans);
2928}
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2939 __always_unused __be16 proto, u16 vid)
2940{
2941 struct i40e_netdev_priv *np = netdev_priv(netdev);
2942 struct i40e_vsi *vsi = np->vsi;
2943
2944
2945
2946
2947
2948 i40e_vsi_kill_vlan(vsi, vid);
2949
2950 clear_bit(vid, vsi->active_vlans);
2951
2952 return 0;
2953}
2954
2955
2956
2957
2958
2959static void i40e_restore_vlan(struct i40e_vsi *vsi)
2960{
2961 u16 vid;
2962
2963 if (!vsi->netdev)
2964 return;
2965
2966 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2967 i40e_vlan_stripping_enable(vsi);
2968 else
2969 i40e_vlan_stripping_disable(vsi);
2970
2971 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2972 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
2973 vid);
2974}
2975
2976
2977
2978
2979
2980
2981int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2982{
2983 struct i40e_vsi_context ctxt;
2984 i40e_status ret;
2985
2986 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2987 vsi->info.pvid = cpu_to_le16(vid);
2988 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2989 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2990 I40E_AQ_VSI_PVLAN_EMOD_STR;
2991
2992 ctxt.seid = vsi->seid;
2993 ctxt.info = vsi->info;
2994 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2995 if (ret) {
2996 dev_info(&vsi->back->pdev->dev,
2997 "add pvid failed, err %s aq_err %s\n",
2998 i40e_stat_str(&vsi->back->hw, ret),
2999 i40e_aq_str(&vsi->back->hw,
3000 vsi->back->hw.aq.asq_last_status));
3001 return -ENOENT;
3002 }
3003
3004 return 0;
3005}
3006
3007
3008
3009
3010
3011
3012
3013void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
3014{
3015 vsi->info.pvid = 0;
3016
3017 i40e_vlan_stripping_disable(vsi);
3018}
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
3031{
3032 int i, err = 0;
3033
3034 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3035 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3036
3037 if (!i40e_enabled_xdp_vsi(vsi))
3038 return err;
3039
3040 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3041 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3042
3043 return err;
3044}
3045
3046
3047
3048
3049
3050
3051
3052static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3053{
3054 int i;
3055
3056 if (vsi->tx_rings) {
3057 for (i = 0; i < vsi->num_queue_pairs; i++)
3058 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3059 i40e_free_tx_resources(vsi->tx_rings[i]);
3060 }
3061
3062 if (vsi->xdp_rings) {
3063 for (i = 0; i < vsi->num_queue_pairs; i++)
3064 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3065 i40e_free_tx_resources(vsi->xdp_rings[i]);
3066 }
3067}
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3080{
3081 int i, err = 0;
3082
3083 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3084 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3085 return err;
3086}
3087
3088
3089
3090
3091
3092
3093
3094static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3095{
3096 int i;
3097
3098 if (!vsi->rx_rings)
3099 return;
3100
3101 for (i = 0; i < vsi->num_queue_pairs; i++)
3102 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3103 i40e_free_rx_resources(vsi->rx_rings[i]);
3104}
3105
3106
3107
3108
3109
3110
3111
3112
3113static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3114{
3115 int cpu;
3116
3117 if (!ring->q_vector || !ring->netdev || ring->ch)
3118 return;
3119
3120
3121 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3122 return;
3123
3124 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3125 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3126 ring->queue_index);
3127}
3128
3129
3130
3131
3132
3133
3134
3135static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
3136{
3137 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3138 int qid = ring->queue_index;
3139
3140 if (ring_is_xdp(ring))
3141 qid -= ring->vsi->alloc_queue_pairs;
3142
3143 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3144 return NULL;
3145
3146 return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
3147}
3148
3149
3150
3151
3152
3153
3154
3155static int i40e_configure_tx_ring(struct i40e_ring *ring)
3156{
3157 struct i40e_vsi *vsi = ring->vsi;
3158 u16 pf_q = vsi->base_queue + ring->queue_index;
3159 struct i40e_hw *hw = &vsi->back->hw;
3160 struct i40e_hmc_obj_txq tx_ctx;
3161 i40e_status err = 0;
3162 u32 qtx_ctl = 0;
3163
3164 if (ring_is_xdp(ring))
3165 ring->xsk_pool = i40e_xsk_pool(ring);
3166
3167
3168 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3169 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3170 ring->atr_count = 0;
3171 } else {
3172 ring->atr_sample_rate = 0;
3173 }
3174
3175
3176 i40e_config_xps_tx_ring(ring);
3177
3178
3179 memset(&tx_ctx, 0, sizeof(tx_ctx));
3180
3181 tx_ctx.new_context = 1;
3182 tx_ctx.base = (ring->dma / 128);
3183 tx_ctx.qlen = ring->count;
3184 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3185 I40E_FLAG_FD_ATR_ENABLED));
3186 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3187
3188 if (vsi->type != I40E_VSI_FDIR)
3189 tx_ctx.head_wb_ena = 1;
3190 tx_ctx.head_wb_addr = ring->dma +
3191 (ring->count * sizeof(struct i40e_tx_desc));
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204 if (ring->ch)
3205 tx_ctx.rdylist =
3206 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3207
3208 else
3209 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3210
3211 tx_ctx.rdylist_act = 0;
3212
3213
3214 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3215 if (err) {
3216 dev_info(&vsi->back->pdev->dev,
3217 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3218 ring->queue_index, pf_q, err);
3219 return -ENOMEM;
3220 }
3221
3222
3223 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3224 if (err) {
3225 dev_info(&vsi->back->pdev->dev,
3226 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3227 ring->queue_index, pf_q, err);
3228 return -ENOMEM;
3229 }
3230
3231
3232 if (ring->ch) {
3233 if (ring->ch->type == I40E_VSI_VMDQ2)
3234 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3235 else
3236 return -EINVAL;
3237
3238 qtx_ctl |= (ring->ch->vsi_number <<
3239 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3240 I40E_QTX_CTL_VFVM_INDX_MASK;
3241 } else {
3242 if (vsi->type == I40E_VSI_VMDQ2) {
3243 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3244 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3245 I40E_QTX_CTL_VFVM_INDX_MASK;
3246 } else {
3247 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3248 }
3249 }
3250
3251 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3252 I40E_QTX_CTL_PF_INDX_MASK);
3253 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3254 i40e_flush(hw);
3255
3256
3257 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3258
3259 return 0;
3260}
3261
3262
3263
3264
3265
3266
3267
3268static int i40e_configure_rx_ring(struct i40e_ring *ring)
3269{
3270 struct i40e_vsi *vsi = ring->vsi;
3271 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3272 u16 pf_q = vsi->base_queue + ring->queue_index;
3273 struct i40e_hw *hw = &vsi->back->hw;
3274 struct i40e_hmc_obj_rxq rx_ctx;
3275 i40e_status err = 0;
3276 bool ok;
3277 int ret;
3278
3279 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3280
3281
3282 memset(&rx_ctx, 0, sizeof(rx_ctx));
3283
3284 if (ring->vsi->type == I40E_VSI_MAIN)
3285 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3286
3287 kfree(ring->rx_bi);
3288 ring->xsk_pool = i40e_xsk_pool(ring);
3289 if (ring->xsk_pool) {
3290 ret = i40e_alloc_rx_bi_zc(ring);
3291 if (ret)
3292 return ret;
3293 ring->rx_buf_len =
3294 xsk_pool_get_rx_frame_size(ring->xsk_pool);
3295
3296
3297
3298
3299 chain_len = 1;
3300 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3301 MEM_TYPE_XSK_BUFF_POOL,
3302 NULL);
3303 if (ret)
3304 return ret;
3305 dev_info(&vsi->back->pdev->dev,
3306 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
3307 ring->queue_index);
3308
3309 } else {
3310 ret = i40e_alloc_rx_bi(ring);
3311 if (ret)
3312 return ret;
3313 ring->rx_buf_len = vsi->rx_buf_len;
3314 if (ring->vsi->type == I40E_VSI_MAIN) {
3315 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3316 MEM_TYPE_PAGE_SHARED,
3317 NULL);
3318 if (ret)
3319 return ret;
3320 }
3321 }
3322
3323 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3324 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3325
3326 rx_ctx.base = (ring->dma / 128);
3327 rx_ctx.qlen = ring->count;
3328
3329
3330 rx_ctx.dsize = 0;
3331
3332
3333
3334
3335 rx_ctx.hsplit_0 = 0;
3336
3337 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3338 if (hw->revision_id == 0)
3339 rx_ctx.lrxqthresh = 0;
3340 else
3341 rx_ctx.lrxqthresh = 1;
3342 rx_ctx.crcstrip = 1;
3343 rx_ctx.l2tsel = 1;
3344
3345 rx_ctx.showiv = 0;
3346
3347 rx_ctx.prefena = 1;
3348
3349
3350 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3351 if (err) {
3352 dev_info(&vsi->back->pdev->dev,
3353 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3354 ring->queue_index, pf_q, err);
3355 return -ENOMEM;
3356 }
3357
3358
3359 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3360 if (err) {
3361 dev_info(&vsi->back->pdev->dev,
3362 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3363 ring->queue_index, pf_q, err);
3364 return -ENOMEM;
3365 }
3366
3367
3368 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3369 clear_ring_build_skb_enabled(ring);
3370 else
3371 set_ring_build_skb_enabled(ring);
3372
3373
3374 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3375 writel(0, ring->tail);
3376
3377 if (ring->xsk_pool) {
3378 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
3379 ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
3380 } else {
3381 ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3382 }
3383 if (!ok) {
3384
3385
3386
3387 dev_info(&vsi->back->pdev->dev,
3388 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3389 ring->xsk_pool ? "AF_XDP ZC enabled " : "",
3390 ring->queue_index, pf_q);
3391 }
3392
3393 return 0;
3394}
3395
3396
3397
3398
3399
3400
3401
3402static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3403{
3404 int err = 0;
3405 u16 i;
3406
3407 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3408 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3409
3410 if (err || !i40e_enabled_xdp_vsi(vsi))
3411 return err;
3412
3413 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3414 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3415
3416 return err;
3417}
3418
3419
3420
3421
3422
3423
3424
3425static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3426{
3427 int err = 0;
3428 u16 i;
3429
3430 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3431 vsi->max_frame = I40E_MAX_RXBUFFER;
3432 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3433#if (PAGE_SIZE < 8192)
3434 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3435 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3436 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3437 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3438#endif
3439 } else {
3440 vsi->max_frame = I40E_MAX_RXBUFFER;
3441 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3442 I40E_RXBUFFER_2048;
3443 }
3444
3445
3446 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3447 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3448
3449 return err;
3450}
3451
3452
3453
3454
3455
3456static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3457{
3458 struct i40e_ring *tx_ring, *rx_ring;
3459 u16 qoffset, qcount;
3460 int i, n;
3461
3462 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3463
3464 for (i = 0; i < vsi->num_queue_pairs; i++) {
3465 rx_ring = vsi->rx_rings[i];
3466 tx_ring = vsi->tx_rings[i];
3467 rx_ring->dcb_tc = 0;
3468 tx_ring->dcb_tc = 0;
3469 }
3470 return;
3471 }
3472
3473 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3474 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3475 continue;
3476
3477 qoffset = vsi->tc_config.tc_info[n].qoffset;
3478 qcount = vsi->tc_config.tc_info[n].qcount;
3479 for (i = qoffset; i < (qoffset + qcount); i++) {
3480 rx_ring = vsi->rx_rings[i];
3481 tx_ring = vsi->tx_rings[i];
3482 rx_ring->dcb_tc = n;
3483 tx_ring->dcb_tc = n;
3484 }
3485 }
3486}
3487
3488
3489
3490
3491
3492static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3493{
3494 if (vsi->netdev)
3495 i40e_set_rx_mode(vsi->netdev);
3496}
3497
3498
3499
3500
3501
3502
3503
3504
3505static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3506{
3507 struct i40e_fdir_filter *filter;
3508 struct i40e_pf *pf = vsi->back;
3509 struct hlist_node *node;
3510
3511 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3512 return;
3513
3514
3515 pf->fd_tcp4_filter_cnt = 0;
3516 pf->fd_udp4_filter_cnt = 0;
3517 pf->fd_sctp4_filter_cnt = 0;
3518 pf->fd_ip4_filter_cnt = 0;
3519
3520 hlist_for_each_entry_safe(filter, node,
3521 &pf->fdir_filter_list, fdir_node) {
3522 i40e_add_del_fdir(vsi, filter, true);
3523 }
3524}
3525
3526
3527
3528
3529
3530static int i40e_vsi_configure(struct i40e_vsi *vsi)
3531{
3532 int err;
3533
3534 i40e_set_vsi_rx_mode(vsi);
3535 i40e_restore_vlan(vsi);
3536 i40e_vsi_config_dcb_rings(vsi);
3537 err = i40e_vsi_configure_tx(vsi);
3538 if (!err)
3539 err = i40e_vsi_configure_rx(vsi);
3540
3541 return err;
3542}
3543
3544
3545
3546
3547
3548static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3549{
3550 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3551 struct i40e_pf *pf = vsi->back;
3552 struct i40e_hw *hw = &pf->hw;
3553 u16 vector;
3554 int i, q;
3555 u32 qp;
3556
3557
3558
3559
3560
3561 qp = vsi->base_queue;
3562 vector = vsi->base_vector;
3563 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3564 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3565
3566 q_vector->rx.next_update = jiffies + 1;
3567 q_vector->rx.target_itr =
3568 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3569 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3570 q_vector->rx.target_itr >> 1);
3571 q_vector->rx.current_itr = q_vector->rx.target_itr;
3572
3573 q_vector->tx.next_update = jiffies + 1;
3574 q_vector->tx.target_itr =
3575 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3576 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3577 q_vector->tx.target_itr >> 1);
3578 q_vector->tx.current_itr = q_vector->tx.target_itr;
3579
3580 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3581 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3582
3583
3584 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3585 for (q = 0; q < q_vector->num_ringpairs; q++) {
3586 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
3587 u32 val;
3588
3589 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3590 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3591 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3592 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3593 (I40E_QUEUE_TYPE_TX <<
3594 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3595
3596 wr32(hw, I40E_QINT_RQCTL(qp), val);
3597
3598 if (has_xdp) {
3599 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3600 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3601 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3602 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3603 (I40E_QUEUE_TYPE_TX <<
3604 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3605
3606 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3607 }
3608
3609 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3610 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3611 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3612 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3613 (I40E_QUEUE_TYPE_RX <<
3614 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3615
3616
3617 if (q == (q_vector->num_ringpairs - 1))
3618 val |= (I40E_QUEUE_END_OF_LIST <<
3619 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3620
3621 wr32(hw, I40E_QINT_TQCTL(qp), val);
3622 qp++;
3623 }
3624 }
3625
3626 i40e_flush(hw);
3627}
3628
3629
3630
3631
3632
3633static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3634{
3635 struct i40e_hw *hw = &pf->hw;
3636 u32 val;
3637
3638
3639 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3640 rd32(hw, I40E_PFINT_ICR0);
3641
3642 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3643 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3644 I40E_PFINT_ICR0_ENA_GRST_MASK |
3645 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3646 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3647 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3648 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3649 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3650
3651 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3652 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3653
3654 if (pf->flags & I40E_FLAG_PTP)
3655 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3656
3657 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3658
3659
3660 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3661 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3662
3663
3664 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3665}
3666
3667
3668
3669
3670
3671static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3672{
3673 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3674 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3675 struct i40e_pf *pf = vsi->back;
3676 struct i40e_hw *hw = &pf->hw;
3677 u32 val;
3678
3679
3680 q_vector->rx.next_update = jiffies + 1;
3681 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3682 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
3683 q_vector->rx.current_itr = q_vector->rx.target_itr;
3684 q_vector->tx.next_update = jiffies + 1;
3685 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3686 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
3687 q_vector->tx.current_itr = q_vector->tx.target_itr;
3688
3689 i40e_enable_misc_int_causes(pf);
3690
3691
3692 wr32(hw, I40E_PFINT_LNKLST0, 0);
3693
3694
3695 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3696 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3697 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3698 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3699
3700 wr32(hw, I40E_QINT_RQCTL(0), val);
3701
3702 if (i40e_enabled_xdp_vsi(vsi)) {
3703 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3704 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3705 (I40E_QUEUE_TYPE_TX
3706 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3707
3708 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3709 }
3710
3711 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3712 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3713 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3714
3715 wr32(hw, I40E_QINT_TQCTL(0), val);
3716 i40e_flush(hw);
3717}
3718
3719
3720
3721
3722
3723void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3724{
3725 struct i40e_hw *hw = &pf->hw;
3726
3727 wr32(hw, I40E_PFINT_DYN_CTL0,
3728 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3729 i40e_flush(hw);
3730}
3731
3732
3733
3734
3735
3736void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3737{
3738 struct i40e_hw *hw = &pf->hw;
3739 u32 val;
3740
3741 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3742 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3743 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3744
3745 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3746 i40e_flush(hw);
3747}
3748
3749
3750
3751
3752
3753
3754static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3755{
3756 struct i40e_q_vector *q_vector = data;
3757
3758 if (!q_vector->tx.ring && !q_vector->rx.ring)
3759 return IRQ_HANDLED;
3760
3761 napi_schedule_irqoff(&q_vector->napi);
3762
3763 return IRQ_HANDLED;
3764}
3765
3766
3767
3768
3769
3770
3771
3772
3773
3774static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3775 const cpumask_t *mask)
3776{
3777 struct i40e_q_vector *q_vector =
3778 container_of(notify, struct i40e_q_vector, affinity_notify);
3779
3780 cpumask_copy(&q_vector->affinity_mask, mask);
3781}
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791static void i40e_irq_affinity_release(struct kref *ref) {}
3792
3793
3794
3795
3796
3797
3798
3799
3800static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3801{
3802 int q_vectors = vsi->num_q_vectors;
3803 struct i40e_pf *pf = vsi->back;
3804 int base = vsi->base_vector;
3805 int rx_int_idx = 0;
3806 int tx_int_idx = 0;
3807 int vector, err;
3808 int irq_num;
3809 int cpu;
3810
3811 for (vector = 0; vector < q_vectors; vector++) {
3812 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3813
3814 irq_num = pf->msix_entries[base + vector].vector;
3815
3816 if (q_vector->tx.ring && q_vector->rx.ring) {
3817 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3818 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3819 tx_int_idx++;
3820 } else if (q_vector->rx.ring) {
3821 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3822 "%s-%s-%d", basename, "rx", rx_int_idx++);
3823 } else if (q_vector->tx.ring) {
3824 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3825 "%s-%s-%d", basename, "tx", tx_int_idx++);
3826 } else {
3827
3828 continue;
3829 }
3830 err = request_irq(irq_num,
3831 vsi->irq_handler,
3832 0,
3833 q_vector->name,
3834 q_vector);
3835 if (err) {
3836 dev_info(&pf->pdev->dev,
3837 "MSIX request_irq failed, error: %d\n", err);
3838 goto free_queue_irqs;
3839 }
3840
3841
3842 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3843 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3844 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3845
3846
3847
3848
3849
3850
3851 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3852 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
3853 }
3854
3855 vsi->irqs_ready = true;
3856 return 0;
3857
3858free_queue_irqs:
3859 while (vector) {
3860 vector--;
3861 irq_num = pf->msix_entries[base + vector].vector;
3862 irq_set_affinity_notifier(irq_num, NULL);
3863 irq_set_affinity_hint(irq_num, NULL);
3864 free_irq(irq_num, &vsi->q_vectors[vector]);
3865 }
3866 return err;
3867}
3868
3869
3870
3871
3872
3873static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3874{
3875 struct i40e_pf *pf = vsi->back;
3876 struct i40e_hw *hw = &pf->hw;
3877 int base = vsi->base_vector;
3878 int i;
3879
3880
3881 for (i = 0; i < vsi->num_queue_pairs; i++) {
3882 u32 val;
3883
3884 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3885 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3886 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3887
3888 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3889 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3890 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3891
3892 if (!i40e_enabled_xdp_vsi(vsi))
3893 continue;
3894 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3895 }
3896
3897
3898 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3899 for (i = vsi->base_vector;
3900 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3901 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3902
3903 i40e_flush(hw);
3904 for (i = 0; i < vsi->num_q_vectors; i++)
3905 synchronize_irq(pf->msix_entries[i + base].vector);
3906 } else {
3907
3908 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3909 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3910 i40e_flush(hw);
3911 synchronize_irq(pf->pdev->irq);
3912 }
3913}
3914
3915
3916
3917
3918
3919static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3920{
3921 struct i40e_pf *pf = vsi->back;
3922 int i;
3923
3924 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3925 for (i = 0; i < vsi->num_q_vectors; i++)
3926 i40e_irq_dynamic_enable(vsi, i);
3927 } else {
3928 i40e_irq_dynamic_enable_icr0(pf);
3929 }
3930
3931 i40e_flush(&pf->hw);
3932 return 0;
3933}
3934
3935
3936
3937
3938
3939static void i40e_free_misc_vector(struct i40e_pf *pf)
3940{
3941
3942 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3943 i40e_flush(&pf->hw);
3944
3945 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
3946 synchronize_irq(pf->msix_entries[0].vector);
3947 free_irq(pf->msix_entries[0].vector, pf);
3948 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
3949 }
3950}
3951
3952
3953
3954
3955
3956
3957
3958
3959
3960
3961static irqreturn_t i40e_intr(int irq, void *data)
3962{
3963 struct i40e_pf *pf = (struct i40e_pf *)data;
3964 struct i40e_hw *hw = &pf->hw;
3965 irqreturn_t ret = IRQ_NONE;
3966 u32 icr0, icr0_remaining;
3967 u32 val, ena_mask;
3968
3969 icr0 = rd32(hw, I40E_PFINT_ICR0);
3970 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3971
3972
3973 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3974 goto enable_intr;
3975
3976
3977 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3978 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3979 pf->sw_int_count++;
3980
3981 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3982 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3983 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3984 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
3985 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
3986 }
3987
3988
3989 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3990 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3991 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3992
3993
3994
3995
3996
3997
3998
3999 if (!test_bit(__I40E_DOWN, pf->state))
4000 napi_schedule_irqoff(&q_vector->napi);
4001 }
4002
4003 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4004 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4005 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
4006 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
4007 }
4008
4009 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
4010 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4011 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
4012 }
4013
4014 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4015
4016 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
4017 u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4018
4019 reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
4020 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4021 } else {
4022 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
4023 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4024 }
4025 }
4026
4027 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
4028 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4029 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
4030 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
4031 val = rd32(hw, I40E_GLGEN_RSTAT);
4032 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
4033 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
4034 if (val == I40E_RESET_CORER) {
4035 pf->corer_count++;
4036 } else if (val == I40E_RESET_GLOBR) {
4037 pf->globr_count++;
4038 } else if (val == I40E_RESET_EMPR) {
4039 pf->empr_count++;
4040 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
4041 }
4042 }
4043
4044 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
4045 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
4046 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
4047 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
4048 rd32(hw, I40E_PFHMC_ERRORINFO),
4049 rd32(hw, I40E_PFHMC_ERRORDATA));
4050 }
4051
4052 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
4053 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
4054
4055 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
4056 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
4057 i40e_ptp_tx_hwtstamp(pf);
4058 }
4059 }
4060
4061
4062
4063
4064
4065 icr0_remaining = icr0 & ena_mask;
4066 if (icr0_remaining) {
4067 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4068 icr0_remaining);
4069 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4070 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4071 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4072 dev_info(&pf->pdev->dev, "device will be reset\n");
4073 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4074 i40e_service_event_schedule(pf);
4075 }
4076 ena_mask &= ~icr0_remaining;
4077 }
4078 ret = IRQ_HANDLED;
4079
4080enable_intr:
4081
4082 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4083 if (!test_bit(__I40E_DOWN, pf->state) ||
4084 test_bit(__I40E_RECOVERY_MODE, pf->state)) {
4085 i40e_service_event_schedule(pf);
4086 i40e_irq_dynamic_enable_icr0(pf);
4087 }
4088
4089 return ret;
4090}
4091
4092
4093
4094
4095
4096
4097
4098
4099static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4100{
4101 struct i40e_vsi *vsi = tx_ring->vsi;
4102 u16 i = tx_ring->next_to_clean;
4103 struct i40e_tx_buffer *tx_buf;
4104 struct i40e_tx_desc *tx_desc;
4105
4106 tx_buf = &tx_ring->tx_bi[i];
4107 tx_desc = I40E_TX_DESC(tx_ring, i);
4108 i -= tx_ring->count;
4109
4110 do {
4111 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4112
4113
4114 if (!eop_desc)
4115 break;
4116
4117
4118 smp_rmb();
4119
4120
4121 if (!(eop_desc->cmd_type_offset_bsz &
4122 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4123 break;
4124
4125
4126 tx_buf->next_to_watch = NULL;
4127
4128 tx_desc->buffer_addr = 0;
4129 tx_desc->cmd_type_offset_bsz = 0;
4130
4131 tx_buf++;
4132 tx_desc++;
4133 i++;
4134 if (unlikely(!i)) {
4135 i -= tx_ring->count;
4136 tx_buf = tx_ring->tx_bi;
4137 tx_desc = I40E_TX_DESC(tx_ring, 0);
4138 }
4139
4140 dma_unmap_single(tx_ring->dev,
4141 dma_unmap_addr(tx_buf, dma),
4142 dma_unmap_len(tx_buf, len),
4143 DMA_TO_DEVICE);
4144 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4145 kfree(tx_buf->raw_buf);
4146
4147 tx_buf->raw_buf = NULL;
4148 tx_buf->tx_flags = 0;
4149 tx_buf->next_to_watch = NULL;
4150 dma_unmap_len_set(tx_buf, len, 0);
4151 tx_desc->buffer_addr = 0;
4152 tx_desc->cmd_type_offset_bsz = 0;
4153
4154
4155 tx_buf++;
4156 tx_desc++;
4157 i++;
4158 if (unlikely(!i)) {
4159 i -= tx_ring->count;
4160 tx_buf = tx_ring->tx_bi;
4161 tx_desc = I40E_TX_DESC(tx_ring, 0);
4162 }
4163
4164
4165 budget--;
4166 } while (likely(budget));
4167
4168 i += tx_ring->count;
4169 tx_ring->next_to_clean = i;
4170
4171 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4172 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4173
4174 return budget > 0;
4175}
4176
4177
4178
4179
4180
4181
4182static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4183{
4184 struct i40e_q_vector *q_vector = data;
4185 struct i40e_vsi *vsi;
4186
4187 if (!q_vector->tx.ring)
4188 return IRQ_HANDLED;
4189
4190 vsi = q_vector->tx.ring->vsi;
4191 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4192
4193 return IRQ_HANDLED;
4194}
4195
4196
4197
4198
4199
4200
4201
4202static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4203{
4204 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4205 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4206 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4207
4208 tx_ring->q_vector = q_vector;
4209 tx_ring->next = q_vector->tx.ring;
4210 q_vector->tx.ring = tx_ring;
4211 q_vector->tx.count++;
4212
4213
4214 if (i40e_enabled_xdp_vsi(vsi)) {
4215 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4216
4217 xdp_ring->q_vector = q_vector;
4218 xdp_ring->next = q_vector->tx.ring;
4219 q_vector->tx.ring = xdp_ring;
4220 q_vector->tx.count++;
4221 }
4222
4223 rx_ring->q_vector = q_vector;
4224 rx_ring->next = q_vector->rx.ring;
4225 q_vector->rx.ring = rx_ring;
4226 q_vector->rx.count++;
4227}
4228
4229
4230
4231
4232
4233
4234
4235
4236
4237
4238static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4239{
4240 int qp_remaining = vsi->num_queue_pairs;
4241 int q_vectors = vsi->num_q_vectors;
4242 int num_ringpairs;
4243 int v_start = 0;
4244 int qp_idx = 0;
4245
4246
4247
4248
4249
4250
4251
4252
4253 for (; v_start < q_vectors; v_start++) {
4254 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4255
4256 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4257
4258 q_vector->num_ringpairs = num_ringpairs;
4259 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4260
4261 q_vector->rx.count = 0;
4262 q_vector->tx.count = 0;
4263 q_vector->rx.ring = NULL;
4264 q_vector->tx.ring = NULL;
4265
4266 while (num_ringpairs--) {
4267 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4268 qp_idx++;
4269 qp_remaining--;
4270 }
4271 }
4272}
4273
4274
4275
4276
4277
4278
4279static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4280{
4281 struct i40e_pf *pf = vsi->back;
4282 int err;
4283
4284 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4285 err = i40e_vsi_request_irq_msix(vsi, basename);
4286 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4287 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4288 pf->int_name, pf);
4289 else
4290 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4291 pf->int_name, pf);
4292
4293 if (err)
4294 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4295
4296 return err;
4297}
4298
4299#ifdef CONFIG_NET_POLL_CONTROLLER
4300
4301
4302
4303
4304
4305
4306
4307static void i40e_netpoll(struct net_device *netdev)
4308{
4309 struct i40e_netdev_priv *np = netdev_priv(netdev);
4310 struct i40e_vsi *vsi = np->vsi;
4311 struct i40e_pf *pf = vsi->back;
4312 int i;
4313
4314
4315 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4316 return;
4317
4318 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4319 for (i = 0; i < vsi->num_q_vectors; i++)
4320 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4321 } else {
4322 i40e_intr(pf->pdev->irq, netdev);
4323 }
4324}
4325#endif
4326
4327#define I40E_QTX_ENA_WAIT_COUNT 50
4328
4329
4330
4331
4332
4333
4334
4335
4336
4337
4338
4339
4340static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4341{
4342 int i;
4343 u32 tx_reg;
4344
4345 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4346 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4347 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4348 break;
4349
4350 usleep_range(10, 20);
4351 }
4352 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4353 return -ETIMEDOUT;
4354
4355 return 0;
4356}
4357
4358
4359
4360
4361
4362
4363
4364
4365
4366
4367
4368static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4369{
4370 struct i40e_hw *hw = &pf->hw;
4371 u32 tx_reg;
4372 int i;
4373
4374
4375 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4376 if (!enable)
4377 usleep_range(10, 20);
4378
4379 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4380 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4381 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4382 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4383 break;
4384 usleep_range(1000, 2000);
4385 }
4386
4387
4388 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4389 return;
4390
4391
4392 if (enable) {
4393 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4394 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4395 } else {
4396 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4397 }
4398
4399 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4400}
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4411 bool is_xdp, bool enable)
4412{
4413 int ret;
4414
4415 i40e_control_tx_q(pf, pf_q, enable);
4416
4417
4418 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4419 if (ret) {
4420 dev_info(&pf->pdev->dev,
4421 "VSI seid %d %sTx ring %d %sable timeout\n",
4422 seid, (is_xdp ? "XDP " : ""), pf_q,
4423 (enable ? "en" : "dis"));
4424 }
4425
4426 return ret;
4427}
4428
4429
4430
4431
4432
4433
4434static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
4435{
4436 struct i40e_pf *pf = vsi->back;
4437 int i, pf_q, ret = 0;
4438
4439 pf_q = vsi->base_queue;
4440 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4441 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4442 pf_q,
4443 false , enable);
4444 if (ret)
4445 break;
4446
4447 if (!i40e_enabled_xdp_vsi(vsi))
4448 continue;
4449
4450 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4451 pf_q + vsi->alloc_queue_pairs,
4452 true , enable);
4453 if (ret)
4454 break;
4455 }
4456 return ret;
4457}
4458
4459
4460
4461
4462
4463
4464
4465
4466
4467
4468
4469
4470static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4471{
4472 int i;
4473 u32 rx_reg;
4474
4475 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4476 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4477 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4478 break;
4479
4480 usleep_range(10, 20);
4481 }
4482 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4483 return -ETIMEDOUT;
4484
4485 return 0;
4486}
4487
4488
4489
4490
4491
4492
4493
4494
4495
4496
4497
4498static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4499{
4500 struct i40e_hw *hw = &pf->hw;
4501 u32 rx_reg;
4502 int i;
4503
4504 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4505 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4506 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4507 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4508 break;
4509 usleep_range(1000, 2000);
4510 }
4511
4512
4513 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4514 return;
4515
4516
4517 if (enable)
4518 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4519 else
4520 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4521
4522 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4523}
4524
4525
4526
4527
4528
4529
4530
4531
4532
4533
4534
4535int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4536{
4537 int ret = 0;
4538
4539 i40e_control_rx_q(pf, pf_q, enable);
4540
4541
4542 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4543 if (ret)
4544 return ret;
4545
4546 return ret;
4547}
4548
4549
4550
4551
4552
4553
4554static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4555{
4556 struct i40e_pf *pf = vsi->back;
4557 int i, pf_q, ret = 0;
4558
4559 pf_q = vsi->base_queue;
4560 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4561 ret = i40e_control_wait_rx_q(pf, pf_q, enable);
4562 if (ret) {
4563 dev_info(&pf->pdev->dev,
4564 "VSI seid %d Rx ring %d %sable timeout\n",
4565 vsi->seid, pf_q, (enable ? "en" : "dis"));
4566 break;
4567 }
4568 }
4569
4570
4571
4572
4573 if (!enable)
4574 mdelay(50);
4575
4576 return ret;
4577}
4578
4579
4580
4581
4582
4583int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4584{
4585 int ret = 0;
4586
4587
4588 ret = i40e_vsi_control_rx(vsi, true);
4589 if (ret)
4590 return ret;
4591 ret = i40e_vsi_control_tx(vsi, true);
4592
4593 return ret;
4594}
4595
4596
4597
4598
4599
4600void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4601{
4602
4603 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4604 return i40e_vsi_stop_rings_no_wait(vsi);
4605
4606
4607
4608
4609 i40e_vsi_control_tx(vsi, false);
4610 i40e_vsi_control_rx(vsi, false);
4611}
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4625{
4626 struct i40e_pf *pf = vsi->back;
4627 int i, pf_q;
4628
4629 pf_q = vsi->base_queue;
4630 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4631 i40e_control_tx_q(pf, pf_q, false);
4632 i40e_control_rx_q(pf, pf_q, false);
4633 }
4634}
4635
4636
4637
4638
4639
4640static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4641{
4642 struct i40e_pf *pf = vsi->back;
4643 struct i40e_hw *hw = &pf->hw;
4644 int base = vsi->base_vector;
4645 u32 val, qp;
4646 int i;
4647
4648 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4649 if (!vsi->q_vectors)
4650 return;
4651
4652 if (!vsi->irqs_ready)
4653 return;
4654
4655 vsi->irqs_ready = false;
4656 for (i = 0; i < vsi->num_q_vectors; i++) {
4657 int irq_num;
4658 u16 vector;
4659
4660 vector = i + base;
4661 irq_num = pf->msix_entries[vector].vector;
4662
4663
4664 if (!vsi->q_vectors[i] ||
4665 !vsi->q_vectors[i]->num_ringpairs)
4666 continue;
4667
4668
4669 irq_set_affinity_notifier(irq_num, NULL);
4670
4671 irq_set_affinity_hint(irq_num, NULL);
4672 synchronize_irq(irq_num);
4673 free_irq(irq_num, vsi->q_vectors[i]);
4674
4675
4676
4677
4678
4679
4680
4681
4682 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4683 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4684 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4685 val |= I40E_QUEUE_END_OF_LIST
4686 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4687 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4688
4689 while (qp != I40E_QUEUE_END_OF_LIST) {
4690 u32 next;
4691
4692 val = rd32(hw, I40E_QINT_RQCTL(qp));
4693
4694 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4695 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4696 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4697 I40E_QINT_RQCTL_INTEVENT_MASK);
4698
4699 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4700 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4701
4702 wr32(hw, I40E_QINT_RQCTL(qp), val);
4703
4704 val = rd32(hw, I40E_QINT_TQCTL(qp));
4705
4706 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4707 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4708
4709 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4710 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4711 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4712 I40E_QINT_TQCTL_INTEVENT_MASK);
4713
4714 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4715 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4716
4717 wr32(hw, I40E_QINT_TQCTL(qp), val);
4718 qp = next;
4719 }
4720 }
4721 } else {
4722 free_irq(pf->pdev->irq, pf);
4723
4724 val = rd32(hw, I40E_PFINT_LNKLST0);
4725 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4726 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4727 val |= I40E_QUEUE_END_OF_LIST
4728 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4729 wr32(hw, I40E_PFINT_LNKLST0, val);
4730
4731 val = rd32(hw, I40E_QINT_RQCTL(qp));
4732 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4733 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4734 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4735 I40E_QINT_RQCTL_INTEVENT_MASK);
4736
4737 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4738 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4739
4740 wr32(hw, I40E_QINT_RQCTL(qp), val);
4741
4742 val = rd32(hw, I40E_QINT_TQCTL(qp));
4743
4744 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4745 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4746 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4747 I40E_QINT_TQCTL_INTEVENT_MASK);
4748
4749 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4750 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4751
4752 wr32(hw, I40E_QINT_TQCTL(qp), val);
4753 }
4754}
4755
4756
4757
4758
4759
4760
4761
4762
4763
4764
4765static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4766{
4767 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4768 struct i40e_ring *ring;
4769
4770 if (!q_vector)
4771 return;
4772
4773
4774 i40e_for_each_ring(ring, q_vector->tx)
4775 ring->q_vector = NULL;
4776
4777 i40e_for_each_ring(ring, q_vector->rx)
4778 ring->q_vector = NULL;
4779
4780
4781 if (vsi->netdev)
4782 netif_napi_del(&q_vector->napi);
4783
4784 vsi->q_vectors[v_idx] = NULL;
4785
4786 kfree_rcu(q_vector, rcu);
4787}
4788
4789
4790
4791
4792
4793
4794
4795
4796static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4797{
4798 int v_idx;
4799
4800 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4801 i40e_free_q_vector(vsi, v_idx);
4802}
4803
4804
4805
4806
4807
4808static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4809{
4810
4811 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4812 pci_disable_msix(pf->pdev);
4813 kfree(pf->msix_entries);
4814 pf->msix_entries = NULL;
4815 kfree(pf->irq_pile);
4816 pf->irq_pile = NULL;
4817 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4818 pci_disable_msi(pf->pdev);
4819 }
4820 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4821}
4822
4823
4824
4825
4826
4827
4828
4829
4830static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4831{
4832 int i;
4833
4834 i40e_free_misc_vector(pf);
4835
4836 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4837 I40E_IWARP_IRQ_PILE_ID);
4838
4839 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4840 for (i = 0; i < pf->num_alloc_vsi; i++)
4841 if (pf->vsi[i])
4842 i40e_vsi_free_q_vectors(pf->vsi[i]);
4843 i40e_reset_interrupt_capability(pf);
4844}
4845
4846
4847
4848
4849
4850static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4851{
4852 int q_idx;
4853
4854 if (!vsi->netdev)
4855 return;
4856
4857 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4858 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4859
4860 if (q_vector->rx.ring || q_vector->tx.ring)
4861 napi_enable(&q_vector->napi);
4862 }
4863}
4864
4865
4866
4867
4868
4869static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4870{
4871 int q_idx;
4872
4873 if (!vsi->netdev)
4874 return;
4875
4876 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4877 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4878
4879 if (q_vector->rx.ring || q_vector->tx.ring)
4880 napi_disable(&q_vector->napi);
4881 }
4882}
4883
4884
4885
4886
4887
4888static void i40e_vsi_close(struct i40e_vsi *vsi)
4889{
4890 struct i40e_pf *pf = vsi->back;
4891 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
4892 i40e_down(vsi);
4893 i40e_vsi_free_irq(vsi);
4894 i40e_vsi_free_tx_resources(vsi);
4895 i40e_vsi_free_rx_resources(vsi);
4896 vsi->current_netdev_flags = 0;
4897 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
4898 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4899 set_bit(__I40E_CLIENT_RESET, pf->state);
4900}
4901
4902
4903
4904
4905
4906static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4907{
4908 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4909 return;
4910
4911 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
4912 if (vsi->netdev && netif_running(vsi->netdev))
4913 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4914 else
4915 i40e_vsi_close(vsi);
4916}
4917
4918
4919
4920
4921
4922static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4923{
4924 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
4925 return;
4926
4927 if (vsi->netdev && netif_running(vsi->netdev))
4928 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4929 else
4930 i40e_vsi_open(vsi);
4931}
4932
4933
4934
4935
4936
4937static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4938{
4939 int v;
4940
4941 for (v = 0; v < pf->num_alloc_vsi; v++) {
4942 if (pf->vsi[v])
4943 i40e_quiesce_vsi(pf->vsi[v]);
4944 }
4945}
4946
4947
4948
4949
4950
4951static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4952{
4953 int v;
4954
4955 for (v = 0; v < pf->num_alloc_vsi; v++) {
4956 if (pf->vsi[v])
4957 i40e_unquiesce_vsi(pf->vsi[v]);
4958 }
4959}
4960
4961
4962
4963
4964
4965
4966
4967int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4968{
4969 struct i40e_pf *pf = vsi->back;
4970 int i, pf_q, ret;
4971
4972 pf_q = vsi->base_queue;
4973 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4974
4975 ret = i40e_pf_txq_wait(pf, pf_q, false);
4976 if (ret) {
4977 dev_info(&pf->pdev->dev,
4978 "VSI seid %d Tx ring %d disable timeout\n",
4979 vsi->seid, pf_q);
4980 return ret;
4981 }
4982
4983 if (!i40e_enabled_xdp_vsi(vsi))
4984 goto wait_rx;
4985
4986
4987 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
4988 false);
4989 if (ret) {
4990 dev_info(&pf->pdev->dev,
4991 "VSI seid %d XDP Tx ring %d disable timeout\n",
4992 vsi->seid, pf_q);
4993 return ret;
4994 }
4995wait_rx:
4996
4997 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4998 if (ret) {
4999 dev_info(&pf->pdev->dev,
5000 "VSI seid %d Rx ring %d disable timeout\n",
5001 vsi->seid, pf_q);
5002 return ret;
5003 }
5004 }
5005
5006 return 0;
5007}
5008
5009#ifdef CONFIG_I40E_DCB
5010
5011
5012
5013
5014
5015
5016
5017static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
5018{
5019 int v, ret = 0;
5020
5021 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
5022 if (pf->vsi[v]) {
5023 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
5024 if (ret)
5025 break;
5026 }
5027 }
5028
5029 return ret;
5030}
5031
5032#endif
5033
5034
5035
5036
5037
5038
5039
5040
5041static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
5042{
5043 struct i40e_dcb_app_priority_table app;
5044 struct i40e_hw *hw = &pf->hw;
5045 u8 enabled_tc = 1;
5046 u8 tc, i;
5047
5048 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5049
5050 for (i = 0; i < dcbcfg->numapps; i++) {
5051 app = dcbcfg->app[i];
5052 if (app.selector == I40E_APP_SEL_TCPIP &&
5053 app.protocolid == I40E_APP_PROTOID_ISCSI) {
5054 tc = dcbcfg->etscfg.prioritytable[app.priority];
5055 enabled_tc |= BIT(tc);
5056 break;
5057 }
5058 }
5059
5060 return enabled_tc;
5061}
5062
5063
5064
5065
5066
5067
5068
5069static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
5070{
5071 int i, tc_unused = 0;
5072 u8 num_tc = 0;
5073 u8 ret = 0;
5074
5075
5076
5077
5078
5079 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5080 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
5081
5082
5083
5084
5085 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5086 if (num_tc & BIT(i)) {
5087 if (!tc_unused) {
5088 ret++;
5089 } else {
5090 pr_err("Non-contiguous TC - Disabling DCB\n");
5091 return 1;
5092 }
5093 } else {
5094 tc_unused = 1;
5095 }
5096 }
5097
5098
5099 if (!ret)
5100 ret = 1;
5101
5102 return ret;
5103}
5104
5105
5106
5107
5108
5109
5110
5111
5112static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5113{
5114 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5115 u8 enabled_tc = 1;
5116 u8 i;
5117
5118 for (i = 0; i < num_tc; i++)
5119 enabled_tc |= BIT(i);
5120
5121 return enabled_tc;
5122}
5123
5124
5125
5126
5127
5128
5129
5130
5131static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5132{
5133 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5134 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5135 u8 enabled_tc = 1, i;
5136
5137 for (i = 1; i < num_tc; i++)
5138 enabled_tc |= BIT(i);
5139 return enabled_tc;
5140}
5141
5142
5143
5144
5145
5146
5147
5148static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5149{
5150 struct i40e_hw *hw = &pf->hw;
5151 u8 i, enabled_tc = 1;
5152 u8 num_tc = 0;
5153 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5154
5155 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5156 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5157
5158
5159 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5160 return 1;
5161
5162
5163 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5164 return i40e_dcb_get_num_tc(dcbcfg);
5165
5166
5167 if (pf->hw.func_caps.iscsi)
5168 enabled_tc = i40e_get_iscsi_tc_map(pf);
5169 else
5170 return 1;
5171
5172 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5173 if (enabled_tc & BIT(i))
5174 num_tc++;
5175 }
5176 return num_tc;
5177}
5178
5179
5180
5181
5182
5183
5184
5185static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5186{
5187 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5188 return i40e_mqprio_get_enabled_tc(pf);
5189
5190
5191
5192
5193 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5194 return I40E_DEFAULT_TRAFFIC_CLASS;
5195
5196
5197 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5198 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5199
5200
5201 if (pf->hw.func_caps.iscsi)
5202 return i40e_get_iscsi_tc_map(pf);
5203 else
5204 return I40E_DEFAULT_TRAFFIC_CLASS;
5205}
5206
5207
5208
5209
5210
5211
5212
5213static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5214{
5215 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5216 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5217 struct i40e_pf *pf = vsi->back;
5218 struct i40e_hw *hw = &pf->hw;
5219 i40e_status ret;
5220 u32 tc_bw_max;
5221 int i;
5222
5223
5224 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5225 if (ret) {
5226 dev_info(&pf->pdev->dev,
5227 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5228 i40e_stat_str(&pf->hw, ret),
5229 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5230 return -EINVAL;
5231 }
5232
5233
5234 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5235 NULL);
5236 if (ret) {
5237 dev_info(&pf->pdev->dev,
5238 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5239 i40e_stat_str(&pf->hw, ret),
5240 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5241 return -EINVAL;
5242 }
5243
5244 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5245 dev_info(&pf->pdev->dev,
5246 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5247 bw_config.tc_valid_bits,
5248 bw_ets_config.tc_valid_bits);
5249
5250 }
5251
5252 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5253 vsi->bw_max_quanta = bw_config.max_bw;
5254 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5255 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5256 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5257 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5258 vsi->bw_ets_limit_credits[i] =
5259 le16_to_cpu(bw_ets_config.credits[i]);
5260
5261 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
5262 }
5263
5264 return 0;
5265}
5266
5267
5268
5269
5270
5271
5272
5273
5274
5275static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5276 u8 *bw_share)
5277{
5278 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5279 struct i40e_pf *pf = vsi->back;
5280 i40e_status ret;
5281 int i;
5282
5283
5284 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5285 return 0;
5286 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5287 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5288 if (ret)
5289 dev_info(&pf->pdev->dev,
5290 "Failed to reset tx rate for vsi->seid %u\n",
5291 vsi->seid);
5292 return ret;
5293 }
5294 bw_data.tc_valid_bits = enabled_tc;
5295 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5296 bw_data.tc_bw_credits[i] = bw_share[i];
5297
5298 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5299 if (ret) {
5300 dev_info(&pf->pdev->dev,
5301 "AQ command Config VSI BW allocation per TC failed = %d\n",
5302 pf->hw.aq.asq_last_status);
5303 return -EINVAL;
5304 }
5305
5306 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5307 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5308
5309 return 0;
5310}
5311
5312
5313
5314
5315
5316
5317
5318static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5319{
5320 struct net_device *netdev = vsi->netdev;
5321 struct i40e_pf *pf = vsi->back;
5322 struct i40e_hw *hw = &pf->hw;
5323 u8 netdev_tc = 0;
5324 int i;
5325 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5326
5327 if (!netdev)
5328 return;
5329
5330 if (!enabled_tc) {
5331 netdev_reset_tc(netdev);
5332 return;
5333 }
5334
5335
5336 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5337 return;
5338
5339
5340 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5341
5342
5343
5344
5345
5346
5347
5348 if (vsi->tc_config.enabled_tc & BIT(i))
5349 netdev_set_tc_queue(netdev,
5350 vsi->tc_config.tc_info[i].netdev_tc,
5351 vsi->tc_config.tc_info[i].qcount,
5352 vsi->tc_config.tc_info[i].qoffset);
5353 }
5354
5355 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5356 return;
5357
5358
5359 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5360
5361 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5362
5363 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5364 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5365 }
5366}
5367
5368
5369
5370
5371
5372
5373static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5374 struct i40e_vsi_context *ctxt)
5375{
5376
5377
5378
5379
5380 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5381 memcpy(&vsi->info.queue_mapping,
5382 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5383 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5384 sizeof(vsi->info.tc_mapping));
5385}
5386
5387
5388
5389
5390
5391
5392
5393
5394
5395
5396
5397
5398
5399
5400static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5401{
5402 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5403 struct i40e_pf *pf = vsi->back;
5404 struct i40e_hw *hw = &pf->hw;
5405 struct i40e_vsi_context ctxt;
5406 int ret = 0;
5407 int i;
5408
5409
5410 if (vsi->tc_config.enabled_tc == enabled_tc &&
5411 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5412 return ret;
5413
5414
5415 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5416 if (enabled_tc & BIT(i))
5417 bw_share[i] = 1;
5418 }
5419
5420 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5421 if (ret) {
5422 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5423
5424 dev_info(&pf->pdev->dev,
5425 "Failed configuring TC map %d for VSI %d\n",
5426 enabled_tc, vsi->seid);
5427 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5428 &bw_config, NULL);
5429 if (ret) {
5430 dev_info(&pf->pdev->dev,
5431 "Failed querying vsi bw info, err %s aq_err %s\n",
5432 i40e_stat_str(hw, ret),
5433 i40e_aq_str(hw, hw->aq.asq_last_status));
5434 goto out;
5435 }
5436 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5437 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5438
5439 if (!valid_tc)
5440 valid_tc = bw_config.tc_valid_bits;
5441
5442 valid_tc |= 1;
5443 dev_info(&pf->pdev->dev,
5444 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5445 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5446 enabled_tc = valid_tc;
5447 }
5448
5449 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5450 if (ret) {
5451 dev_err(&pf->pdev->dev,
5452 "Unable to configure TC map %d for VSI %d\n",
5453 enabled_tc, vsi->seid);
5454 goto out;
5455 }
5456 }
5457
5458
5459 ctxt.seid = vsi->seid;
5460 ctxt.pf_num = vsi->back->hw.pf_id;
5461 ctxt.vf_num = 0;
5462 ctxt.uplink_seid = vsi->uplink_seid;
5463 ctxt.info = vsi->info;
5464 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5465 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5466 if (ret)
5467 goto out;
5468 } else {
5469 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5470 }
5471
5472
5473
5474
5475 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5476 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5477 vsi->num_queue_pairs);
5478 ret = i40e_vsi_config_rss(vsi);
5479 if (ret) {
5480 dev_info(&vsi->back->pdev->dev,
5481 "Failed to reconfig rss for num_queues\n");
5482 return ret;
5483 }
5484 vsi->reconfig_rss = false;
5485 }
5486 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5487 ctxt.info.valid_sections |=
5488 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5489 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5490 }
5491
5492
5493
5494
5495 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5496 if (ret) {
5497 dev_info(&pf->pdev->dev,
5498 "Update vsi tc config failed, err %s aq_err %s\n",
5499 i40e_stat_str(hw, ret),
5500 i40e_aq_str(hw, hw->aq.asq_last_status));
5501 goto out;
5502 }
5503
5504 i40e_vsi_update_queue_map(vsi, &ctxt);
5505 vsi->info.valid_sections = 0;
5506
5507
5508 ret = i40e_vsi_get_bw_info(vsi);
5509 if (ret) {
5510 dev_info(&pf->pdev->dev,
5511 "Failed updating vsi bw info, err %s aq_err %s\n",
5512 i40e_stat_str(hw, ret),
5513 i40e_aq_str(hw, hw->aq.asq_last_status));
5514 goto out;
5515 }
5516
5517
5518 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5519out:
5520 return ret;
5521}
5522
5523
5524
5525
5526
5527
5528static int i40e_get_link_speed(struct i40e_vsi *vsi)
5529{
5530 struct i40e_pf *pf = vsi->back;
5531
5532 switch (pf->hw.phy.link_info.link_speed) {
5533 case I40E_LINK_SPEED_40GB:
5534 return 40000;
5535 case I40E_LINK_SPEED_25GB:
5536 return 25000;
5537 case I40E_LINK_SPEED_20GB:
5538 return 20000;
5539 case I40E_LINK_SPEED_10GB:
5540 return 10000;
5541 case I40E_LINK_SPEED_1GB:
5542 return 1000;
5543 default:
5544 return -EINVAL;
5545 }
5546}
5547
5548
5549
5550
5551
5552
5553
5554
5555
5556int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5557{
5558 struct i40e_pf *pf = vsi->back;
5559 u64 credits = 0;
5560 int speed = 0;
5561 int ret = 0;
5562
5563 speed = i40e_get_link_speed(vsi);
5564 if (max_tx_rate > speed) {
5565 dev_err(&pf->pdev->dev,
5566 "Invalid max tx rate %llu specified for VSI seid %d.",
5567 max_tx_rate, seid);
5568 return -EINVAL;
5569 }
5570 if (max_tx_rate && max_tx_rate < 50) {
5571 dev_warn(&pf->pdev->dev,
5572 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5573 max_tx_rate = 50;
5574 }
5575
5576
5577 credits = max_tx_rate;
5578 do_div(credits, I40E_BW_CREDIT_DIVISOR);
5579 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5580 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5581 if (ret)
5582 dev_err(&pf->pdev->dev,
5583 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5584 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5585 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5586 return ret;
5587}
5588
5589
5590
5591
5592
5593
5594
5595static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5596{
5597 enum i40e_admin_queue_err last_aq_status;
5598 struct i40e_cloud_filter *cfilter;
5599 struct i40e_channel *ch, *ch_tmp;
5600 struct i40e_pf *pf = vsi->back;
5601 struct hlist_node *node;
5602 int ret, i;
5603
5604
5605
5606
5607 vsi->current_rss_size = 0;
5608
5609
5610 if (list_empty(&vsi->ch_list))
5611 return;
5612
5613 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5614 struct i40e_vsi *p_vsi;
5615
5616 list_del(&ch->list);
5617 p_vsi = ch->parent_vsi;
5618 if (!p_vsi || !ch->initialized) {
5619 kfree(ch);
5620 continue;
5621 }
5622
5623 for (i = 0; i < ch->num_queue_pairs; i++) {
5624 struct i40e_ring *tx_ring, *rx_ring;
5625 u16 pf_q;
5626
5627 pf_q = ch->base_queue + i;
5628 tx_ring = vsi->tx_rings[pf_q];
5629 tx_ring->ch = NULL;
5630
5631 rx_ring = vsi->rx_rings[pf_q];
5632 rx_ring->ch = NULL;
5633 }
5634
5635
5636 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5637 if (ret)
5638 dev_info(&vsi->back->pdev->dev,
5639 "Failed to reset tx rate for ch->seid %u\n",
5640 ch->seid);
5641
5642
5643 hlist_for_each_entry_safe(cfilter, node,
5644 &pf->cloud_filter_list, cloud_node) {
5645 if (cfilter->seid != ch->seid)
5646 continue;
5647
5648 hash_del(&cfilter->cloud_node);
5649 if (cfilter->dst_port)
5650 ret = i40e_add_del_cloud_filter_big_buf(vsi,
5651 cfilter,
5652 false);
5653 else
5654 ret = i40e_add_del_cloud_filter(vsi, cfilter,
5655 false);
5656 last_aq_status = pf->hw.aq.asq_last_status;
5657 if (ret)
5658 dev_info(&pf->pdev->dev,
5659 "Failed to delete cloud filter, err %s aq_err %s\n",
5660 i40e_stat_str(&pf->hw, ret),
5661 i40e_aq_str(&pf->hw, last_aq_status));
5662 kfree(cfilter);
5663 }
5664
5665
5666 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
5667 NULL);
5668 if (ret)
5669 dev_err(&vsi->back->pdev->dev,
5670 "unable to remove channel (%d) for parent VSI(%d)\n",
5671 ch->seid, p_vsi->seid);
5672 kfree(ch);
5673 }
5674 INIT_LIST_HEAD(&vsi->ch_list);
5675}
5676
5677
5678
5679
5680
5681
5682
5683static bool i40e_is_any_channel(struct i40e_vsi *vsi)
5684{
5685 struct i40e_channel *ch, *ch_tmp;
5686
5687 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5688 if (ch->initialized)
5689 return true;
5690 }
5691
5692 return false;
5693}
5694
5695
5696
5697
5698
5699
5700
5701
5702static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
5703{
5704 struct i40e_channel *ch, *ch_tmp;
5705 int max = 0;
5706
5707 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5708 if (!ch->initialized)
5709 continue;
5710 if (ch->num_queue_pairs > max)
5711 max = ch->num_queue_pairs;
5712 }
5713
5714 return max;
5715}
5716
5717
5718
5719
5720
5721
5722
5723
5724
5725
5726
5727
5728static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
5729 struct i40e_vsi *vsi, bool *reconfig_rss)
5730{
5731 int max_ch_queues;
5732
5733 if (!reconfig_rss)
5734 return -EINVAL;
5735
5736 *reconfig_rss = false;
5737 if (vsi->current_rss_size) {
5738 if (num_queues > vsi->current_rss_size) {
5739 dev_dbg(&pf->pdev->dev,
5740 "Error: num_queues (%d) > vsi's current_size(%d)\n",
5741 num_queues, vsi->current_rss_size);
5742 return -EINVAL;
5743 } else if ((num_queues < vsi->current_rss_size) &&
5744 (!is_power_of_2(num_queues))) {
5745 dev_dbg(&pf->pdev->dev,
5746 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
5747 num_queues, vsi->current_rss_size);
5748 return -EINVAL;
5749 }
5750 }
5751
5752 if (!is_power_of_2(num_queues)) {
5753
5754
5755
5756
5757
5758 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
5759 if (num_queues < max_ch_queues) {
5760 dev_dbg(&pf->pdev->dev,
5761 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
5762 num_queues, max_ch_queues);
5763 return -EINVAL;
5764 }
5765 *reconfig_rss = true;
5766 }
5767
5768 return 0;
5769}
5770
5771
5772
5773
5774
5775
5776
5777
5778static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
5779{
5780 struct i40e_pf *pf = vsi->back;
5781 u8 seed[I40E_HKEY_ARRAY_SIZE];
5782 struct i40e_hw *hw = &pf->hw;
5783 int local_rss_size;
5784 u8 *lut;
5785 int ret;
5786
5787 if (!vsi->rss_size)
5788 return -EINVAL;
5789
5790 if (rss_size > vsi->rss_size)
5791 return -EINVAL;
5792
5793 local_rss_size = min_t(int, vsi->rss_size, rss_size);
5794 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
5795 if (!lut)
5796 return -ENOMEM;
5797
5798
5799 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
5800
5801
5802
5803
5804 if (vsi->rss_hkey_user)
5805 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
5806 else
5807 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
5808
5809 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
5810 if (ret) {
5811 dev_info(&pf->pdev->dev,
5812 "Cannot set RSS lut, err %s aq_err %s\n",
5813 i40e_stat_str(hw, ret),
5814 i40e_aq_str(hw, hw->aq.asq_last_status));
5815 kfree(lut);
5816 return ret;
5817 }
5818 kfree(lut);
5819
5820
5821 if (!vsi->orig_rss_size)
5822 vsi->orig_rss_size = vsi->rss_size;
5823 vsi->current_rss_size = local_rss_size;
5824
5825 return ret;
5826}
5827
5828
5829
5830
5831
5832
5833
5834
5835
5836static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
5837 struct i40e_vsi_context *ctxt,
5838 struct i40e_channel *ch)
5839{
5840 u16 qcount, qmap, sections = 0;
5841 u8 offset = 0;
5842 int pow;
5843
5844 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
5845 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
5846
5847 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
5848 ch->num_queue_pairs = qcount;
5849
5850
5851 pow = ilog2(qcount);
5852 if (!is_power_of_2(qcount))
5853 pow++;
5854
5855 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5856 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
5857
5858
5859 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
5860
5861 ctxt->info.up_enable_bits = 0x1;
5862 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5863 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
5864 ctxt->info.valid_sections |= cpu_to_le16(sections);
5865}
5866
5867
5868
5869
5870
5871
5872
5873
5874
5875static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
5876 struct i40e_channel *ch)
5877{
5878 struct i40e_hw *hw = &pf->hw;
5879 struct i40e_vsi_context ctxt;
5880 u8 enabled_tc = 0x1;
5881 int ret;
5882
5883 if (ch->type != I40E_VSI_VMDQ2) {
5884 dev_info(&pf->pdev->dev,
5885 "add new vsi failed, ch->type %d\n", ch->type);
5886 return -EINVAL;
5887 }
5888
5889 memset(&ctxt, 0, sizeof(ctxt));
5890 ctxt.pf_num = hw->pf_id;
5891 ctxt.vf_num = 0;
5892 ctxt.uplink_seid = uplink_seid;
5893 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5894 if (ch->type == I40E_VSI_VMDQ2)
5895 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5896
5897 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
5898 ctxt.info.valid_sections |=
5899 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5900 ctxt.info.switch_id =
5901 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5902 }
5903
5904
5905 i40e_channel_setup_queue_map(pf, &ctxt, ch);
5906
5907
5908 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5909 if (ret) {
5910 dev_info(&pf->pdev->dev,
5911 "add new vsi failed, err %s aq_err %s\n",
5912 i40e_stat_str(&pf->hw, ret),
5913 i40e_aq_str(&pf->hw,
5914 pf->hw.aq.asq_last_status));
5915 return -ENOENT;
5916 }
5917
5918
5919
5920
5921 ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
5922 ch->seid = ctxt.seid;
5923 ch->vsi_number = ctxt.vsi_number;
5924 ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
5925
5926
5927
5928
5929
5930 ch->info.mapping_flags = ctxt.info.mapping_flags;
5931 memcpy(&ch->info.queue_mapping,
5932 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
5933 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
5934 sizeof(ctxt.info.tc_mapping));
5935
5936 return 0;
5937}
5938
5939static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
5940 u8 *bw_share)
5941{
5942 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5943 i40e_status ret;
5944 int i;
5945
5946 bw_data.tc_valid_bits = ch->enabled_tc;
5947 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5948 bw_data.tc_bw_credits[i] = bw_share[i];
5949
5950 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
5951 &bw_data, NULL);
5952 if (ret) {
5953 dev_info(&vsi->back->pdev->dev,
5954 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
5955 vsi->back->hw.aq.asq_last_status, ch->seid);
5956 return -EINVAL;
5957 }
5958
5959 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5960 ch->info.qs_handle[i] = bw_data.qs_handles[i];
5961
5962 return 0;
5963}
5964
5965
5966
5967
5968
5969
5970
5971
5972
5973
5974static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
5975 struct i40e_vsi *vsi,
5976 struct i40e_channel *ch)
5977{
5978 i40e_status ret;
5979 int i;
5980 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5981
5982
5983 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5984 if (ch->enabled_tc & BIT(i))
5985 bw_share[i] = 1;
5986 }
5987
5988
5989 ret = i40e_channel_config_bw(vsi, ch, bw_share);
5990 if (ret) {
5991 dev_info(&vsi->back->pdev->dev,
5992 "Failed configuring TC map %d for channel (seid %u)\n",
5993 ch->enabled_tc, ch->seid);
5994 return ret;
5995 }
5996
5997 for (i = 0; i < ch->num_queue_pairs; i++) {
5998 struct i40e_ring *tx_ring, *rx_ring;
5999 u16 pf_q;
6000
6001 pf_q = ch->base_queue + i;
6002
6003
6004
6005
6006 tx_ring = vsi->tx_rings[pf_q];
6007 tx_ring->ch = ch;
6008
6009
6010 rx_ring = vsi->rx_rings[pf_q];
6011 rx_ring->ch = ch;
6012 }
6013
6014 return 0;
6015}
6016
6017
6018
6019
6020
6021
6022
6023
6024
6025
6026
6027
6028static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
6029 struct i40e_vsi *vsi,
6030 struct i40e_channel *ch,
6031 u16 uplink_seid, u8 type)
6032{
6033 int ret;
6034
6035 ch->initialized = false;
6036 ch->base_queue = vsi->next_base_queue;
6037 ch->type = type;
6038
6039
6040 ret = i40e_add_channel(pf, uplink_seid, ch);
6041 if (ret) {
6042 dev_info(&pf->pdev->dev,
6043 "failed to add_channel using uplink_seid %u\n",
6044 uplink_seid);
6045 return ret;
6046 }
6047
6048
6049 ch->initialized = true;
6050
6051
6052 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
6053 if (ret) {
6054 dev_info(&pf->pdev->dev,
6055 "failed to configure TX rings for channel %u\n",
6056 ch->seid);
6057 return ret;
6058 }
6059
6060
6061 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
6062 dev_dbg(&pf->pdev->dev,
6063 "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
6064 ch->seid, ch->vsi_number, ch->stat_counter_idx,
6065 ch->num_queue_pairs,
6066 vsi->next_base_queue);
6067 return ret;
6068}
6069
6070
6071
6072
6073
6074
6075
6076
6077
6078
6079static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
6080 struct i40e_channel *ch)
6081{
6082 u8 vsi_type;
6083 u16 seid;
6084 int ret;
6085
6086 if (vsi->type == I40E_VSI_MAIN) {
6087 vsi_type = I40E_VSI_VMDQ2;
6088 } else {
6089 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6090 vsi->type);
6091 return false;
6092 }
6093
6094
6095 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6096
6097
6098 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6099 if (ret) {
6100 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6101 return false;
6102 }
6103
6104 return ch->initialized ? true : false;
6105}
6106
6107
6108
6109
6110
6111
6112
6113
6114static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6115{
6116 u8 mode;
6117 struct i40e_pf *pf = vsi->back;
6118 struct i40e_hw *hw = &pf->hw;
6119 int ret;
6120
6121 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6122 if (ret)
6123 return -EINVAL;
6124
6125 if (hw->dev_caps.switch_mode) {
6126
6127
6128
6129 u32 switch_mode = hw->dev_caps.switch_mode &
6130 I40E_SWITCH_MODE_MASK;
6131 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6132 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6133 return 0;
6134 dev_err(&pf->pdev->dev,
6135 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6136 hw->dev_caps.switch_mode);
6137 return -EINVAL;
6138 }
6139 }
6140
6141
6142 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6143
6144
6145 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6146
6147
6148 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6149
6150
6151 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6152 pf->last_sw_conf_valid_flags,
6153 mode, NULL);
6154 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6155 dev_err(&pf->pdev->dev,
6156 "couldn't set switch config bits, err %s aq_err %s\n",
6157 i40e_stat_str(hw, ret),
6158 i40e_aq_str(hw,
6159 hw->aq.asq_last_status));
6160
6161 return ret;
6162}
6163
6164
6165
6166
6167
6168
6169
6170
6171
6172int i40e_create_queue_channel(struct i40e_vsi *vsi,
6173 struct i40e_channel *ch)
6174{
6175 struct i40e_pf *pf = vsi->back;
6176 bool reconfig_rss;
6177 int err;
6178
6179 if (!ch)
6180 return -EINVAL;
6181
6182 if (!ch->num_queue_pairs) {
6183 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6184 ch->num_queue_pairs);
6185 return -EINVAL;
6186 }
6187
6188
6189 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6190 &reconfig_rss);
6191 if (err) {
6192 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6193 ch->num_queue_pairs);
6194 return -EINVAL;
6195 }
6196
6197
6198
6199
6200 if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
6201 (!i40e_is_any_channel(vsi))) {
6202 if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
6203 dev_dbg(&pf->pdev->dev,
6204 "Failed to create channel. Override queues (%u) not power of 2\n",
6205 vsi->tc_config.tc_info[0].qcount);
6206 return -EINVAL;
6207 }
6208
6209 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6210 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6211
6212 if (vsi->type == I40E_VSI_MAIN) {
6213 if (pf->flags & I40E_FLAG_TC_MQPRIO)
6214 i40e_do_reset(pf, I40E_PF_RESET_FLAG,
6215 true);
6216 else
6217 i40e_do_reset_safe(pf,
6218 I40E_PF_RESET_FLAG);
6219 }
6220 }
6221
6222
6223
6224 }
6225
6226
6227
6228
6229 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6230 dev_dbg(&pf->pdev->dev,
6231 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6232 vsi->cnt_q_avail, ch->num_queue_pairs);
6233 return -EINVAL;
6234 }
6235
6236
6237 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6238 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6239 if (err) {
6240 dev_info(&pf->pdev->dev,
6241 "Error: unable to reconfig rss for num_queues (%u)\n",
6242 ch->num_queue_pairs);
6243 return -EINVAL;
6244 }
6245 }
6246
6247 if (!i40e_setup_channel(pf, vsi, ch)) {
6248 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6249 return -EINVAL;
6250 }
6251
6252 dev_info(&pf->pdev->dev,
6253 "Setup channel (id:%u) utilizing num_queues %d\n",
6254 ch->seid, ch->num_queue_pairs);
6255
6256
6257 if (ch->max_tx_rate) {
6258 u64 credits = ch->max_tx_rate;
6259
6260 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6261 return -EINVAL;
6262
6263 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6264 dev_dbg(&pf->pdev->dev,
6265 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6266 ch->max_tx_rate,
6267 credits,
6268 ch->seid);
6269 }
6270
6271
6272 ch->parent_vsi = vsi;
6273
6274
6275 vsi->cnt_q_avail -= ch->num_queue_pairs;
6276
6277 return 0;
6278}
6279
6280
6281
6282
6283
6284
6285
6286static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6287{
6288 struct i40e_channel *ch;
6289 u64 max_rate = 0;
6290 int ret = 0, i;
6291
6292
6293 vsi->tc_seid_map[0] = vsi->seid;
6294 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6295 if (vsi->tc_config.enabled_tc & BIT(i)) {
6296 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6297 if (!ch) {
6298 ret = -ENOMEM;
6299 goto err_free;
6300 }
6301
6302 INIT_LIST_HEAD(&ch->list);
6303 ch->num_queue_pairs =
6304 vsi->tc_config.tc_info[i].qcount;
6305 ch->base_queue =
6306 vsi->tc_config.tc_info[i].qoffset;
6307
6308
6309
6310
6311 max_rate = vsi->mqprio_qopt.max_rate[i];
6312 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6313 ch->max_tx_rate = max_rate;
6314
6315 list_add_tail(&ch->list, &vsi->ch_list);
6316
6317 ret = i40e_create_queue_channel(vsi, ch);
6318 if (ret) {
6319 dev_err(&vsi->back->pdev->dev,
6320 "Failed creating queue channel with TC%d: queues %d\n",
6321 i, ch->num_queue_pairs);
6322 goto err_free;
6323 }
6324 vsi->tc_seid_map[i] = ch->seid;
6325 }
6326 }
6327 return ret;
6328
6329err_free:
6330 i40e_remove_queue_channels(vsi);
6331 return ret;
6332}
6333
6334
6335
6336
6337
6338
6339
6340
6341int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6342{
6343 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6344 struct i40e_pf *pf = veb->pf;
6345 int ret = 0;
6346 int i;
6347
6348
6349 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6350 return ret;
6351
6352 bw_data.tc_valid_bits = enabled_tc;
6353
6354
6355
6356 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6357 if (enabled_tc & BIT(i))
6358 bw_data.tc_bw_share_credits[i] = 1;
6359 }
6360
6361 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6362 &bw_data, NULL);
6363 if (ret) {
6364 dev_info(&pf->pdev->dev,
6365 "VEB bw config failed, err %s aq_err %s\n",
6366 i40e_stat_str(&pf->hw, ret),
6367 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6368 goto out;
6369 }
6370
6371
6372 ret = i40e_veb_get_bw_info(veb);
6373 if (ret) {
6374 dev_info(&pf->pdev->dev,
6375 "Failed getting veb bw config, err %s aq_err %s\n",
6376 i40e_stat_str(&pf->hw, ret),
6377 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6378 }
6379
6380out:
6381 return ret;
6382}
6383
6384#ifdef CONFIG_I40E_DCB
6385
6386
6387
6388
6389
6390
6391
6392
6393static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6394{
6395 u8 tc_map = 0;
6396 int ret;
6397 u8 v;
6398
6399
6400 tc_map = i40e_pf_get_tc_map(pf);
6401 for (v = 0; v < I40E_MAX_VEB; v++) {
6402 if (!pf->veb[v])
6403 continue;
6404 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6405 if (ret) {
6406 dev_info(&pf->pdev->dev,
6407 "Failed configuring TC for VEB seid=%d\n",
6408 pf->veb[v]->seid);
6409
6410 }
6411 }
6412
6413
6414 for (v = 0; v < pf->num_alloc_vsi; v++) {
6415 if (!pf->vsi[v])
6416 continue;
6417
6418
6419
6420
6421 if (v == pf->lan_vsi)
6422 tc_map = i40e_pf_get_tc_map(pf);
6423 else
6424 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6425
6426 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6427 if (ret) {
6428 dev_info(&pf->pdev->dev,
6429 "Failed configuring TC for VSI seid=%d\n",
6430 pf->vsi[v]->seid);
6431
6432 } else {
6433
6434 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6435 if (pf->vsi[v]->netdev)
6436 i40e_dcbnl_set_all(pf->vsi[v]);
6437 }
6438 }
6439}
6440
6441
6442
6443
6444
6445
6446
6447
6448static int i40e_resume_port_tx(struct i40e_pf *pf)
6449{
6450 struct i40e_hw *hw = &pf->hw;
6451 int ret;
6452
6453 ret = i40e_aq_resume_port_tx(hw, NULL);
6454 if (ret) {
6455 dev_info(&pf->pdev->dev,
6456 "Resume Port Tx failed, err %s aq_err %s\n",
6457 i40e_stat_str(&pf->hw, ret),
6458 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6459
6460 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6461 i40e_service_event_schedule(pf);
6462 }
6463
6464 return ret;
6465}
6466
6467
6468
6469
6470
6471
6472
6473
6474static int i40e_init_pf_dcb(struct i40e_pf *pf)
6475{
6476 struct i40e_hw *hw = &pf->hw;
6477 int err = 0;
6478
6479
6480
6481
6482 if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
6483 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) {
6484 dev_info(&pf->pdev->dev, "DCB is not supported or FW LLDP is disabled\n");
6485 err = I40E_NOT_SUPPORTED;
6486 goto out;
6487 }
6488
6489 err = i40e_init_dcb(hw, true);
6490 if (!err) {
6491
6492 if ((!hw->func_caps.dcb) ||
6493 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
6494 dev_info(&pf->pdev->dev,
6495 "DCBX offload is not supported or is disabled for this PF.\n");
6496 } else {
6497
6498 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
6499 DCB_CAP_DCBX_VER_IEEE;
6500
6501 pf->flags |= I40E_FLAG_DCB_CAPABLE;
6502
6503
6504
6505 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6506 pf->flags |= I40E_FLAG_DCB_ENABLED;
6507 else
6508 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6509 dev_dbg(&pf->pdev->dev,
6510 "DCBX offload is supported for this PF.\n");
6511 }
6512 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
6513 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
6514 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
6515 } else {
6516 dev_info(&pf->pdev->dev,
6517 "Query for DCB configuration failed, err %s aq_err %s\n",
6518 i40e_stat_str(&pf->hw, err),
6519 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6520 }
6521
6522out:
6523 return err;
6524}
6525#endif
6526
6527
6528
6529
6530
6531
6532void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6533{
6534 enum i40e_aq_link_speed new_speed;
6535 struct i40e_pf *pf = vsi->back;
6536 char *speed = "Unknown";
6537 char *fc = "Unknown";
6538 char *fec = "";
6539 char *req_fec = "";
6540 char *an = "";
6541
6542 if (isup)
6543 new_speed = pf->hw.phy.link_info.link_speed;
6544 else
6545 new_speed = I40E_LINK_SPEED_UNKNOWN;
6546
6547 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
6548 return;
6549 vsi->current_isup = isup;
6550 vsi->current_speed = new_speed;
6551 if (!isup) {
6552 netdev_info(vsi->netdev, "NIC Link is Down\n");
6553 return;
6554 }
6555
6556
6557
6558
6559 if (pf->hw.func_caps.npar_enable &&
6560 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
6561 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
6562 netdev_warn(vsi->netdev,
6563 "The partition detected link speed that is less than 10Gbps\n");
6564
6565 switch (pf->hw.phy.link_info.link_speed) {
6566 case I40E_LINK_SPEED_40GB:
6567 speed = "40 G";
6568 break;
6569 case I40E_LINK_SPEED_20GB:
6570 speed = "20 G";
6571 break;
6572 case I40E_LINK_SPEED_25GB:
6573 speed = "25 G";
6574 break;
6575 case I40E_LINK_SPEED_10GB:
6576 speed = "10 G";
6577 break;
6578 case I40E_LINK_SPEED_5GB:
6579 speed = "5 G";
6580 break;
6581 case I40E_LINK_SPEED_2_5GB:
6582 speed = "2.5 G";
6583 break;
6584 case I40E_LINK_SPEED_1GB:
6585 speed = "1000 M";
6586 break;
6587 case I40E_LINK_SPEED_100MB:
6588 speed = "100 M";
6589 break;
6590 default:
6591 break;
6592 }
6593
6594 switch (pf->hw.fc.current_mode) {
6595 case I40E_FC_FULL:
6596 fc = "RX/TX";
6597 break;
6598 case I40E_FC_TX_PAUSE:
6599 fc = "TX";
6600 break;
6601 case I40E_FC_RX_PAUSE:
6602 fc = "RX";
6603 break;
6604 default:
6605 fc = "None";
6606 break;
6607 }
6608
6609 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
6610 req_fec = "None";
6611 fec = "None";
6612 an = "False";
6613
6614 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
6615 an = "True";
6616
6617 if (pf->hw.phy.link_info.fec_info &
6618 I40E_AQ_CONFIG_FEC_KR_ENA)
6619 fec = "CL74 FC-FEC/BASE-R";
6620 else if (pf->hw.phy.link_info.fec_info &
6621 I40E_AQ_CONFIG_FEC_RS_ENA)
6622 fec = "CL108 RS-FEC";
6623
6624
6625
6626
6627 if (vsi->back->hw.phy.link_info.req_fec_info &
6628 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
6629 if (vsi->back->hw.phy.link_info.req_fec_info &
6630 I40E_AQ_REQUEST_FEC_RS)
6631 req_fec = "CL108 RS-FEC";
6632 else
6633 req_fec = "CL74 FC-FEC/BASE-R";
6634 }
6635 netdev_info(vsi->netdev,
6636 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
6637 speed, req_fec, fec, an, fc);
6638 } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) {
6639 req_fec = "None";
6640 fec = "None";
6641 an = "False";
6642
6643 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
6644 an = "True";
6645
6646 if (pf->hw.phy.link_info.fec_info &
6647 I40E_AQ_CONFIG_FEC_KR_ENA)
6648 fec = "CL74 FC-FEC/BASE-R";
6649
6650 if (pf->hw.phy.link_info.req_fec_info &
6651 I40E_AQ_REQUEST_FEC_KR)
6652 req_fec = "CL74 FC-FEC/BASE-R";
6653
6654 netdev_info(vsi->netdev,
6655 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
6656 speed, req_fec, fec, an, fc);
6657 } else {
6658 netdev_info(vsi->netdev,
6659 "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
6660 speed, fc);
6661 }
6662
6663}
6664
6665
6666
6667
6668
6669static int i40e_up_complete(struct i40e_vsi *vsi)
6670{
6671 struct i40e_pf *pf = vsi->back;
6672 int err;
6673
6674 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6675 i40e_vsi_configure_msix(vsi);
6676 else
6677 i40e_configure_msi_and_legacy(vsi);
6678
6679
6680 err = i40e_vsi_start_rings(vsi);
6681 if (err)
6682 return err;
6683
6684 clear_bit(__I40E_VSI_DOWN, vsi->state);
6685 i40e_napi_enable_all(vsi);
6686 i40e_vsi_enable_irq(vsi);
6687
6688 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
6689 (vsi->netdev)) {
6690 i40e_print_link_message(vsi, true);
6691 netif_tx_start_all_queues(vsi->netdev);
6692 netif_carrier_on(vsi->netdev);
6693 }
6694
6695
6696 if (vsi->type == I40E_VSI_FDIR) {
6697
6698 pf->fd_add_err = 0;
6699 pf->fd_atr_cnt = 0;
6700 i40e_fdir_filter_restore(vsi);
6701 }
6702
6703
6704
6705
6706 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
6707 i40e_service_event_schedule(pf);
6708
6709 return 0;
6710}
6711
6712
6713
6714
6715
6716
6717
6718
6719static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
6720{
6721 struct i40e_pf *pf = vsi->back;
6722
6723 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
6724 usleep_range(1000, 2000);
6725 i40e_down(vsi);
6726
6727 i40e_up(vsi);
6728 clear_bit(__I40E_CONFIG_BUSY, pf->state);
6729}
6730
6731
6732
6733
6734
6735
6736static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
6737{
6738 struct i40e_aq_get_phy_abilities_resp abilities;
6739 struct i40e_aq_set_phy_config config = {0};
6740 bool non_zero_phy_type = is_up;
6741 struct i40e_hw *hw = &pf->hw;
6742 i40e_status err;
6743 u64 mask;
6744 u8 speed;
6745
6746
6747
6748
6749
6750
6751
6752 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
6753 NULL);
6754 if (err) {
6755 dev_err(&pf->pdev->dev,
6756 "failed to get phy cap., ret = %s last_status = %s\n",
6757 i40e_stat_str(hw, err),
6758 i40e_aq_str(hw, hw->aq.asq_last_status));
6759 return err;
6760 }
6761 speed = abilities.link_speed;
6762
6763
6764 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
6765 NULL);
6766 if (err) {
6767 dev_err(&pf->pdev->dev,
6768 "failed to get phy cap., ret = %s last_status = %s\n",
6769 i40e_stat_str(hw, err),
6770 i40e_aq_str(hw, hw->aq.asq_last_status));
6771 return err;
6772 }
6773
6774
6775
6776
6777
6778 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
6779 non_zero_phy_type = true;
6780 else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
6781 return I40E_SUCCESS;
6782
6783
6784
6785
6786
6787 mask = I40E_PHY_TYPES_BITMASK;
6788 config.phy_type =
6789 non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
6790 config.phy_type_ext =
6791 non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
6792
6793 config.abilities = abilities.abilities;
6794 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
6795 if (is_up)
6796 config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
6797 else
6798 config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
6799 }
6800 if (abilities.link_speed != 0)
6801 config.link_speed = abilities.link_speed;
6802 else
6803 config.link_speed = speed;
6804 config.eee_capability = abilities.eee_capability;
6805 config.eeer = abilities.eeer_val;
6806 config.low_power_ctrl = abilities.d3_lpan;
6807 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
6808 I40E_AQ_PHY_FEC_CONFIG_MASK;
6809 err = i40e_aq_set_phy_config(hw, &config, NULL);
6810
6811 if (err) {
6812 dev_err(&pf->pdev->dev,
6813 "set phy config ret = %s last_status = %s\n",
6814 i40e_stat_str(&pf->hw, err),
6815 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6816 return err;
6817 }
6818
6819
6820 err = i40e_update_link_info(hw);
6821 if (err) {
6822
6823
6824
6825
6826 msleep(1000);
6827 i40e_update_link_info(hw);
6828 }
6829
6830 i40e_aq_set_link_restart_an(hw, is_up, NULL);
6831
6832 return I40E_SUCCESS;
6833}
6834
6835
6836
6837
6838
6839int i40e_up(struct i40e_vsi *vsi)
6840{
6841 int err;
6842
6843 if (vsi->type == I40E_VSI_MAIN &&
6844 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
6845 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
6846 i40e_force_link_state(vsi->back, true);
6847
6848 err = i40e_vsi_configure(vsi);
6849 if (!err)
6850 err = i40e_up_complete(vsi);
6851
6852 return err;
6853}
6854
6855
6856
6857
6858
6859void i40e_down(struct i40e_vsi *vsi)
6860{
6861 int i;
6862
6863
6864
6865
6866 if (vsi->netdev) {
6867 netif_carrier_off(vsi->netdev);
6868 netif_tx_disable(vsi->netdev);
6869 }
6870 i40e_vsi_disable_irq(vsi);
6871 i40e_vsi_stop_rings(vsi);
6872 if (vsi->type == I40E_VSI_MAIN &&
6873 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
6874 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
6875 i40e_force_link_state(vsi->back, false);
6876 i40e_napi_disable_all(vsi);
6877
6878 for (i = 0; i < vsi->num_queue_pairs; i++) {
6879 i40e_clean_tx_ring(vsi->tx_rings[i]);
6880 if (i40e_enabled_xdp_vsi(vsi)) {
6881
6882
6883
6884 synchronize_rcu();
6885 i40e_clean_tx_ring(vsi->xdp_rings[i]);
6886 }
6887 i40e_clean_rx_ring(vsi->rx_rings[i]);
6888 }
6889
6890}
6891
6892
6893
6894
6895
6896
6897static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
6898 struct tc_mqprio_qopt_offload *mqprio_qopt)
6899{
6900 u64 sum_max_rate = 0;
6901 u64 max_rate = 0;
6902 int i;
6903
6904 if (mqprio_qopt->qopt.offset[0] != 0 ||
6905 mqprio_qopt->qopt.num_tc < 1 ||
6906 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
6907 return -EINVAL;
6908 for (i = 0; ; i++) {
6909 if (!mqprio_qopt->qopt.count[i])
6910 return -EINVAL;
6911 if (mqprio_qopt->min_rate[i]) {
6912 dev_err(&vsi->back->pdev->dev,
6913 "Invalid min tx rate (greater than 0) specified\n");
6914 return -EINVAL;
6915 }
6916 max_rate = mqprio_qopt->max_rate[i];
6917 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6918 sum_max_rate += max_rate;
6919
6920 if (i >= mqprio_qopt->qopt.num_tc - 1)
6921 break;
6922 if (mqprio_qopt->qopt.offset[i + 1] !=
6923 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
6924 return -EINVAL;
6925 }
6926 if (vsi->num_queue_pairs <
6927 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
6928 return -EINVAL;
6929 }
6930 if (sum_max_rate > i40e_get_link_speed(vsi)) {
6931 dev_err(&vsi->back->pdev->dev,
6932 "Invalid max tx rate specified\n");
6933 return -EINVAL;
6934 }
6935 return 0;
6936}
6937
6938
6939
6940
6941
6942static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
6943{
6944 u16 qcount;
6945 int i;
6946
6947
6948 vsi->tc_config.numtc = 1;
6949 vsi->tc_config.enabled_tc = 1;
6950 qcount = min_t(int, vsi->alloc_queue_pairs,
6951 i40e_pf_get_max_q_per_tc(vsi->back));
6952 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6953
6954
6955
6956 vsi->tc_config.tc_info[i].qoffset = 0;
6957 if (i == 0)
6958 vsi->tc_config.tc_info[i].qcount = qcount;
6959 else
6960 vsi->tc_config.tc_info[i].qcount = 1;
6961 vsi->tc_config.tc_info[i].netdev_tc = 0;
6962 }
6963}
6964
6965
6966
6967
6968
6969
6970
6971
6972
6973
6974
6975static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
6976 const u8 *macaddr, int *aq_err)
6977{
6978 struct i40e_aqc_remove_macvlan_element_data element;
6979 i40e_status status;
6980
6981 memset(&element, 0, sizeof(element));
6982 ether_addr_copy(element.mac_addr, macaddr);
6983 element.vlan_tag = 0;
6984 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6985 status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
6986 *aq_err = hw->aq.asq_last_status;
6987
6988 return status;
6989}
6990
6991
6992
6993
6994
6995
6996
6997
6998
6999
7000
7001static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
7002 const u8 *macaddr, int *aq_err)
7003{
7004 struct i40e_aqc_add_macvlan_element_data element;
7005 i40e_status status;
7006 u16 cmd_flags = 0;
7007
7008 ether_addr_copy(element.mac_addr, macaddr);
7009 element.vlan_tag = 0;
7010 element.queue_number = 0;
7011 element.match_method = I40E_AQC_MM_ERR_NO_RES;
7012 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
7013 element.flags = cpu_to_le16(cmd_flags);
7014 status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
7015 *aq_err = hw->aq.asq_last_status;
7016
7017 return status;
7018}
7019
7020
7021
7022
7023
7024
7025static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
7026{
7027 struct i40e_ring *tx_ring, *rx_ring;
7028 u16 pf_q;
7029 int i;
7030
7031 for (i = 0; i < ch->num_queue_pairs; i++) {
7032 pf_q = ch->base_queue + i;
7033 tx_ring = vsi->tx_rings[pf_q];
7034 tx_ring->ch = NULL;
7035 rx_ring = vsi->rx_rings[pf_q];
7036 rx_ring->ch = NULL;
7037 }
7038}
7039
7040
7041
7042
7043
7044
7045
7046
7047
7048static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
7049{
7050 struct i40e_channel *ch, *ch_tmp;
7051 int ret;
7052
7053 if (list_empty(&vsi->macvlan_list))
7054 return;
7055
7056 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7057 struct i40e_vsi *parent_vsi;
7058
7059 if (i40e_is_channel_macvlan(ch)) {
7060 i40e_reset_ch_rings(vsi, ch);
7061 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7062 netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
7063 netdev_set_sb_channel(ch->fwd->netdev, 0);
7064 kfree(ch->fwd);
7065 ch->fwd = NULL;
7066 }
7067
7068 list_del(&ch->list);
7069 parent_vsi = ch->parent_vsi;
7070 if (!parent_vsi || !ch->initialized) {
7071 kfree(ch);
7072 continue;
7073 }
7074
7075
7076 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
7077 NULL);
7078 if (ret)
7079 dev_err(&vsi->back->pdev->dev,
7080 "unable to remove channel (%d) for parent VSI(%d)\n",
7081 ch->seid, parent_vsi->seid);
7082 kfree(ch);
7083 }
7084 vsi->macvlan_cnt = 0;
7085}
7086
7087
7088
7089
7090
7091
7092
7093static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
7094 struct i40e_fwd_adapter *fwd)
7095{
7096 int ret = 0, num_tc = 1, i, aq_err;
7097 struct i40e_channel *ch, *ch_tmp;
7098 struct i40e_pf *pf = vsi->back;
7099 struct i40e_hw *hw = &pf->hw;
7100
7101 if (list_empty(&vsi->macvlan_list))
7102 return -EINVAL;
7103
7104
7105 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7106 if (!i40e_is_channel_macvlan(ch)) {
7107 ch->fwd = fwd;
7108
7109 for (i = 0; i < num_tc; i++)
7110 netdev_bind_sb_channel_queue(vsi->netdev, vdev,
7111 i,
7112 ch->num_queue_pairs,
7113 ch->base_queue);
7114 for (i = 0; i < ch->num_queue_pairs; i++) {
7115 struct i40e_ring *tx_ring, *rx_ring;
7116 u16 pf_q;
7117
7118 pf_q = ch->base_queue + i;
7119
7120
7121 tx_ring = vsi->tx_rings[pf_q];
7122 tx_ring->ch = ch;
7123
7124
7125 rx_ring = vsi->rx_rings[pf_q];
7126 rx_ring->ch = ch;
7127 }
7128 break;
7129 }
7130 }
7131
7132
7133
7134
7135 wmb();
7136
7137
7138 ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
7139 if (ret) {
7140
7141 macvlan_release_l2fw_offload(vdev);
7142 for (i = 0; i < ch->num_queue_pairs; i++) {
7143 struct i40e_ring *rx_ring;
7144 u16 pf_q;
7145
7146 pf_q = ch->base_queue + i;
7147 rx_ring = vsi->rx_rings[pf_q];
7148 rx_ring->netdev = NULL;
7149 }
7150 dev_info(&pf->pdev->dev,
7151 "Error adding mac filter on macvlan err %s, aq_err %s\n",
7152 i40e_stat_str(hw, ret),
7153 i40e_aq_str(hw, aq_err));
7154 netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n");
7155 }
7156
7157 return ret;
7158}
7159
7160
7161
7162
7163
7164
7165
7166
7167static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
7168 struct net_device *vdev)
7169{
7170 struct i40e_pf *pf = vsi->back;
7171 struct i40e_hw *hw = &pf->hw;
7172 struct i40e_vsi_context ctxt;
7173 u16 sections, qmap, num_qps;
7174 struct i40e_channel *ch;
7175 int i, pow, ret = 0;
7176 u8 offset = 0;
7177
7178 if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
7179 return -EINVAL;
7180
7181 num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
7182
7183
7184 pow = fls(roundup_pow_of_two(num_qps) - 1);
7185
7186 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
7187 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
7188
7189
7190 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
7191 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
7192 memset(&ctxt, 0, sizeof(ctxt));
7193 ctxt.seid = vsi->seid;
7194 ctxt.pf_num = vsi->back->hw.pf_id;
7195 ctxt.vf_num = 0;
7196 ctxt.uplink_seid = vsi->uplink_seid;
7197 ctxt.info = vsi->info;
7198 ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
7199 ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
7200 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
7201 ctxt.info.valid_sections |= cpu_to_le16(sections);
7202
7203
7204 vsi->rss_size = max_t(u16, num_qps, qcnt);
7205 ret = i40e_vsi_config_rss(vsi);
7206 if (ret) {
7207 dev_info(&pf->pdev->dev,
7208 "Failed to reconfig RSS for num_queues (%u)\n",
7209 vsi->rss_size);
7210 return ret;
7211 }
7212 vsi->reconfig_rss = true;
7213 dev_dbg(&vsi->back->pdev->dev,
7214 "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
7215 vsi->next_base_queue = num_qps;
7216 vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
7217
7218
7219
7220
7221 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7222 if (ret) {
7223 dev_info(&pf->pdev->dev,
7224 "Update vsi tc config failed, err %s aq_err %s\n",
7225 i40e_stat_str(hw, ret),
7226 i40e_aq_str(hw, hw->aq.asq_last_status));
7227 return ret;
7228 }
7229
7230 i40e_vsi_update_queue_map(vsi, &ctxt);
7231 vsi->info.valid_sections = 0;
7232
7233
7234 INIT_LIST_HEAD(&vsi->macvlan_list);
7235 for (i = 0; i < macvlan_cnt; i++) {
7236 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
7237 if (!ch) {
7238 ret = -ENOMEM;
7239 goto err_free;
7240 }
7241 INIT_LIST_HEAD(&ch->list);
7242 ch->num_queue_pairs = qcnt;
7243 if (!i40e_setup_channel(pf, vsi, ch)) {
7244 ret = -EINVAL;
7245 kfree(ch);
7246 goto err_free;
7247 }
7248 ch->parent_vsi = vsi;
7249 vsi->cnt_q_avail -= ch->num_queue_pairs;
7250 vsi->macvlan_cnt++;
7251 list_add_tail(&ch->list, &vsi->macvlan_list);
7252 }
7253
7254 return ret;
7255
7256err_free:
7257 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
7258 i40e_free_macvlan_channels(vsi);
7259
7260 return ret;
7261}
7262
7263
7264
7265
7266
7267
7268static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
7269{
7270 struct i40e_netdev_priv *np = netdev_priv(netdev);
7271 u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
7272 struct i40e_vsi *vsi = np->vsi;
7273 struct i40e_pf *pf = vsi->back;
7274 struct i40e_fwd_adapter *fwd;
7275 int avail_macvlan, ret;
7276
7277 if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
7278 netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
7279 return ERR_PTR(-EINVAL);
7280 }
7281 if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
7282 netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
7283 return ERR_PTR(-EINVAL);
7284 }
7285 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
7286 netdev_info(netdev, "Not enough vectors available to support macvlans\n");
7287 return ERR_PTR(-EINVAL);
7288 }
7289
7290
7291
7292
7293 if (netif_is_multiqueue(vdev))
7294 return ERR_PTR(-ERANGE);
7295
7296 if (!vsi->macvlan_cnt) {
7297
7298 set_bit(0, vsi->fwd_bitmask);
7299
7300
7301
7302
7303
7304 vectors = pf->num_lan_msix;
7305 if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
7306
7307 q_per_macvlan = 4;
7308 macvlan_cnt = (vectors - 32) / 4;
7309 } else if (vectors <= 64 && vectors > 32) {
7310
7311 q_per_macvlan = 2;
7312 macvlan_cnt = (vectors - 16) / 2;
7313 } else if (vectors <= 32 && vectors > 16) {
7314
7315 q_per_macvlan = 1;
7316 macvlan_cnt = vectors - 16;
7317 } else if (vectors <= 16 && vectors > 8) {
7318
7319 q_per_macvlan = 1;
7320 macvlan_cnt = vectors - 8;
7321 } else {
7322
7323 q_per_macvlan = 1;
7324 macvlan_cnt = vectors - 1;
7325 }
7326
7327 if (macvlan_cnt == 0)
7328 return ERR_PTR(-EBUSY);
7329
7330
7331 i40e_quiesce_vsi(vsi);
7332
7333
7334 ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
7335 vdev);
7336 if (ret)
7337 return ERR_PTR(ret);
7338
7339
7340 i40e_unquiesce_vsi(vsi);
7341 }
7342 avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
7343 vsi->macvlan_cnt);
7344 if (avail_macvlan >= I40E_MAX_MACVLANS)
7345 return ERR_PTR(-EBUSY);
7346
7347
7348 fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
7349 if (!fwd)
7350 return ERR_PTR(-ENOMEM);
7351
7352 set_bit(avail_macvlan, vsi->fwd_bitmask);
7353 fwd->bit_no = avail_macvlan;
7354 netdev_set_sb_channel(vdev, avail_macvlan);
7355 fwd->netdev = vdev;
7356
7357 if (!netif_running(netdev))
7358 return fwd;
7359
7360
7361 ret = i40e_fwd_ring_up(vsi, vdev, fwd);
7362 if (ret) {
7363
7364 netdev_unbind_sb_channel(netdev, vdev);
7365 netdev_set_sb_channel(vdev, 0);
7366
7367 kfree(fwd);
7368 return ERR_PTR(-EINVAL);
7369 }
7370
7371 return fwd;
7372}
7373
7374
7375
7376
7377
7378static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
7379{
7380 struct i40e_channel *ch, *ch_tmp;
7381 struct i40e_pf *pf = vsi->back;
7382 struct i40e_hw *hw = &pf->hw;
7383 int aq_err, ret = 0;
7384
7385 if (list_empty(&vsi->macvlan_list))
7386 return;
7387
7388 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7389 if (i40e_is_channel_macvlan(ch)) {
7390 ret = i40e_del_macvlan_filter(hw, ch->seid,
7391 i40e_channel_mac(ch),
7392 &aq_err);
7393 if (!ret) {
7394
7395 i40e_reset_ch_rings(vsi, ch);
7396 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7397 netdev_unbind_sb_channel(vsi->netdev,
7398 ch->fwd->netdev);
7399 netdev_set_sb_channel(ch->fwd->netdev, 0);
7400 kfree(ch->fwd);
7401 ch->fwd = NULL;
7402 }
7403 }
7404 }
7405}
7406
7407
7408
7409
7410
7411
7412static void i40e_fwd_del(struct net_device *netdev, void *vdev)
7413{
7414 struct i40e_netdev_priv *np = netdev_priv(netdev);
7415 struct i40e_fwd_adapter *fwd = vdev;
7416 struct i40e_channel *ch, *ch_tmp;
7417 struct i40e_vsi *vsi = np->vsi;
7418 struct i40e_pf *pf = vsi->back;
7419 struct i40e_hw *hw = &pf->hw;
7420 int aq_err, ret = 0;
7421
7422
7423 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7424 if (i40e_is_channel_macvlan(ch) &&
7425 ether_addr_equal(i40e_channel_mac(ch),
7426 fwd->netdev->dev_addr)) {
7427 ret = i40e_del_macvlan_filter(hw, ch->seid,
7428 i40e_channel_mac(ch),
7429 &aq_err);
7430 if (!ret) {
7431
7432 i40e_reset_ch_rings(vsi, ch);
7433 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7434 netdev_unbind_sb_channel(netdev, fwd->netdev);
7435 netdev_set_sb_channel(fwd->netdev, 0);
7436 kfree(ch->fwd);
7437 ch->fwd = NULL;
7438 } else {
7439 dev_info(&pf->pdev->dev,
7440 "Error deleting mac filter on macvlan err %s, aq_err %s\n",
7441 i40e_stat_str(hw, ret),
7442 i40e_aq_str(hw, aq_err));
7443 }
7444 break;
7445 }
7446 }
7447}
7448
7449
7450
7451
7452
7453
7454static int i40e_setup_tc(struct net_device *netdev, void *type_data)
7455{
7456 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
7457 struct i40e_netdev_priv *np = netdev_priv(netdev);
7458 struct i40e_vsi *vsi = np->vsi;
7459 struct i40e_pf *pf = vsi->back;
7460 u8 enabled_tc = 0, num_tc, hw;
7461 bool need_reset = false;
7462 int old_queue_pairs;
7463 int ret = -EINVAL;
7464 u16 mode;
7465 int i;
7466
7467 old_queue_pairs = vsi->num_queue_pairs;
7468 num_tc = mqprio_qopt->qopt.num_tc;
7469 hw = mqprio_qopt->qopt.hw;
7470 mode = mqprio_qopt->mode;
7471 if (!hw) {
7472 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7473 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
7474 goto config_tc;
7475 }
7476
7477
7478 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
7479 netdev_info(netdev,
7480 "Configuring TC not supported in MFP mode\n");
7481 return ret;
7482 }
7483 switch (mode) {
7484 case TC_MQPRIO_MODE_DCB:
7485 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7486
7487
7488 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
7489 netdev_info(netdev,
7490 "DCB is not enabled for adapter\n");
7491 return ret;
7492 }
7493
7494
7495 if (num_tc > i40e_pf_get_num_tc(pf)) {
7496 netdev_info(netdev,
7497 "TC count greater than enabled on link for adapter\n");
7498 return ret;
7499 }
7500 break;
7501 case TC_MQPRIO_MODE_CHANNEL:
7502 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
7503 netdev_info(netdev,
7504 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
7505 return ret;
7506 }
7507 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7508 return ret;
7509 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
7510 if (ret)
7511 return ret;
7512 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
7513 sizeof(*mqprio_qopt));
7514 pf->flags |= I40E_FLAG_TC_MQPRIO;
7515 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
7516 break;
7517 default:
7518 return -EINVAL;
7519 }
7520
7521config_tc:
7522
7523 for (i = 0; i < num_tc; i++)
7524 enabled_tc |= BIT(i);
7525
7526
7527 if (enabled_tc == vsi->tc_config.enabled_tc &&
7528 mode != TC_MQPRIO_MODE_CHANNEL)
7529 return 0;
7530
7531
7532 i40e_quiesce_vsi(vsi);
7533
7534 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
7535 i40e_remove_queue_channels(vsi);
7536
7537
7538 ret = i40e_vsi_config_tc(vsi, enabled_tc);
7539 if (ret) {
7540 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
7541 vsi->seid);
7542 need_reset = true;
7543 goto exit;
7544 } else {
7545 dev_info(&vsi->back->pdev->dev,
7546 "Setup channel (id:%u) utilizing num_queues %d\n",
7547 vsi->seid, vsi->tc_config.tc_info[0].qcount);
7548 }
7549
7550 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
7551 if (vsi->mqprio_qopt.max_rate[0]) {
7552 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
7553
7554 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
7555 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
7556 if (!ret) {
7557 u64 credits = max_tx_rate;
7558
7559 do_div(credits, I40E_BW_CREDIT_DIVISOR);
7560 dev_dbg(&vsi->back->pdev->dev,
7561 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
7562 max_tx_rate,
7563 credits,
7564 vsi->seid);
7565 } else {
7566 need_reset = true;
7567 goto exit;
7568 }
7569 }
7570 ret = i40e_configure_queue_channels(vsi);
7571 if (ret) {
7572 vsi->num_queue_pairs = old_queue_pairs;
7573 netdev_info(netdev,
7574 "Failed configuring queue channels\n");
7575 need_reset = true;
7576 goto exit;
7577 }
7578 }
7579
7580exit:
7581
7582 if (need_reset) {
7583 i40e_vsi_set_default_tc_config(vsi);
7584 need_reset = false;
7585 }
7586
7587
7588 i40e_unquiesce_vsi(vsi);
7589 return ret;
7590}
7591
7592
7593
7594
7595
7596
7597
7598
7599static inline void
7600i40e_set_cld_element(struct i40e_cloud_filter *filter,
7601 struct i40e_aqc_cloud_filters_element_data *cld)
7602{
7603 int i, j;
7604 u32 ipa;
7605
7606 memset(cld, 0, sizeof(*cld));
7607 ether_addr_copy(cld->outer_mac, filter->dst_mac);
7608 ether_addr_copy(cld->inner_mac, filter->src_mac);
7609
7610 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
7611 return;
7612
7613 if (filter->n_proto == ETH_P_IPV6) {
7614#define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
7615 for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
7616 i++, j += 2) {
7617 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
7618 ipa = cpu_to_le32(ipa);
7619 memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
7620 }
7621 } else {
7622 ipa = be32_to_cpu(filter->dst_ipv4);
7623 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
7624 }
7625
7626 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
7627
7628
7629
7630
7631 if (filter->tenant_id)
7632 return;
7633}
7634
7635
7636
7637
7638
7639
7640
7641
7642
7643
7644int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
7645 struct i40e_cloud_filter *filter, bool add)
7646{
7647 struct i40e_aqc_cloud_filters_element_data cld_filter;
7648 struct i40e_pf *pf = vsi->back;
7649 int ret;
7650 static const u16 flag_table[128] = {
7651 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
7652 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
7653 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
7654 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
7655 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
7656 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
7657 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
7658 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
7659 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
7660 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
7661 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
7662 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
7663 [I40E_CLOUD_FILTER_FLAGS_IIP] =
7664 I40E_AQC_ADD_CLOUD_FILTER_IIP,
7665 };
7666
7667 if (filter->flags >= ARRAY_SIZE(flag_table))
7668 return I40E_ERR_CONFIG;
7669
7670
7671 i40e_set_cld_element(filter, &cld_filter);
7672
7673 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
7674 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
7675 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
7676
7677 if (filter->n_proto == ETH_P_IPV6)
7678 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7679 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7680 else
7681 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7682 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7683
7684 if (add)
7685 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
7686 &cld_filter, 1);
7687 else
7688 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
7689 &cld_filter, 1);
7690 if (ret)
7691 dev_dbg(&pf->pdev->dev,
7692 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
7693 add ? "add" : "delete", filter->dst_port, ret,
7694 pf->hw.aq.asq_last_status);
7695 else
7696 dev_info(&pf->pdev->dev,
7697 "%s cloud filter for VSI: %d\n",
7698 add ? "Added" : "Deleted", filter->seid);
7699 return ret;
7700}
7701
7702
7703
7704
7705
7706
7707
7708
7709
7710
7711int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
7712 struct i40e_cloud_filter *filter,
7713 bool add)
7714{
7715 struct i40e_aqc_cloud_filters_element_bb cld_filter;
7716 struct i40e_pf *pf = vsi->back;
7717 int ret;
7718
7719
7720 if ((is_valid_ether_addr(filter->dst_mac) &&
7721 is_valid_ether_addr(filter->src_mac)) ||
7722 (is_multicast_ether_addr(filter->dst_mac) &&
7723 is_multicast_ether_addr(filter->src_mac)))
7724 return -EOPNOTSUPP;
7725
7726
7727
7728
7729 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
7730 return -EOPNOTSUPP;
7731
7732
7733 if (filter->src_port || filter->src_ipv4 ||
7734 !ipv6_addr_any(&filter->ip.v6.src_ip6))
7735 return -EOPNOTSUPP;
7736
7737
7738 i40e_set_cld_element(filter, &cld_filter.element);
7739
7740 if (is_valid_ether_addr(filter->dst_mac) ||
7741 is_valid_ether_addr(filter->src_mac) ||
7742 is_multicast_ether_addr(filter->dst_mac) ||
7743 is_multicast_ether_addr(filter->src_mac)) {
7744
7745 if (filter->dst_ipv4)
7746 return -EOPNOTSUPP;
7747
7748
7749
7750
7751
7752 cld_filter.element.flags =
7753 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
7754
7755 if (filter->vlan_id) {
7756 cld_filter.element.flags =
7757 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
7758 }
7759
7760 } else if (filter->dst_ipv4 ||
7761 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
7762 cld_filter.element.flags =
7763 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
7764 if (filter->n_proto == ETH_P_IPV6)
7765 cld_filter.element.flags |=
7766 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7767 else
7768 cld_filter.element.flags |=
7769 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7770 } else {
7771 dev_err(&pf->pdev->dev,
7772 "either mac or ip has to be valid for cloud filter\n");
7773 return -EINVAL;
7774 }
7775
7776
7777 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
7778 be16_to_cpu(filter->dst_port);
7779
7780 if (add) {
7781
7782 ret = i40e_validate_and_set_switch_mode(vsi);
7783 if (ret) {
7784 dev_err(&pf->pdev->dev,
7785 "failed to set switch mode, ret %d\n",
7786 ret);
7787 return ret;
7788 }
7789
7790 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
7791 &cld_filter, 1);
7792 } else {
7793 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
7794 &cld_filter, 1);
7795 }
7796
7797 if (ret)
7798 dev_dbg(&pf->pdev->dev,
7799 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
7800 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
7801 else
7802 dev_info(&pf->pdev->dev,
7803 "%s cloud filter for VSI: %d, L4 port: %d\n",
7804 add ? "add" : "delete", filter->seid,
7805 ntohs(filter->dst_port));
7806 return ret;
7807}
7808
7809
7810
7811
7812
7813
7814
7815
7816static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
7817 struct flow_cls_offload *f,
7818 struct i40e_cloud_filter *filter)
7819{
7820 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
7821 struct flow_dissector *dissector = rule->match.dissector;
7822 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
7823 struct i40e_pf *pf = vsi->back;
7824 u8 field_flags = 0;
7825
7826 if (dissector->used_keys &
7827 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7828 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7829 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7830 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7831 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7832 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7833 BIT(FLOW_DISSECTOR_KEY_PORTS) |
7834 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
7835 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
7836 dissector->used_keys);
7837 return -EOPNOTSUPP;
7838 }
7839
7840 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
7841 struct flow_match_enc_keyid match;
7842
7843 flow_rule_match_enc_keyid(rule, &match);
7844 if (match.mask->keyid != 0)
7845 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
7846
7847 filter->tenant_id = be32_to_cpu(match.key->keyid);
7848 }
7849
7850 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
7851 struct flow_match_basic match;
7852
7853 flow_rule_match_basic(rule, &match);
7854 n_proto_key = ntohs(match.key->n_proto);
7855 n_proto_mask = ntohs(match.mask->n_proto);
7856
7857 if (n_proto_key == ETH_P_ALL) {
7858 n_proto_key = 0;
7859 n_proto_mask = 0;
7860 }
7861 filter->n_proto = n_proto_key & n_proto_mask;
7862 filter->ip_proto = match.key->ip_proto;
7863 }
7864
7865 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7866 struct flow_match_eth_addrs match;
7867
7868 flow_rule_match_eth_addrs(rule, &match);
7869
7870
7871 if (!is_zero_ether_addr(match.mask->dst)) {
7872 if (is_broadcast_ether_addr(match.mask->dst)) {
7873 field_flags |= I40E_CLOUD_FIELD_OMAC;
7874 } else {
7875 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
7876 match.mask->dst);
7877 return I40E_ERR_CONFIG;
7878 }
7879 }
7880
7881 if (!is_zero_ether_addr(match.mask->src)) {
7882 if (is_broadcast_ether_addr(match.mask->src)) {
7883 field_flags |= I40E_CLOUD_FIELD_IMAC;
7884 } else {
7885 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
7886 match.mask->src);
7887 return I40E_ERR_CONFIG;
7888 }
7889 }
7890 ether_addr_copy(filter->dst_mac, match.key->dst);
7891 ether_addr_copy(filter->src_mac, match.key->src);
7892 }
7893
7894 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
7895 struct flow_match_vlan match;
7896
7897 flow_rule_match_vlan(rule, &match);
7898 if (match.mask->vlan_id) {
7899 if (match.mask->vlan_id == VLAN_VID_MASK) {
7900 field_flags |= I40E_CLOUD_FIELD_IVLAN;
7901
7902 } else {
7903 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
7904 match.mask->vlan_id);
7905 return I40E_ERR_CONFIG;
7906 }
7907 }
7908
7909 filter->vlan_id = cpu_to_be16(match.key->vlan_id);
7910 }
7911
7912 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
7913 struct flow_match_control match;
7914
7915 flow_rule_match_control(rule, &match);
7916 addr_type = match.key->addr_type;
7917 }
7918
7919 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7920 struct flow_match_ipv4_addrs match;
7921
7922 flow_rule_match_ipv4_addrs(rule, &match);
7923 if (match.mask->dst) {
7924 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
7925 field_flags |= I40E_CLOUD_FIELD_IIP;
7926 } else {
7927 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
7928 &match.mask->dst);
7929 return I40E_ERR_CONFIG;
7930 }
7931 }
7932
7933 if (match.mask->src) {
7934 if (match.mask->src == cpu_to_be32(0xffffffff)) {
7935 field_flags |= I40E_CLOUD_FIELD_IIP;
7936 } else {
7937 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
7938 &match.mask->src);
7939 return I40E_ERR_CONFIG;
7940 }
7941 }
7942
7943 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
7944 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
7945 return I40E_ERR_CONFIG;
7946 }
7947 filter->dst_ipv4 = match.key->dst;
7948 filter->src_ipv4 = match.key->src;
7949 }
7950
7951 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7952 struct flow_match_ipv6_addrs match;
7953
7954 flow_rule_match_ipv6_addrs(rule, &match);
7955
7956
7957
7958
7959 if (ipv6_addr_loopback(&match.key->dst) ||
7960 ipv6_addr_loopback(&match.key->src)) {
7961 dev_err(&pf->pdev->dev,
7962 "Bad ipv6, addr is LOOPBACK\n");
7963 return I40E_ERR_CONFIG;
7964 }
7965 if (!ipv6_addr_any(&match.mask->dst) ||
7966 !ipv6_addr_any(&match.mask->src))
7967 field_flags |= I40E_CLOUD_FIELD_IIP;
7968
7969 memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
7970 sizeof(filter->src_ipv6));
7971 memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
7972 sizeof(filter->dst_ipv6));
7973 }
7974
7975 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
7976 struct flow_match_ports match;
7977
7978 flow_rule_match_ports(rule, &match);
7979 if (match.mask->src) {
7980 if (match.mask->src == cpu_to_be16(0xffff)) {
7981 field_flags |= I40E_CLOUD_FIELD_IIP;
7982 } else {
7983 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
7984 be16_to_cpu(match.mask->src));
7985 return I40E_ERR_CONFIG;
7986 }
7987 }
7988
7989 if (match.mask->dst) {
7990 if (match.mask->dst == cpu_to_be16(0xffff)) {
7991 field_flags |= I40E_CLOUD_FIELD_IIP;
7992 } else {
7993 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
7994 be16_to_cpu(match.mask->dst));
7995 return I40E_ERR_CONFIG;
7996 }
7997 }
7998
7999 filter->dst_port = match.key->dst;
8000 filter->src_port = match.key->src;
8001
8002 switch (filter->ip_proto) {
8003 case IPPROTO_TCP:
8004 case IPPROTO_UDP:
8005 break;
8006 default:
8007 dev_err(&pf->pdev->dev,
8008 "Only UDP and TCP transport are supported\n");
8009 return -EINVAL;
8010 }
8011 }
8012 filter->flags = field_flags;
8013 return 0;
8014}
8015
8016
8017
8018
8019
8020
8021
8022
8023static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
8024 struct i40e_cloud_filter *filter)
8025{
8026 struct i40e_channel *ch, *ch_tmp;
8027
8028
8029 if (tc == 0) {
8030 filter->seid = vsi->seid;
8031 return 0;
8032 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
8033 if (!filter->dst_port) {
8034 dev_err(&vsi->back->pdev->dev,
8035 "Specify destination port to direct to traffic class that is not default\n");
8036 return -EINVAL;
8037 }
8038 if (list_empty(&vsi->ch_list))
8039 return -EINVAL;
8040 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
8041 list) {
8042 if (ch->seid == vsi->tc_seid_map[tc])
8043 filter->seid = ch->seid;
8044 }
8045 return 0;
8046 }
8047 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
8048 return -EINVAL;
8049}
8050
8051
8052
8053
8054
8055
8056
8057static int i40e_configure_clsflower(struct i40e_vsi *vsi,
8058 struct flow_cls_offload *cls_flower)
8059{
8060 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
8061 struct i40e_cloud_filter *filter = NULL;
8062 struct i40e_pf *pf = vsi->back;
8063 int err = 0;
8064
8065 if (tc < 0) {
8066 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
8067 return -EOPNOTSUPP;
8068 }
8069
8070 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
8071 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
8072 return -EBUSY;
8073
8074 if (pf->fdir_pf_active_filters ||
8075 (!hlist_empty(&pf->fdir_filter_list))) {
8076 dev_err(&vsi->back->pdev->dev,
8077 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
8078 return -EINVAL;
8079 }
8080
8081 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
8082 dev_err(&vsi->back->pdev->dev,
8083 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
8084 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8085 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8086 }
8087
8088 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
8089 if (!filter)
8090 return -ENOMEM;
8091
8092 filter->cookie = cls_flower->cookie;
8093
8094 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
8095 if (err < 0)
8096 goto err;
8097
8098 err = i40e_handle_tclass(vsi, tc, filter);
8099 if (err < 0)
8100 goto err;
8101
8102
8103 if (filter->dst_port)
8104 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
8105 else
8106 err = i40e_add_del_cloud_filter(vsi, filter, true);
8107
8108 if (err) {
8109 dev_err(&pf->pdev->dev,
8110 "Failed to add cloud filter, err %s\n",
8111 i40e_stat_str(&pf->hw, err));
8112 goto err;
8113 }
8114
8115
8116 INIT_HLIST_NODE(&filter->cloud_node);
8117
8118 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
8119
8120 pf->num_cloud_filters++;
8121
8122 return err;
8123err:
8124 kfree(filter);
8125 return err;
8126}
8127
8128
8129
8130
8131
8132
8133
8134static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
8135 unsigned long *cookie)
8136{
8137 struct i40e_cloud_filter *filter = NULL;
8138 struct hlist_node *node2;
8139
8140 hlist_for_each_entry_safe(filter, node2,
8141 &vsi->back->cloud_filter_list, cloud_node)
8142 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
8143 return filter;
8144 return NULL;
8145}
8146
8147
8148
8149
8150
8151
8152
8153static int i40e_delete_clsflower(struct i40e_vsi *vsi,
8154 struct flow_cls_offload *cls_flower)
8155{
8156 struct i40e_cloud_filter *filter = NULL;
8157 struct i40e_pf *pf = vsi->back;
8158 int err = 0;
8159
8160 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
8161
8162 if (!filter)
8163 return -EINVAL;
8164
8165 hash_del(&filter->cloud_node);
8166
8167 if (filter->dst_port)
8168 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
8169 else
8170 err = i40e_add_del_cloud_filter(vsi, filter, false);
8171
8172 kfree(filter);
8173 if (err) {
8174 dev_err(&pf->pdev->dev,
8175 "Failed to delete cloud filter, err %s\n",
8176 i40e_stat_str(&pf->hw, err));
8177 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
8178 }
8179
8180 pf->num_cloud_filters--;
8181 if (!pf->num_cloud_filters)
8182 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8183 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8184 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8185 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8186 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8187 }
8188 return 0;
8189}
8190
8191
8192
8193
8194
8195
8196static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
8197 struct flow_cls_offload *cls_flower)
8198{
8199 struct i40e_vsi *vsi = np->vsi;
8200
8201 switch (cls_flower->command) {
8202 case FLOW_CLS_REPLACE:
8203 return i40e_configure_clsflower(vsi, cls_flower);
8204 case FLOW_CLS_DESTROY:
8205 return i40e_delete_clsflower(vsi, cls_flower);
8206 case FLOW_CLS_STATS:
8207 return -EOPNOTSUPP;
8208 default:
8209 return -EOPNOTSUPP;
8210 }
8211}
8212
8213static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
8214 void *cb_priv)
8215{
8216 struct i40e_netdev_priv *np = cb_priv;
8217
8218 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
8219 return -EOPNOTSUPP;
8220
8221 switch (type) {
8222 case TC_SETUP_CLSFLOWER:
8223 return i40e_setup_tc_cls_flower(np, type_data);
8224
8225 default:
8226 return -EOPNOTSUPP;
8227 }
8228}
8229
8230static LIST_HEAD(i40e_block_cb_list);
8231
8232static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8233 void *type_data)
8234{
8235 struct i40e_netdev_priv *np = netdev_priv(netdev);
8236
8237 switch (type) {
8238 case TC_SETUP_QDISC_MQPRIO:
8239 return i40e_setup_tc(netdev, type_data);
8240 case TC_SETUP_BLOCK:
8241 return flow_block_cb_setup_simple(type_data,
8242 &i40e_block_cb_list,
8243 i40e_setup_tc_block_cb,
8244 np, np, true);
8245 default:
8246 return -EOPNOTSUPP;
8247 }
8248}
8249
8250
8251
8252
8253
8254
8255
8256
8257
8258
8259
8260
8261
8262int i40e_open(struct net_device *netdev)
8263{
8264 struct i40e_netdev_priv *np = netdev_priv(netdev);
8265 struct i40e_vsi *vsi = np->vsi;
8266 struct i40e_pf *pf = vsi->back;
8267 int err;
8268
8269
8270 if (test_bit(__I40E_TESTING, pf->state) ||
8271 test_bit(__I40E_BAD_EEPROM, pf->state))
8272 return -EBUSY;
8273
8274 netif_carrier_off(netdev);
8275
8276 if (i40e_force_link_state(pf, true))
8277 return -EAGAIN;
8278
8279 err = i40e_vsi_open(vsi);
8280 if (err)
8281 return err;
8282
8283
8284 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
8285 TCP_FLAG_FIN) >> 16);
8286 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
8287 TCP_FLAG_FIN |
8288 TCP_FLAG_CWR) >> 16);
8289 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
8290
8291 udp_tunnel_get_rx_info(netdev);
8292
8293 return 0;
8294}
8295
8296
8297
8298
8299
8300
8301
8302
8303
8304
8305
8306int i40e_vsi_open(struct i40e_vsi *vsi)
8307{
8308 struct i40e_pf *pf = vsi->back;
8309 char int_name[I40E_INT_NAME_STR_LEN];
8310 int err;
8311
8312
8313 err = i40e_vsi_setup_tx_resources(vsi);
8314 if (err)
8315 goto err_setup_tx;
8316 err = i40e_vsi_setup_rx_resources(vsi);
8317 if (err)
8318 goto err_setup_rx;
8319
8320 err = i40e_vsi_configure(vsi);
8321 if (err)
8322 goto err_setup_rx;
8323
8324 if (vsi->netdev) {
8325 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
8326 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
8327 err = i40e_vsi_request_irq(vsi, int_name);
8328 if (err)
8329 goto err_setup_rx;
8330
8331
8332 err = netif_set_real_num_tx_queues(vsi->netdev,
8333 vsi->num_queue_pairs);
8334 if (err)
8335 goto err_set_queues;
8336
8337 err = netif_set_real_num_rx_queues(vsi->netdev,
8338 vsi->num_queue_pairs);
8339 if (err)
8340 goto err_set_queues;
8341
8342 } else if (vsi->type == I40E_VSI_FDIR) {
8343 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
8344 dev_driver_string(&pf->pdev->dev),
8345 dev_name(&pf->pdev->dev));
8346 err = i40e_vsi_request_irq(vsi, int_name);
8347
8348 } else {
8349 err = -EINVAL;
8350 goto err_setup_rx;
8351 }
8352
8353 err = i40e_up_complete(vsi);
8354 if (err)
8355 goto err_up_complete;
8356
8357 return 0;
8358
8359err_up_complete:
8360 i40e_down(vsi);
8361err_set_queues:
8362 i40e_vsi_free_irq(vsi);
8363err_setup_rx:
8364 i40e_vsi_free_rx_resources(vsi);
8365err_setup_tx:
8366 i40e_vsi_free_tx_resources(vsi);
8367 if (vsi == pf->vsi[pf->lan_vsi])
8368 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
8369
8370 return err;
8371}
8372
8373
8374
8375
8376
8377
8378
8379
8380static void i40e_fdir_filter_exit(struct i40e_pf *pf)
8381{
8382 struct i40e_fdir_filter *filter;
8383 struct i40e_flex_pit *pit_entry, *tmp;
8384 struct hlist_node *node2;
8385
8386 hlist_for_each_entry_safe(filter, node2,
8387 &pf->fdir_filter_list, fdir_node) {
8388 hlist_del(&filter->fdir_node);
8389 kfree(filter);
8390 }
8391
8392 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
8393 list_del(&pit_entry->list);
8394 kfree(pit_entry);
8395 }
8396 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
8397
8398 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
8399 list_del(&pit_entry->list);
8400 kfree(pit_entry);
8401 }
8402 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
8403
8404 pf->fdir_pf_active_filters = 0;
8405 pf->fd_tcp4_filter_cnt = 0;
8406 pf->fd_udp4_filter_cnt = 0;
8407 pf->fd_sctp4_filter_cnt = 0;
8408 pf->fd_ip4_filter_cnt = 0;
8409
8410
8411 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8412 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8413 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8414
8415
8416 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
8417 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8418 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8419
8420
8421 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
8422 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8423 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8424
8425
8426 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
8427 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8428
8429 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
8430 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8431}
8432
8433
8434
8435
8436
8437
8438
8439
8440static void i40e_cloud_filter_exit(struct i40e_pf *pf)
8441{
8442 struct i40e_cloud_filter *cfilter;
8443 struct hlist_node *node;
8444
8445 hlist_for_each_entry_safe(cfilter, node,
8446 &pf->cloud_filter_list, cloud_node) {
8447 hlist_del(&cfilter->cloud_node);
8448 kfree(cfilter);
8449 }
8450 pf->num_cloud_filters = 0;
8451
8452 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8453 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8454 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8455 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8456 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8457 }
8458}
8459
8460
8461
8462
8463
8464
8465
8466
8467
8468
8469
8470int i40e_close(struct net_device *netdev)
8471{
8472 struct i40e_netdev_priv *np = netdev_priv(netdev);
8473 struct i40e_vsi *vsi = np->vsi;
8474
8475 i40e_vsi_close(vsi);
8476
8477 return 0;
8478}
8479
8480
8481
8482
8483
8484
8485
8486
8487
8488
8489
8490
8491void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
8492{
8493 u32 val;
8494
8495
8496 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
8497
8498
8499
8500
8501
8502
8503
8504
8505
8506 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
8507 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8508 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
8509 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
8510
8511 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
8512
8513
8514
8515
8516
8517 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
8518 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8519 val |= I40E_GLGEN_RTRIG_CORER_MASK;
8520 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
8521 i40e_flush(&pf->hw);
8522
8523 } else if (reset_flags & I40E_PF_RESET_FLAG) {
8524
8525
8526
8527
8528
8529
8530
8531
8532
8533 dev_dbg(&pf->pdev->dev, "PFR requested\n");
8534 i40e_handle_reset_warning(pf, lock_acquired);
8535
8536 dev_info(&pf->pdev->dev,
8537 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
8538 "FW LLDP is disabled\n" :
8539 "FW LLDP is enabled\n");
8540
8541 } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
8542
8543
8544
8545
8546 i40e_prep_for_reset(pf, lock_acquired);
8547 i40e_reset_and_rebuild(pf, true, lock_acquired);
8548
8549 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
8550 int v;
8551
8552
8553 dev_info(&pf->pdev->dev,
8554 "VSI reinit requested\n");
8555 for (v = 0; v < pf->num_alloc_vsi; v++) {
8556 struct i40e_vsi *vsi = pf->vsi[v];
8557
8558 if (vsi != NULL &&
8559 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
8560 vsi->state))
8561 i40e_vsi_reinit_locked(pf->vsi[v]);
8562 }
8563 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
8564 int v;
8565
8566
8567 dev_info(&pf->pdev->dev, "VSI down requested\n");
8568 for (v = 0; v < pf->num_alloc_vsi; v++) {
8569 struct i40e_vsi *vsi = pf->vsi[v];
8570
8571 if (vsi != NULL &&
8572 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
8573 vsi->state)) {
8574 set_bit(__I40E_VSI_DOWN, vsi->state);
8575 i40e_down(vsi);
8576 }
8577 }
8578 } else {
8579 dev_info(&pf->pdev->dev,
8580 "bad reset request 0x%08x\n", reset_flags);
8581 }
8582}
8583
8584#ifdef CONFIG_I40E_DCB
8585
8586
8587
8588
8589
8590
8591bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
8592 struct i40e_dcbx_config *old_cfg,
8593 struct i40e_dcbx_config *new_cfg)
8594{
8595 bool need_reconfig = false;
8596
8597
8598 if (memcmp(&new_cfg->etscfg,
8599 &old_cfg->etscfg,
8600 sizeof(new_cfg->etscfg))) {
8601
8602 if (memcmp(&new_cfg->etscfg.prioritytable,
8603 &old_cfg->etscfg.prioritytable,
8604 sizeof(new_cfg->etscfg.prioritytable))) {
8605 need_reconfig = true;
8606 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
8607 }
8608
8609 if (memcmp(&new_cfg->etscfg.tcbwtable,
8610 &old_cfg->etscfg.tcbwtable,
8611 sizeof(new_cfg->etscfg.tcbwtable)))
8612 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
8613
8614 if (memcmp(&new_cfg->etscfg.tsatable,
8615 &old_cfg->etscfg.tsatable,
8616 sizeof(new_cfg->etscfg.tsatable)))
8617 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
8618 }
8619
8620
8621 if (memcmp(&new_cfg->pfc,
8622 &old_cfg->pfc,
8623 sizeof(new_cfg->pfc))) {
8624 need_reconfig = true;
8625 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
8626 }
8627
8628
8629 if (memcmp(&new_cfg->app,
8630 &old_cfg->app,
8631 sizeof(new_cfg->app))) {
8632 need_reconfig = true;
8633 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
8634 }
8635
8636 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
8637 return need_reconfig;
8638}
8639
8640
8641
8642
8643
8644
8645static int i40e_handle_lldp_event(struct i40e_pf *pf,
8646 struct i40e_arq_event_info *e)
8647{
8648 struct i40e_aqc_lldp_get_mib *mib =
8649 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
8650 struct i40e_hw *hw = &pf->hw;
8651 struct i40e_dcbx_config tmp_dcbx_cfg;
8652 bool need_reconfig = false;
8653 int ret = 0;
8654 u8 type;
8655
8656
8657 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
8658 return ret;
8659
8660
8661 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
8662 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
8663 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
8664 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
8665 return ret;
8666
8667
8668 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
8669 dev_dbg(&pf->pdev->dev,
8670 "LLDP event mib type %s\n", type ? "remote" : "local");
8671 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
8672
8673 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
8674 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
8675 &hw->remote_dcbx_config);
8676 goto exit;
8677 }
8678
8679
8680 tmp_dcbx_cfg = hw->local_dcbx_config;
8681
8682
8683 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
8684
8685 ret = i40e_get_dcb_config(&pf->hw);
8686 if (ret) {
8687 dev_info(&pf->pdev->dev,
8688 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
8689 i40e_stat_str(&pf->hw, ret),
8690 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8691 goto exit;
8692 }
8693
8694
8695 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
8696 sizeof(tmp_dcbx_cfg))) {
8697 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
8698 goto exit;
8699 }
8700
8701 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
8702 &hw->local_dcbx_config);
8703
8704 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
8705
8706 if (!need_reconfig)
8707 goto exit;
8708
8709
8710 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
8711 pf->flags |= I40E_FLAG_DCB_ENABLED;
8712 else
8713 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8714
8715 set_bit(__I40E_PORT_SUSPENDED, pf->state);
8716
8717 i40e_pf_quiesce_all_vsi(pf);
8718
8719
8720 i40e_dcb_reconfigure(pf);
8721
8722 ret = i40e_resume_port_tx(pf);
8723
8724 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
8725
8726 if (ret)
8727 goto exit;
8728
8729
8730 ret = i40e_pf_wait_queues_disabled(pf);
8731 if (ret) {
8732
8733 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8734 i40e_service_event_schedule(pf);
8735 } else {
8736 i40e_pf_unquiesce_all_vsi(pf);
8737 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
8738 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
8739 }
8740
8741exit:
8742 return ret;
8743}
8744#endif
8745
8746
8747
8748
8749
8750
8751
8752void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
8753{
8754 rtnl_lock();
8755 i40e_do_reset(pf, reset_flags, true);
8756 rtnl_unlock();
8757}
8758
8759
8760
8761
8762
8763
8764
8765
8766
8767static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
8768 struct i40e_arq_event_info *e)
8769{
8770 struct i40e_aqc_lan_overflow *data =
8771 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
8772 u32 queue = le32_to_cpu(data->prtdcb_rupto);
8773 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
8774 struct i40e_hw *hw = &pf->hw;
8775 struct i40e_vf *vf;
8776 u16 vf_id;
8777
8778 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
8779 queue, qtx_ctl);
8780
8781
8782 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
8783 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
8784 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
8785 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
8786 vf_id -= hw->func_caps.vf_base_id;
8787 vf = &pf->vf[vf_id];
8788 i40e_vc_notify_vf_reset(vf);
8789
8790 msleep(20);
8791 i40e_reset_vf(vf, false);
8792 }
8793}
8794
8795
8796
8797
8798
8799u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
8800{
8801 u32 val, fcnt_prog;
8802
8803 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8804 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
8805 return fcnt_prog;
8806}
8807
8808
8809
8810
8811
8812u32 i40e_get_current_fd_count(struct i40e_pf *pf)
8813{
8814 u32 val, fcnt_prog;
8815
8816 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8817 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
8818 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
8819 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
8820 return fcnt_prog;
8821}
8822
8823
8824
8825
8826
8827u32 i40e_get_global_fd_count(struct i40e_pf *pf)
8828{
8829 u32 val, fcnt_prog;
8830
8831 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
8832 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
8833 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
8834 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
8835 return fcnt_prog;
8836}
8837
8838
8839
8840
8841
8842static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
8843{
8844 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
8845 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
8846 (I40E_DEBUG_FD & pf->hw.debug_mask))
8847 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
8848}
8849
8850
8851
8852
8853
8854static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
8855{
8856 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
8857
8858
8859
8860
8861
8862 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8863 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8864 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8865
8866 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8867 (I40E_DEBUG_FD & pf->hw.debug_mask))
8868 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
8869 }
8870}
8871
8872
8873
8874
8875
8876
8877static void i40e_delete_invalid_filter(struct i40e_pf *pf,
8878 struct i40e_fdir_filter *filter)
8879{
8880
8881 pf->fdir_pf_active_filters--;
8882 pf->fd_inv = 0;
8883
8884 switch (filter->flow_type) {
8885 case TCP_V4_FLOW:
8886 pf->fd_tcp4_filter_cnt--;
8887 break;
8888 case UDP_V4_FLOW:
8889 pf->fd_udp4_filter_cnt--;
8890 break;
8891 case SCTP_V4_FLOW:
8892 pf->fd_sctp4_filter_cnt--;
8893 break;
8894 case IP_USER_FLOW:
8895 switch (filter->ip4_proto) {
8896 case IPPROTO_TCP:
8897 pf->fd_tcp4_filter_cnt--;
8898 break;
8899 case IPPROTO_UDP:
8900 pf->fd_udp4_filter_cnt--;
8901 break;
8902 case IPPROTO_SCTP:
8903 pf->fd_sctp4_filter_cnt--;
8904 break;
8905 case IPPROTO_IP:
8906 pf->fd_ip4_filter_cnt--;
8907 break;
8908 }
8909 break;
8910 }
8911
8912
8913 hlist_del(&filter->fdir_node);
8914 kfree(filter);
8915}
8916
8917
8918
8919
8920
8921void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
8922{
8923 struct i40e_fdir_filter *filter;
8924 u32 fcnt_prog, fcnt_avail;
8925 struct hlist_node *node;
8926
8927 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8928 return;
8929
8930
8931 fcnt_prog = i40e_get_global_fd_count(pf);
8932 fcnt_avail = pf->fdir_pf_filter_count;
8933 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
8934 (pf->fd_add_err == 0) ||
8935 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
8936 i40e_reenable_fdir_sb(pf);
8937
8938
8939
8940
8941
8942 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
8943 (pf->fd_tcp4_filter_cnt == 0))
8944 i40e_reenable_fdir_atr(pf);
8945
8946
8947 if (pf->fd_inv > 0) {
8948 hlist_for_each_entry_safe(filter, node,
8949 &pf->fdir_filter_list, fdir_node)
8950 if (filter->fd_id == pf->fd_inv)
8951 i40e_delete_invalid_filter(pf, filter);
8952 }
8953}
8954
8955#define I40E_MIN_FD_FLUSH_INTERVAL 10
8956#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
8957
8958
8959
8960
8961static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
8962{
8963 unsigned long min_flush_time;
8964 int flush_wait_retry = 50;
8965 bool disable_atr = false;
8966 int fd_room;
8967 int reg;
8968
8969 if (!time_after(jiffies, pf->fd_flush_timestamp +
8970 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
8971 return;
8972
8973
8974
8975
8976 min_flush_time = pf->fd_flush_timestamp +
8977 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
8978 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
8979
8980 if (!(time_after(jiffies, min_flush_time)) &&
8981 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
8982 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8983 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
8984 disable_atr = true;
8985 }
8986
8987 pf->fd_flush_timestamp = jiffies;
8988 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8989
8990 wr32(&pf->hw, I40E_PFQF_CTL_1,
8991 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
8992 i40e_flush(&pf->hw);
8993 pf->fd_flush_cnt++;
8994 pf->fd_add_err = 0;
8995 do {
8996
8997 usleep_range(5000, 6000);
8998 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
8999 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
9000 break;
9001 } while (flush_wait_retry--);
9002 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
9003 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
9004 } else {
9005
9006 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
9007 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
9008 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9009 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
9010 if (I40E_DEBUG_FD & pf->hw.debug_mask)
9011 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
9012 }
9013}
9014
9015
9016
9017
9018
9019u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
9020{
9021 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
9022}
9023
9024
9025
9026
9027
9028static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
9029{
9030
9031
9032 if (test_bit(__I40E_DOWN, pf->state))
9033 return;
9034
9035 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9036 i40e_fdir_flush_and_replay(pf);
9037
9038 i40e_fdir_check_and_reenable(pf);
9039
9040}
9041
9042
9043
9044
9045
9046
9047static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
9048{
9049 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
9050 return;
9051
9052 switch (vsi->type) {
9053 case I40E_VSI_MAIN:
9054 if (!vsi->netdev || !vsi->netdev_registered)
9055 break;
9056
9057 if (link_up) {
9058 netif_carrier_on(vsi->netdev);
9059 netif_tx_wake_all_queues(vsi->netdev);
9060 } else {
9061 netif_carrier_off(vsi->netdev);
9062 netif_tx_stop_all_queues(vsi->netdev);
9063 }
9064 break;
9065
9066 case I40E_VSI_SRIOV:
9067 case I40E_VSI_VMDQ2:
9068 case I40E_VSI_CTRL:
9069 case I40E_VSI_IWARP:
9070 case I40E_VSI_MIRROR:
9071 default:
9072
9073 break;
9074 }
9075}
9076
9077
9078
9079
9080
9081
9082static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
9083{
9084 struct i40e_pf *pf;
9085 int i;
9086
9087 if (!veb || !veb->pf)
9088 return;
9089 pf = veb->pf;
9090
9091
9092 for (i = 0; i < I40E_MAX_VEB; i++)
9093 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
9094 i40e_veb_link_event(pf->veb[i], link_up);
9095
9096
9097 for (i = 0; i < pf->num_alloc_vsi; i++)
9098 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
9099 i40e_vsi_link_event(pf->vsi[i], link_up);
9100}
9101
9102
9103
9104
9105
9106static void i40e_link_event(struct i40e_pf *pf)
9107{
9108 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9109 u8 new_link_speed, old_link_speed;
9110 i40e_status status;
9111 bool new_link, old_link;
9112
9113
9114 pf->hw.phy.get_link_info = true;
9115 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
9116 status = i40e_get_link_status(&pf->hw, &new_link);
9117
9118
9119 if (status == I40E_SUCCESS) {
9120 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9121 } else {
9122
9123
9124
9125 set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9126 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
9127 status);
9128 return;
9129 }
9130
9131 old_link_speed = pf->hw.phy.link_info_old.link_speed;
9132 new_link_speed = pf->hw.phy.link_info.link_speed;
9133
9134 if (new_link == old_link &&
9135 new_link_speed == old_link_speed &&
9136 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
9137 new_link == netif_carrier_ok(vsi->netdev)))
9138 return;
9139
9140 i40e_print_link_message(vsi, new_link);
9141
9142
9143
9144
9145 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
9146 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
9147 else
9148 i40e_vsi_link_event(vsi, new_link);
9149
9150 if (pf->vf)
9151 i40e_vc_notify_link_state(pf);
9152
9153 if (pf->flags & I40E_FLAG_PTP)
9154 i40e_ptp_set_increment(pf);
9155}
9156
9157
9158
9159
9160
9161static void i40e_watchdog_subtask(struct i40e_pf *pf)
9162{
9163 int i;
9164
9165
9166 if (test_bit(__I40E_DOWN, pf->state) ||
9167 test_bit(__I40E_CONFIG_BUSY, pf->state))
9168 return;
9169
9170
9171 if (time_before(jiffies, (pf->service_timer_previous +
9172 pf->service_timer_period)))
9173 return;
9174 pf->service_timer_previous = jiffies;
9175
9176 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
9177 test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
9178 i40e_link_event(pf);
9179
9180
9181
9182
9183 for (i = 0; i < pf->num_alloc_vsi; i++)
9184 if (pf->vsi[i] && pf->vsi[i]->netdev)
9185 i40e_update_stats(pf->vsi[i]);
9186
9187 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
9188
9189 for (i = 0; i < I40E_MAX_VEB; i++)
9190 if (pf->veb[i])
9191 i40e_update_veb_stats(pf->veb[i]);
9192 }
9193
9194 i40e_ptp_rx_hang(pf);
9195 i40e_ptp_tx_hang(pf);
9196}
9197
9198
9199
9200
9201
9202static void i40e_reset_subtask(struct i40e_pf *pf)
9203{
9204 u32 reset_flags = 0;
9205
9206 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
9207 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
9208 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
9209 }
9210 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
9211 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
9212 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9213 }
9214 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
9215 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
9216 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
9217 }
9218 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
9219 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
9220 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
9221 }
9222 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
9223 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
9224 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
9225 }
9226
9227
9228
9229
9230 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
9231 i40e_prep_for_reset(pf, false);
9232 i40e_reset(pf);
9233 i40e_rebuild(pf, false, false);
9234 }
9235
9236
9237 if (reset_flags &&
9238 !test_bit(__I40E_DOWN, pf->state) &&
9239 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
9240 i40e_do_reset(pf, reset_flags, false);
9241 }
9242}
9243
9244
9245
9246
9247
9248
9249static void i40e_handle_link_event(struct i40e_pf *pf,
9250 struct i40e_arq_event_info *e)
9251{
9252 struct i40e_aqc_get_link_status *status =
9253 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
9254
9255
9256
9257
9258
9259
9260
9261 i40e_link_event(pf);
9262
9263
9264 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
9265 dev_err(&pf->pdev->dev,
9266 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
9267 dev_err(&pf->pdev->dev,
9268 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9269 } else {
9270
9271
9272
9273 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
9274 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
9275 (!(status->link_info & I40E_AQ_LINK_UP)) &&
9276 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
9277 dev_err(&pf->pdev->dev,
9278 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
9279 dev_err(&pf->pdev->dev,
9280 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9281 }
9282 }
9283}
9284
9285
9286
9287
9288
9289static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
9290{
9291 struct i40e_arq_event_info event;
9292 struct i40e_hw *hw = &pf->hw;
9293 u16 pending, i = 0;
9294 i40e_status ret;
9295 u16 opcode;
9296 u32 oldval;
9297 u32 val;
9298
9299
9300 if (test_bit(__I40E_RESET_FAILED, pf->state))
9301 return;
9302
9303
9304 val = rd32(&pf->hw, pf->hw.aq.arq.len);
9305 oldval = val;
9306 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
9307 if (hw->debug_mask & I40E_DEBUG_AQ)
9308 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
9309 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
9310 }
9311 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
9312 if (hw->debug_mask & I40E_DEBUG_AQ)
9313 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
9314 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
9315 pf->arq_overflows++;
9316 }
9317 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
9318 if (hw->debug_mask & I40E_DEBUG_AQ)
9319 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
9320 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
9321 }
9322 if (oldval != val)
9323 wr32(&pf->hw, pf->hw.aq.arq.len, val);
9324
9325 val = rd32(&pf->hw, pf->hw.aq.asq.len);
9326 oldval = val;
9327 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
9328 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9329 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
9330 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
9331 }
9332 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
9333 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9334 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
9335 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
9336 }
9337 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
9338 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9339 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
9340 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
9341 }
9342 if (oldval != val)
9343 wr32(&pf->hw, pf->hw.aq.asq.len, val);
9344
9345 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
9346 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
9347 if (!event.msg_buf)
9348 return;
9349
9350 do {
9351 ret = i40e_clean_arq_element(hw, &event, &pending);
9352 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
9353 break;
9354 else if (ret) {
9355 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
9356 break;
9357 }
9358
9359 opcode = le16_to_cpu(event.desc.opcode);
9360 switch (opcode) {
9361
9362 case i40e_aqc_opc_get_link_status:
9363 i40e_handle_link_event(pf, &event);
9364 break;
9365 case i40e_aqc_opc_send_msg_to_pf:
9366 ret = i40e_vc_process_vf_msg(pf,
9367 le16_to_cpu(event.desc.retval),
9368 le32_to_cpu(event.desc.cookie_high),
9369 le32_to_cpu(event.desc.cookie_low),
9370 event.msg_buf,
9371 event.msg_len);
9372 break;
9373 case i40e_aqc_opc_lldp_update_mib:
9374 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
9375#ifdef CONFIG_I40E_DCB
9376 rtnl_lock();
9377 ret = i40e_handle_lldp_event(pf, &event);
9378 rtnl_unlock();
9379#endif
9380 break;
9381 case i40e_aqc_opc_event_lan_overflow:
9382 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
9383 i40e_handle_lan_overflow_event(pf, &event);
9384 break;
9385 case i40e_aqc_opc_send_msg_to_peer:
9386 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
9387 break;
9388 case i40e_aqc_opc_nvm_erase:
9389 case i40e_aqc_opc_nvm_update:
9390 case i40e_aqc_opc_oem_post_update:
9391 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
9392 "ARQ NVM operation 0x%04x completed\n",
9393 opcode);
9394 break;
9395 default:
9396 dev_info(&pf->pdev->dev,
9397 "ARQ: Unknown event 0x%04x ignored\n",
9398 opcode);
9399 break;
9400 }
9401 } while (i++ < pf->adminq_work_limit);
9402
9403 if (i < pf->adminq_work_limit)
9404 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
9405
9406
9407 val = rd32(hw, I40E_PFINT_ICR0_ENA);
9408 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
9409 wr32(hw, I40E_PFINT_ICR0_ENA, val);
9410 i40e_flush(hw);
9411
9412 kfree(event.msg_buf);
9413}
9414
9415
9416
9417
9418
9419static void i40e_verify_eeprom(struct i40e_pf *pf)
9420{
9421 int err;
9422
9423 err = i40e_diag_eeprom_test(&pf->hw);
9424 if (err) {
9425
9426 err = i40e_diag_eeprom_test(&pf->hw);
9427 if (err) {
9428 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
9429 err);
9430 set_bit(__I40E_BAD_EEPROM, pf->state);
9431 }
9432 }
9433
9434 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
9435 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
9436 clear_bit(__I40E_BAD_EEPROM, pf->state);
9437 }
9438}
9439
9440
9441
9442
9443
9444
9445
9446static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
9447{
9448 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9449 struct i40e_vsi_context ctxt;
9450 int ret;
9451
9452 ctxt.seid = pf->main_vsi_seid;
9453 ctxt.pf_num = pf->hw.pf_id;
9454 ctxt.vf_num = 0;
9455 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9456 if (ret) {
9457 dev_info(&pf->pdev->dev,
9458 "couldn't get PF vsi config, err %s aq_err %s\n",
9459 i40e_stat_str(&pf->hw, ret),
9460 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9461 return;
9462 }
9463 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9464 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9465 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9466
9467 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
9468 if (ret) {
9469 dev_info(&pf->pdev->dev,
9470 "update vsi switch failed, err %s aq_err %s\n",
9471 i40e_stat_str(&pf->hw, ret),
9472 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9473 }
9474}
9475
9476
9477
9478
9479
9480
9481
9482static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
9483{
9484 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9485 struct i40e_vsi_context ctxt;
9486 int ret;
9487
9488 ctxt.seid = pf->main_vsi_seid;
9489 ctxt.pf_num = pf->hw.pf_id;
9490 ctxt.vf_num = 0;
9491 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9492 if (ret) {
9493 dev_info(&pf->pdev->dev,
9494 "couldn't get PF vsi config, err %s aq_err %s\n",
9495 i40e_stat_str(&pf->hw, ret),
9496 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9497 return;
9498 }
9499 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9500 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9501 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9502
9503 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
9504 if (ret) {
9505 dev_info(&pf->pdev->dev,
9506 "update vsi switch failed, err %s aq_err %s\n",
9507 i40e_stat_str(&pf->hw, ret),
9508 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9509 }
9510}
9511
9512
9513
9514
9515
9516
9517
9518
9519
9520static void i40e_config_bridge_mode(struct i40e_veb *veb)
9521{
9522 struct i40e_pf *pf = veb->pf;
9523
9524 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
9525 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
9526 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
9527 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
9528 i40e_disable_pf_switch_lb(pf);
9529 else
9530 i40e_enable_pf_switch_lb(pf);
9531}
9532
9533
9534
9535
9536
9537
9538
9539
9540
9541
9542static int i40e_reconstitute_veb(struct i40e_veb *veb)
9543{
9544 struct i40e_vsi *ctl_vsi = NULL;
9545 struct i40e_pf *pf = veb->pf;
9546 int v, veb_idx;
9547 int ret;
9548
9549
9550 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
9551 if (pf->vsi[v] &&
9552 pf->vsi[v]->veb_idx == veb->idx &&
9553 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
9554 ctl_vsi = pf->vsi[v];
9555 break;
9556 }
9557 }
9558 if (!ctl_vsi) {
9559 dev_info(&pf->pdev->dev,
9560 "missing owner VSI for veb_idx %d\n", veb->idx);
9561 ret = -ENOENT;
9562 goto end_reconstitute;
9563 }
9564 if (ctl_vsi != pf->vsi[pf->lan_vsi])
9565 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
9566 ret = i40e_add_vsi(ctl_vsi);
9567 if (ret) {
9568 dev_info(&pf->pdev->dev,
9569 "rebuild of veb_idx %d owner VSI failed: %d\n",
9570 veb->idx, ret);
9571 goto end_reconstitute;
9572 }
9573 i40e_vsi_reset_stats(ctl_vsi);
9574
9575
9576 ret = i40e_add_veb(veb, ctl_vsi);
9577 if (ret)
9578 goto end_reconstitute;
9579
9580 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
9581 veb->bridge_mode = BRIDGE_MODE_VEB;
9582 else
9583 veb->bridge_mode = BRIDGE_MODE_VEPA;
9584 i40e_config_bridge_mode(veb);
9585
9586
9587 for (v = 0; v < pf->num_alloc_vsi; v++) {
9588 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
9589 continue;
9590
9591 if (pf->vsi[v]->veb_idx == veb->idx) {
9592 struct i40e_vsi *vsi = pf->vsi[v];
9593
9594 vsi->uplink_seid = veb->seid;
9595 ret = i40e_add_vsi(vsi);
9596 if (ret) {
9597 dev_info(&pf->pdev->dev,
9598 "rebuild of vsi_idx %d failed: %d\n",
9599 v, ret);
9600 goto end_reconstitute;
9601 }
9602 i40e_vsi_reset_stats(vsi);
9603 }
9604 }
9605
9606
9607 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
9608 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
9609 pf->veb[veb_idx]->uplink_seid = veb->seid;
9610 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
9611 if (ret)
9612 break;
9613 }
9614 }
9615
9616end_reconstitute:
9617 return ret;
9618}
9619
9620
9621
9622
9623
9624
9625static int i40e_get_capabilities(struct i40e_pf *pf,
9626 enum i40e_admin_queue_opc list_type)
9627{
9628 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
9629 u16 data_size;
9630 int buf_len;
9631 int err;
9632
9633 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
9634 do {
9635 cap_buf = kzalloc(buf_len, GFP_KERNEL);
9636 if (!cap_buf)
9637 return -ENOMEM;
9638
9639
9640 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
9641 &data_size, list_type,
9642 NULL);
9643
9644 kfree(cap_buf);
9645
9646 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
9647
9648 buf_len = data_size;
9649 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
9650 dev_info(&pf->pdev->dev,
9651 "capability discovery failed, err %s aq_err %s\n",
9652 i40e_stat_str(&pf->hw, err),
9653 i40e_aq_str(&pf->hw,
9654 pf->hw.aq.asq_last_status));
9655 return -ENODEV;
9656 }
9657 } while (err);
9658
9659 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
9660 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9661 dev_info(&pf->pdev->dev,
9662 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
9663 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
9664 pf->hw.func_caps.num_msix_vectors,
9665 pf->hw.func_caps.num_msix_vectors_vf,
9666 pf->hw.func_caps.fd_filters_guaranteed,
9667 pf->hw.func_caps.fd_filters_best_effort,
9668 pf->hw.func_caps.num_tx_qp,
9669 pf->hw.func_caps.num_vsis);
9670 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
9671 dev_info(&pf->pdev->dev,
9672 "switch_mode=0x%04x, function_valid=0x%08x\n",
9673 pf->hw.dev_caps.switch_mode,
9674 pf->hw.dev_caps.valid_functions);
9675 dev_info(&pf->pdev->dev,
9676 "SR-IOV=%d, num_vfs for all function=%u\n",
9677 pf->hw.dev_caps.sr_iov_1_1,
9678 pf->hw.dev_caps.num_vfs);
9679 dev_info(&pf->pdev->dev,
9680 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
9681 pf->hw.dev_caps.num_vsis,
9682 pf->hw.dev_caps.num_rx_qp,
9683 pf->hw.dev_caps.num_tx_qp);
9684 }
9685 }
9686 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9687#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
9688 + pf->hw.func_caps.num_vfs)
9689 if (pf->hw.revision_id == 0 &&
9690 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
9691 dev_info(&pf->pdev->dev,
9692 "got num_vsis %d, setting num_vsis to %d\n",
9693 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
9694 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
9695 }
9696 }
9697 return 0;
9698}
9699
9700static int i40e_vsi_clear(struct i40e_vsi *vsi);
9701
9702
9703
9704
9705
9706static void i40e_fdir_sb_setup(struct i40e_pf *pf)
9707{
9708 struct i40e_vsi *vsi;
9709
9710
9711
9712
9713 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
9714 static const u32 hkey[] = {
9715 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
9716 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
9717 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
9718 0x95b3a76d};
9719 int i;
9720
9721 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
9722 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
9723 }
9724
9725 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
9726 return;
9727
9728
9729 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
9730
9731
9732 if (!vsi) {
9733 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
9734 pf->vsi[pf->lan_vsi]->seid, 0);
9735 if (!vsi) {
9736 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
9737 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
9738 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
9739 return;
9740 }
9741 }
9742
9743 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
9744}
9745
9746
9747
9748
9749
9750static void i40e_fdir_teardown(struct i40e_pf *pf)
9751{
9752 struct i40e_vsi *vsi;
9753
9754 i40e_fdir_filter_exit(pf);
9755 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
9756 if (vsi)
9757 i40e_vsi_release(vsi);
9758}
9759
9760
9761
9762
9763
9764
9765
9766
9767
9768static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
9769{
9770 struct i40e_cloud_filter *cfilter;
9771 struct i40e_pf *pf = vsi->back;
9772 struct hlist_node *node;
9773 i40e_status ret;
9774
9775
9776 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
9777 cloud_node) {
9778 if (cfilter->seid != seid)
9779 continue;
9780
9781 if (cfilter->dst_port)
9782 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
9783 true);
9784 else
9785 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
9786
9787 if (ret) {
9788 dev_dbg(&pf->pdev->dev,
9789 "Failed to rebuild cloud filter, err %s aq_err %s\n",
9790 i40e_stat_str(&pf->hw, ret),
9791 i40e_aq_str(&pf->hw,
9792 pf->hw.aq.asq_last_status));
9793 return ret;
9794 }
9795 }
9796 return 0;
9797}
9798
9799
9800
9801
9802
9803
9804
9805static int i40e_rebuild_channels(struct i40e_vsi *vsi)
9806{
9807 struct i40e_channel *ch, *ch_tmp;
9808 i40e_status ret;
9809
9810 if (list_empty(&vsi->ch_list))
9811 return 0;
9812
9813 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
9814 if (!ch->initialized)
9815 break;
9816
9817 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
9818 if (ret) {
9819 dev_info(&vsi->back->pdev->dev,
9820 "failed to rebuild channels using uplink_seid %u\n",
9821 vsi->uplink_seid);
9822 return ret;
9823 }
9824
9825 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
9826 if (ret) {
9827 dev_info(&vsi->back->pdev->dev,
9828 "failed to configure TX rings for channel %u\n",
9829 ch->seid);
9830 return ret;
9831 }
9832
9833 vsi->next_base_queue = vsi->next_base_queue +
9834 ch->num_queue_pairs;
9835 if (ch->max_tx_rate) {
9836 u64 credits = ch->max_tx_rate;
9837
9838 if (i40e_set_bw_limit(vsi, ch->seid,
9839 ch->max_tx_rate))
9840 return -EINVAL;
9841
9842 do_div(credits, I40E_BW_CREDIT_DIVISOR);
9843 dev_dbg(&vsi->back->pdev->dev,
9844 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
9845 ch->max_tx_rate,
9846 credits,
9847 ch->seid);
9848 }
9849 ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
9850 if (ret) {
9851 dev_dbg(&vsi->back->pdev->dev,
9852 "Failed to rebuild cloud filters for channel VSI %u\n",
9853 ch->seid);
9854 return ret;
9855 }
9856 }
9857 return 0;
9858}
9859
9860
9861
9862
9863
9864
9865
9866
9867
9868static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
9869{
9870 struct i40e_hw *hw = &pf->hw;
9871 i40e_status ret = 0;
9872 u32 v;
9873
9874 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
9875 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
9876 return;
9877 if (i40e_check_asq_alive(&pf->hw))
9878 i40e_vc_notify_reset(pf);
9879
9880 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
9881
9882
9883
9884 if (!lock_acquired)
9885 rtnl_lock();
9886 i40e_pf_quiesce_all_vsi(pf);
9887 if (!lock_acquired)
9888 rtnl_unlock();
9889
9890 for (v = 0; v < pf->num_alloc_vsi; v++) {
9891 if (pf->vsi[v])
9892 pf->vsi[v]->seid = 0;
9893 }
9894
9895 i40e_shutdown_adminq(&pf->hw);
9896
9897
9898 if (hw->hmc.hmc_obj) {
9899 ret = i40e_shutdown_lan_hmc(hw);
9900 if (ret)
9901 dev_warn(&pf->pdev->dev,
9902 "shutdown_lan_hmc failed: %d\n", ret);
9903 }
9904
9905
9906
9907
9908 i40e_ptp_save_hw_time(pf);
9909}
9910
9911
9912
9913
9914
9915static void i40e_send_version(struct i40e_pf *pf)
9916{
9917 struct i40e_driver_version dv;
9918
9919 dv.major_version = 0xff;
9920 dv.minor_version = 0xff;
9921 dv.build_version = 0xff;
9922 dv.subbuild_version = 0;
9923 strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
9924 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
9925}
9926
9927
9928
9929
9930
9931static void i40e_get_oem_version(struct i40e_hw *hw)
9932{
9933 u16 block_offset = 0xffff;
9934 u16 block_length = 0;
9935 u16 capabilities = 0;
9936 u16 gen_snap = 0;
9937 u16 release = 0;
9938
9939#define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
9940#define I40E_NVM_OEM_LENGTH_OFFSET 0x00
9941#define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
9942#define I40E_NVM_OEM_GEN_OFFSET 0x02
9943#define I40E_NVM_OEM_RELEASE_OFFSET 0x03
9944#define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
9945#define I40E_NVM_OEM_LENGTH 3
9946
9947
9948 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
9949 if (block_offset == 0xffff)
9950 return;
9951
9952
9953 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
9954 &block_length);
9955 if (block_length < I40E_NVM_OEM_LENGTH)
9956 return;
9957
9958
9959 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
9960 &capabilities);
9961 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
9962 return;
9963
9964 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
9965 &gen_snap);
9966 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
9967 &release);
9968 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
9969 hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
9970}
9971
9972
9973
9974
9975
9976static int i40e_reset(struct i40e_pf *pf)
9977{
9978 struct i40e_hw *hw = &pf->hw;
9979 i40e_status ret;
9980
9981 ret = i40e_pf_reset(hw);
9982 if (ret) {
9983 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
9984 set_bit(__I40E_RESET_FAILED, pf->state);
9985 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
9986 } else {
9987 pf->pfr_count++;
9988 }
9989 return ret;
9990}
9991
9992
9993
9994
9995
9996
9997
9998
9999static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
10000{
10001 int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
10002 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10003 struct i40e_hw *hw = &pf->hw;
10004 u8 set_fc_aq_fail = 0;
10005 i40e_status ret;
10006 u32 val;
10007 int v;
10008
10009 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
10010 i40e_check_recovery_mode(pf)) {
10011 i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
10012 }
10013
10014 if (test_bit(__I40E_DOWN, pf->state) &&
10015 !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
10016 !old_recovery_mode_bit)
10017 goto clear_recovery;
10018 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
10019
10020
10021 ret = i40e_init_adminq(&pf->hw);
10022 if (ret) {
10023 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
10024 i40e_stat_str(&pf->hw, ret),
10025 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10026 goto clear_recovery;
10027 }
10028 i40e_get_oem_version(&pf->hw);
10029
10030 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
10031 ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
10032 hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
10033
10034
10035
10036
10037
10038 mdelay(300);
10039 }
10040
10041
10042 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
10043 i40e_verify_eeprom(pf);
10044
10045
10046
10047
10048
10049 if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
10050 old_recovery_mode_bit) {
10051 if (i40e_get_capabilities(pf,
10052 i40e_aqc_opc_list_func_capabilities))
10053 goto end_unlock;
10054
10055 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
10056
10057
10058
10059 if (i40e_setup_misc_vector_for_recovery_mode(pf))
10060 goto end_unlock;
10061 } else {
10062 if (!lock_acquired)
10063 rtnl_lock();
10064
10065
10066
10067
10068 free_irq(pf->pdev->irq, pf);
10069 i40e_clear_interrupt_scheme(pf);
10070 if (i40e_restore_interrupt_scheme(pf))
10071 goto end_unlock;
10072 }
10073
10074
10075 i40e_send_version(pf);
10076
10077
10078
10079
10080 goto end_unlock;
10081 }
10082
10083 i40e_clear_pxe_mode(hw);
10084 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
10085 if (ret)
10086 goto end_core_reset;
10087
10088 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10089 hw->func_caps.num_rx_qp, 0, 0);
10090 if (ret) {
10091 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
10092 goto end_core_reset;
10093 }
10094 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10095 if (ret) {
10096 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
10097 goto end_core_reset;
10098 }
10099
10100
10101 i40e_aq_set_dcb_parameters(hw, true, NULL);
10102
10103#ifdef CONFIG_I40E_DCB
10104 ret = i40e_init_pf_dcb(pf);
10105 if (ret) {
10106 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
10107 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10108
10109 }
10110#endif
10111
10112 if (!lock_acquired)
10113 rtnl_lock();
10114 ret = i40e_setup_pf_switch(pf, reinit);
10115 if (ret)
10116 goto end_unlock;
10117
10118
10119
10120
10121 ret = i40e_aq_set_phy_int_mask(&pf->hw,
10122 ~(I40E_AQ_EVENT_LINK_UPDOWN |
10123 I40E_AQ_EVENT_MEDIA_NA |
10124 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
10125 if (ret)
10126 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10127 i40e_stat_str(&pf->hw, ret),
10128 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10129
10130
10131 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
10132 if (ret)
10133 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
10134 i40e_stat_str(&pf->hw, ret),
10135 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10136
10137
10138
10139
10140
10141
10142
10143
10144 if (vsi->uplink_seid != pf->mac_seid) {
10145 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
10146
10147 for (v = 0; v < I40E_MAX_VEB; v++) {
10148 if (!pf->veb[v])
10149 continue;
10150
10151 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
10152 pf->veb[v]->uplink_seid == 0) {
10153 ret = i40e_reconstitute_veb(pf->veb[v]);
10154
10155 if (!ret)
10156 continue;
10157
10158
10159
10160
10161
10162
10163
10164 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
10165 dev_info(&pf->pdev->dev,
10166 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
10167 ret);
10168 vsi->uplink_seid = pf->mac_seid;
10169 break;
10170 } else if (pf->veb[v]->uplink_seid == 0) {
10171 dev_info(&pf->pdev->dev,
10172 "rebuild of orphan VEB failed: %d\n",
10173 ret);
10174 }
10175 }
10176 }
10177 }
10178
10179 if (vsi->uplink_seid == pf->mac_seid) {
10180 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
10181
10182 ret = i40e_add_vsi(vsi);
10183 if (ret) {
10184 dev_info(&pf->pdev->dev,
10185 "rebuild of Main VSI failed: %d\n", ret);
10186 goto end_unlock;
10187 }
10188 }
10189
10190 if (vsi->mqprio_qopt.max_rate[0]) {
10191 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
10192 u64 credits = 0;
10193
10194 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
10195 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
10196 if (ret)
10197 goto end_unlock;
10198
10199 credits = max_tx_rate;
10200 do_div(credits, I40E_BW_CREDIT_DIVISOR);
10201 dev_dbg(&vsi->back->pdev->dev,
10202 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10203 max_tx_rate,
10204 credits,
10205 vsi->seid);
10206 }
10207
10208 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
10209 if (ret)
10210 goto end_unlock;
10211
10212
10213
10214
10215 ret = i40e_rebuild_channels(vsi);
10216 if (ret)
10217 goto end_unlock;
10218
10219
10220
10221
10222
10223#define I40E_REG_MSS 0x000E64DC
10224#define I40E_REG_MSS_MIN_MASK 0x3FF0000
10225#define I40E_64BYTE_MSS 0x400000
10226 val = rd32(hw, I40E_REG_MSS);
10227 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
10228 val &= ~I40E_REG_MSS_MIN_MASK;
10229 val |= I40E_64BYTE_MSS;
10230 wr32(hw, I40E_REG_MSS, val);
10231 }
10232
10233 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
10234 msleep(75);
10235 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
10236 if (ret)
10237 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
10238 i40e_stat_str(&pf->hw, ret),
10239 i40e_aq_str(&pf->hw,
10240 pf->hw.aq.asq_last_status));
10241 }
10242
10243 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10244 ret = i40e_setup_misc_vector(pf);
10245
10246
10247
10248
10249
10250
10251
10252 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
10253 pf->main_vsi_seid);
10254
10255
10256 i40e_pf_unquiesce_all_vsi(pf);
10257
10258
10259 if (!lock_acquired)
10260 rtnl_unlock();
10261
10262
10263 ret = i40e_set_promiscuous(pf, pf->cur_promisc);
10264 if (ret)
10265 dev_warn(&pf->pdev->dev,
10266 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
10267 pf->cur_promisc ? "on" : "off",
10268 i40e_stat_str(&pf->hw, ret),
10269 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10270
10271 i40e_reset_all_vfs(pf, true);
10272
10273
10274 i40e_send_version(pf);
10275
10276
10277 goto end_core_reset;
10278
10279end_unlock:
10280 if (!lock_acquired)
10281 rtnl_unlock();
10282end_core_reset:
10283 clear_bit(__I40E_RESET_FAILED, pf->state);
10284clear_recovery:
10285 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
10286 clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
10287}
10288
10289
10290
10291
10292
10293
10294
10295
10296static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
10297 bool lock_acquired)
10298{
10299 int ret;
10300
10301
10302
10303
10304 ret = i40e_reset(pf);
10305 if (!ret)
10306 i40e_rebuild(pf, reinit, lock_acquired);
10307}
10308
10309
10310
10311
10312
10313
10314
10315
10316
10317
10318static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
10319{
10320 i40e_prep_for_reset(pf, lock_acquired);
10321 i40e_reset_and_rebuild(pf, false, lock_acquired);
10322}
10323
10324
10325
10326
10327
10328
10329
10330static void i40e_handle_mdd_event(struct i40e_pf *pf)
10331{
10332 struct i40e_hw *hw = &pf->hw;
10333 bool mdd_detected = false;
10334 struct i40e_vf *vf;
10335 u32 reg;
10336 int i;
10337
10338 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
10339 return;
10340
10341
10342 reg = rd32(hw, I40E_GL_MDET_TX);
10343 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
10344 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
10345 I40E_GL_MDET_TX_PF_NUM_SHIFT;
10346 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
10347 I40E_GL_MDET_TX_VF_NUM_SHIFT;
10348 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
10349 I40E_GL_MDET_TX_EVENT_SHIFT;
10350 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
10351 I40E_GL_MDET_TX_QUEUE_SHIFT) -
10352 pf->hw.func_caps.base_queue;
10353 if (netif_msg_tx_err(pf))
10354 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
10355 event, queue, pf_num, vf_num);
10356 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
10357 mdd_detected = true;
10358 }
10359 reg = rd32(hw, I40E_GL_MDET_RX);
10360 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
10361 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
10362 I40E_GL_MDET_RX_FUNCTION_SHIFT;
10363 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
10364 I40E_GL_MDET_RX_EVENT_SHIFT;
10365 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
10366 I40E_GL_MDET_RX_QUEUE_SHIFT) -
10367 pf->hw.func_caps.base_queue;
10368 if (netif_msg_rx_err(pf))
10369 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
10370 event, queue, func);
10371 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
10372 mdd_detected = true;
10373 }
10374
10375 if (mdd_detected) {
10376 reg = rd32(hw, I40E_PF_MDET_TX);
10377 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
10378 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
10379 dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
10380 }
10381 reg = rd32(hw, I40E_PF_MDET_RX);
10382 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
10383 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
10384 dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
10385 }
10386 }
10387
10388
10389 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
10390 vf = &(pf->vf[i]);
10391 reg = rd32(hw, I40E_VP_MDET_TX(i));
10392 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
10393 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
10394 vf->num_mdd_events++;
10395 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
10396 i);
10397 dev_info(&pf->pdev->dev,
10398 "Use PF Control I/F to re-enable the VF\n");
10399 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10400 }
10401
10402 reg = rd32(hw, I40E_VP_MDET_RX(i));
10403 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
10404 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
10405 vf->num_mdd_events++;
10406 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
10407 i);
10408 dev_info(&pf->pdev->dev,
10409 "Use PF Control I/F to re-enable the VF\n");
10410 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10411 }
10412 }
10413
10414
10415 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
10416 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
10417 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
10418 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
10419 i40e_flush(hw);
10420}
10421
10422
10423
10424
10425
10426static void i40e_service_task(struct work_struct *work)
10427{
10428 struct i40e_pf *pf = container_of(work,
10429 struct i40e_pf,
10430 service_task);
10431 unsigned long start_time = jiffies;
10432
10433
10434 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
10435 test_bit(__I40E_SUSPENDED, pf->state))
10436 return;
10437
10438 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
10439 return;
10440
10441 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
10442 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
10443 i40e_sync_filters_subtask(pf);
10444 i40e_reset_subtask(pf);
10445 i40e_handle_mdd_event(pf);
10446 i40e_vc_process_vflr_event(pf);
10447 i40e_watchdog_subtask(pf);
10448 i40e_fdir_reinit_subtask(pf);
10449 if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
10450
10451 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
10452 true);
10453 } else {
10454 i40e_client_subtask(pf);
10455 if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
10456 pf->state))
10457 i40e_notify_client_of_l2_param_changes(
10458 pf->vsi[pf->lan_vsi]);
10459 }
10460 i40e_sync_filters_subtask(pf);
10461 } else {
10462 i40e_reset_subtask(pf);
10463 }
10464
10465 i40e_clean_adminq_subtask(pf);
10466
10467
10468 smp_mb__before_atomic();
10469 clear_bit(__I40E_SERVICE_SCHED, pf->state);
10470
10471
10472
10473
10474
10475 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
10476 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
10477 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
10478 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
10479 i40e_service_event_schedule(pf);
10480}
10481
10482
10483
10484
10485
10486static void i40e_service_timer(struct timer_list *t)
10487{
10488 struct i40e_pf *pf = from_timer(pf, t, service_timer);
10489
10490 mod_timer(&pf->service_timer,
10491 round_jiffies(jiffies + pf->service_timer_period));
10492 i40e_service_event_schedule(pf);
10493}
10494
10495
10496
10497
10498
10499static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
10500{
10501 struct i40e_pf *pf = vsi->back;
10502
10503 switch (vsi->type) {
10504 case I40E_VSI_MAIN:
10505 vsi->alloc_queue_pairs = pf->num_lan_qps;
10506 if (!vsi->num_tx_desc)
10507 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10508 I40E_REQ_DESCRIPTOR_MULTIPLE);
10509 if (!vsi->num_rx_desc)
10510 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10511 I40E_REQ_DESCRIPTOR_MULTIPLE);
10512 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10513 vsi->num_q_vectors = pf->num_lan_msix;
10514 else
10515 vsi->num_q_vectors = 1;
10516
10517 break;
10518
10519 case I40E_VSI_FDIR:
10520 vsi->alloc_queue_pairs = 1;
10521 vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
10522 I40E_REQ_DESCRIPTOR_MULTIPLE);
10523 vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
10524 I40E_REQ_DESCRIPTOR_MULTIPLE);
10525 vsi->num_q_vectors = pf->num_fdsb_msix;
10526 break;
10527
10528 case I40E_VSI_VMDQ2:
10529 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
10530 if (!vsi->num_tx_desc)
10531 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10532 I40E_REQ_DESCRIPTOR_MULTIPLE);
10533 if (!vsi->num_rx_desc)
10534 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10535 I40E_REQ_DESCRIPTOR_MULTIPLE);
10536 vsi->num_q_vectors = pf->num_vmdq_msix;
10537 break;
10538
10539 case I40E_VSI_SRIOV:
10540 vsi->alloc_queue_pairs = pf->num_vf_qps;
10541 if (!vsi->num_tx_desc)
10542 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10543 I40E_REQ_DESCRIPTOR_MULTIPLE);
10544 if (!vsi->num_rx_desc)
10545 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10546 I40E_REQ_DESCRIPTOR_MULTIPLE);
10547 break;
10548
10549 default:
10550 WARN_ON(1);
10551 return -ENODATA;
10552 }
10553
10554 return 0;
10555}
10556
10557
10558
10559
10560
10561
10562
10563
10564
10565static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
10566{
10567 struct i40e_ring **next_rings;
10568 int size;
10569 int ret = 0;
10570
10571
10572 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
10573 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
10574 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
10575 if (!vsi->tx_rings)
10576 return -ENOMEM;
10577 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
10578 if (i40e_enabled_xdp_vsi(vsi)) {
10579 vsi->xdp_rings = next_rings;
10580 next_rings += vsi->alloc_queue_pairs;
10581 }
10582 vsi->rx_rings = next_rings;
10583
10584 if (alloc_qvectors) {
10585
10586 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
10587 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
10588 if (!vsi->q_vectors) {
10589 ret = -ENOMEM;
10590 goto err_vectors;
10591 }
10592 }
10593 return ret;
10594
10595err_vectors:
10596 kfree(vsi->tx_rings);
10597 return ret;
10598}
10599
10600
10601
10602
10603
10604
10605
10606
10607
10608static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
10609{
10610 int ret = -ENODEV;
10611 struct i40e_vsi *vsi;
10612 int vsi_idx;
10613 int i;
10614
10615
10616 mutex_lock(&pf->switch_mutex);
10617
10618
10619
10620
10621
10622
10623
10624 i = pf->next_vsi;
10625 while (i < pf->num_alloc_vsi && pf->vsi[i])
10626 i++;
10627 if (i >= pf->num_alloc_vsi) {
10628 i = 0;
10629 while (i < pf->next_vsi && pf->vsi[i])
10630 i++;
10631 }
10632
10633 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
10634 vsi_idx = i;
10635 } else {
10636 ret = -ENODEV;
10637 goto unlock_pf;
10638 }
10639 pf->next_vsi = ++i;
10640
10641 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
10642 if (!vsi) {
10643 ret = -ENOMEM;
10644 goto unlock_pf;
10645 }
10646 vsi->type = type;
10647 vsi->back = pf;
10648 set_bit(__I40E_VSI_DOWN, vsi->state);
10649 vsi->flags = 0;
10650 vsi->idx = vsi_idx;
10651 vsi->int_rate_limit = 0;
10652 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
10653 pf->rss_table_size : 64;
10654 vsi->netdev_registered = false;
10655 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
10656 hash_init(vsi->mac_filter_hash);
10657 vsi->irqs_ready = false;
10658
10659 if (type == I40E_VSI_MAIN) {
10660 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
10661 if (!vsi->af_xdp_zc_qps)
10662 goto err_rings;
10663 }
10664
10665 ret = i40e_set_num_rings_in_vsi(vsi);
10666 if (ret)
10667 goto err_rings;
10668
10669 ret = i40e_vsi_alloc_arrays(vsi, true);
10670 if (ret)
10671 goto err_rings;
10672
10673
10674 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
10675
10676
10677 spin_lock_init(&vsi->mac_filter_hash_lock);
10678 pf->vsi[vsi_idx] = vsi;
10679 ret = vsi_idx;
10680 goto unlock_pf;
10681
10682err_rings:
10683 bitmap_free(vsi->af_xdp_zc_qps);
10684 pf->next_vsi = i - 1;
10685 kfree(vsi);
10686unlock_pf:
10687 mutex_unlock(&pf->switch_mutex);
10688 return ret;
10689}
10690
10691
10692
10693
10694
10695
10696
10697
10698
10699static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
10700{
10701
10702 if (free_qvectors) {
10703 kfree(vsi->q_vectors);
10704 vsi->q_vectors = NULL;
10705 }
10706 kfree(vsi->tx_rings);
10707 vsi->tx_rings = NULL;
10708 vsi->rx_rings = NULL;
10709 vsi->xdp_rings = NULL;
10710}
10711
10712
10713
10714
10715
10716
10717static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
10718{
10719 if (!vsi)
10720 return;
10721
10722 kfree(vsi->rss_hkey_user);
10723 vsi->rss_hkey_user = NULL;
10724
10725 kfree(vsi->rss_lut_user);
10726 vsi->rss_lut_user = NULL;
10727}
10728
10729
10730
10731
10732
10733static int i40e_vsi_clear(struct i40e_vsi *vsi)
10734{
10735 struct i40e_pf *pf;
10736
10737 if (!vsi)
10738 return 0;
10739
10740 if (!vsi->back)
10741 goto free_vsi;
10742 pf = vsi->back;
10743
10744 mutex_lock(&pf->switch_mutex);
10745 if (!pf->vsi[vsi->idx]) {
10746 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
10747 vsi->idx, vsi->idx, vsi->type);
10748 goto unlock_vsi;
10749 }
10750
10751 if (pf->vsi[vsi->idx] != vsi) {
10752 dev_err(&pf->pdev->dev,
10753 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
10754 pf->vsi[vsi->idx]->idx,
10755 pf->vsi[vsi->idx]->type,
10756 vsi->idx, vsi->type);
10757 goto unlock_vsi;
10758 }
10759
10760
10761 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
10762 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
10763
10764 bitmap_free(vsi->af_xdp_zc_qps);
10765 i40e_vsi_free_arrays(vsi, true);
10766 i40e_clear_rss_config_user(vsi);
10767
10768 pf->vsi[vsi->idx] = NULL;
10769 if (vsi->idx < pf->next_vsi)
10770 pf->next_vsi = vsi->idx;
10771
10772unlock_vsi:
10773 mutex_unlock(&pf->switch_mutex);
10774free_vsi:
10775 kfree(vsi);
10776
10777 return 0;
10778}
10779
10780
10781
10782
10783
10784static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
10785{
10786 int i;
10787
10788 if (vsi->tx_rings && vsi->tx_rings[0]) {
10789 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10790 kfree_rcu(vsi->tx_rings[i], rcu);
10791 WRITE_ONCE(vsi->tx_rings[i], NULL);
10792 WRITE_ONCE(vsi->rx_rings[i], NULL);
10793 if (vsi->xdp_rings)
10794 WRITE_ONCE(vsi->xdp_rings[i], NULL);
10795 }
10796 }
10797}
10798
10799
10800
10801
10802
10803static int i40e_alloc_rings(struct i40e_vsi *vsi)
10804{
10805 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
10806 struct i40e_pf *pf = vsi->back;
10807 struct i40e_ring *ring;
10808
10809
10810 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10811
10812 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
10813 if (!ring)
10814 goto err_out;
10815
10816 ring->queue_index = i;
10817 ring->reg_idx = vsi->base_queue + i;
10818 ring->ring_active = false;
10819 ring->vsi = vsi;
10820 ring->netdev = vsi->netdev;
10821 ring->dev = &pf->pdev->dev;
10822 ring->count = vsi->num_tx_desc;
10823 ring->size = 0;
10824 ring->dcb_tc = 0;
10825 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10826 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10827 ring->itr_setting = pf->tx_itr_default;
10828 WRITE_ONCE(vsi->tx_rings[i], ring++);
10829
10830 if (!i40e_enabled_xdp_vsi(vsi))
10831 goto setup_rx;
10832
10833 ring->queue_index = vsi->alloc_queue_pairs + i;
10834 ring->reg_idx = vsi->base_queue + ring->queue_index;
10835 ring->ring_active = false;
10836 ring->vsi = vsi;
10837 ring->netdev = NULL;
10838 ring->dev = &pf->pdev->dev;
10839 ring->count = vsi->num_tx_desc;
10840 ring->size = 0;
10841 ring->dcb_tc = 0;
10842 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10843 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10844 set_ring_xdp(ring);
10845 ring->itr_setting = pf->tx_itr_default;
10846 WRITE_ONCE(vsi->xdp_rings[i], ring++);
10847
10848setup_rx:
10849 ring->queue_index = i;
10850 ring->reg_idx = vsi->base_queue + i;
10851 ring->ring_active = false;
10852 ring->vsi = vsi;
10853 ring->netdev = vsi->netdev;
10854 ring->dev = &pf->pdev->dev;
10855 ring->count = vsi->num_rx_desc;
10856 ring->size = 0;
10857 ring->dcb_tc = 0;
10858 ring->itr_setting = pf->rx_itr_default;
10859 WRITE_ONCE(vsi->rx_rings[i], ring);
10860 }
10861
10862 return 0;
10863
10864err_out:
10865 i40e_vsi_clear_rings(vsi);
10866 return -ENOMEM;
10867}
10868
10869
10870
10871
10872
10873
10874
10875
10876static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
10877{
10878 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
10879 I40E_MIN_MSIX, vectors);
10880 if (vectors < 0) {
10881 dev_info(&pf->pdev->dev,
10882 "MSI-X vector reservation failed: %d\n", vectors);
10883 vectors = 0;
10884 }
10885
10886 return vectors;
10887}
10888
10889
10890
10891
10892
10893
10894
10895
10896
10897static int i40e_init_msix(struct i40e_pf *pf)
10898{
10899 struct i40e_hw *hw = &pf->hw;
10900 int cpus, extra_vectors;
10901 int vectors_left;
10902 int v_budget, i;
10903 int v_actual;
10904 int iwarp_requested = 0;
10905
10906 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
10907 return -ENODEV;
10908
10909
10910
10911
10912
10913
10914
10915
10916
10917
10918
10919
10920
10921
10922
10923
10924 vectors_left = hw->func_caps.num_msix_vectors;
10925 v_budget = 0;
10926
10927
10928 if (vectors_left) {
10929 v_budget++;
10930 vectors_left--;
10931 }
10932
10933
10934
10935
10936
10937
10938
10939
10940 cpus = num_online_cpus();
10941 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
10942 vectors_left -= pf->num_lan_msix;
10943
10944
10945 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10946 if (vectors_left) {
10947 pf->num_fdsb_msix = 1;
10948 v_budget++;
10949 vectors_left--;
10950 } else {
10951 pf->num_fdsb_msix = 0;
10952 }
10953 }
10954
10955
10956 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10957 iwarp_requested = pf->num_iwarp_msix;
10958
10959 if (!vectors_left)
10960 pf->num_iwarp_msix = 0;
10961 else if (vectors_left < pf->num_iwarp_msix)
10962 pf->num_iwarp_msix = 1;
10963 v_budget += pf->num_iwarp_msix;
10964 vectors_left -= pf->num_iwarp_msix;
10965 }
10966
10967
10968 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
10969 if (!vectors_left) {
10970 pf->num_vmdq_msix = 0;
10971 pf->num_vmdq_qps = 0;
10972 } else {
10973 int vmdq_vecs_wanted =
10974 pf->num_vmdq_vsis * pf->num_vmdq_qps;
10975 int vmdq_vecs =
10976 min_t(int, vectors_left, vmdq_vecs_wanted);
10977
10978
10979
10980
10981
10982
10983
10984 if (vectors_left < vmdq_vecs_wanted) {
10985 pf->num_vmdq_qps = 1;
10986 vmdq_vecs_wanted = pf->num_vmdq_vsis;
10987 vmdq_vecs = min_t(int,
10988 vectors_left,
10989 vmdq_vecs_wanted);
10990 }
10991 pf->num_vmdq_msix = pf->num_vmdq_qps;
10992
10993 v_budget += vmdq_vecs;
10994 vectors_left -= vmdq_vecs;
10995 }
10996 }
10997
10998
10999
11000
11001
11002
11003
11004
11005
11006
11007 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
11008 pf->num_lan_msix += extra_vectors;
11009 vectors_left -= extra_vectors;
11010
11011 WARN(vectors_left < 0,
11012 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
11013
11014 v_budget += pf->num_lan_msix;
11015 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
11016 GFP_KERNEL);
11017 if (!pf->msix_entries)
11018 return -ENOMEM;
11019
11020 for (i = 0; i < v_budget; i++)
11021 pf->msix_entries[i].entry = i;
11022 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
11023
11024 if (v_actual < I40E_MIN_MSIX) {
11025 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
11026 kfree(pf->msix_entries);
11027 pf->msix_entries = NULL;
11028 pci_disable_msix(pf->pdev);
11029 return -ENODEV;
11030
11031 } else if (v_actual == I40E_MIN_MSIX) {
11032
11033 pf->num_vmdq_vsis = 0;
11034 pf->num_vmdq_qps = 0;
11035 pf->num_lan_qps = 1;
11036 pf->num_lan_msix = 1;
11037
11038 } else if (v_actual != v_budget) {
11039
11040
11041
11042
11043
11044 int vec;
11045
11046 dev_info(&pf->pdev->dev,
11047 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
11048 v_actual, v_budget);
11049
11050 vec = v_actual - 1;
11051
11052
11053 pf->num_vmdq_msix = 1;
11054 pf->num_vmdq_vsis = 1;
11055 pf->num_vmdq_qps = 1;
11056
11057
11058 switch (vec) {
11059 case 2:
11060 pf->num_lan_msix = 1;
11061 break;
11062 case 3:
11063 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11064 pf->num_lan_msix = 1;
11065 pf->num_iwarp_msix = 1;
11066 } else {
11067 pf->num_lan_msix = 2;
11068 }
11069 break;
11070 default:
11071 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11072 pf->num_iwarp_msix = min_t(int, (vec / 3),
11073 iwarp_requested);
11074 pf->num_vmdq_vsis = min_t(int, (vec / 3),
11075 I40E_DEFAULT_NUM_VMDQ_VSI);
11076 } else {
11077 pf->num_vmdq_vsis = min_t(int, (vec / 2),
11078 I40E_DEFAULT_NUM_VMDQ_VSI);
11079 }
11080 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11081 pf->num_fdsb_msix = 1;
11082 vec--;
11083 }
11084 pf->num_lan_msix = min_t(int,
11085 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
11086 pf->num_lan_msix);
11087 pf->num_lan_qps = pf->num_lan_msix;
11088 break;
11089 }
11090 }
11091
11092 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
11093 (pf->num_fdsb_msix == 0)) {
11094 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
11095 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11096 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11097 }
11098 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
11099 (pf->num_vmdq_msix == 0)) {
11100 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
11101 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
11102 }
11103
11104 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
11105 (pf->num_iwarp_msix == 0)) {
11106 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
11107 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11108 }
11109 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
11110 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
11111 pf->num_lan_msix,
11112 pf->num_vmdq_msix * pf->num_vmdq_vsis,
11113 pf->num_fdsb_msix,
11114 pf->num_iwarp_msix);
11115
11116 return v_actual;
11117}
11118
11119
11120
11121
11122
11123
11124
11125
11126static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
11127{
11128 struct i40e_q_vector *q_vector;
11129
11130
11131 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
11132 if (!q_vector)
11133 return -ENOMEM;
11134
11135 q_vector->vsi = vsi;
11136 q_vector->v_idx = v_idx;
11137 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
11138
11139 if (vsi->netdev)
11140 netif_napi_add(vsi->netdev, &q_vector->napi,
11141 i40e_napi_poll, NAPI_POLL_WEIGHT);
11142
11143
11144 vsi->q_vectors[v_idx] = q_vector;
11145
11146 return 0;
11147}
11148
11149
11150
11151
11152
11153
11154
11155
11156static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
11157{
11158 struct i40e_pf *pf = vsi->back;
11159 int err, v_idx, num_q_vectors;
11160
11161
11162 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
11163 num_q_vectors = vsi->num_q_vectors;
11164 else if (vsi == pf->vsi[pf->lan_vsi])
11165 num_q_vectors = 1;
11166 else
11167 return -EINVAL;
11168
11169 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
11170 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
11171 if (err)
11172 goto err_out;
11173 }
11174
11175 return 0;
11176
11177err_out:
11178 while (v_idx--)
11179 i40e_free_q_vector(vsi, v_idx);
11180
11181 return err;
11182}
11183
11184
11185
11186
11187
11188static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
11189{
11190 int vectors = 0;
11191 ssize_t size;
11192
11193 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11194 vectors = i40e_init_msix(pf);
11195 if (vectors < 0) {
11196 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
11197 I40E_FLAG_IWARP_ENABLED |
11198 I40E_FLAG_RSS_ENABLED |
11199 I40E_FLAG_DCB_CAPABLE |
11200 I40E_FLAG_DCB_ENABLED |
11201 I40E_FLAG_SRIOV_ENABLED |
11202 I40E_FLAG_FD_SB_ENABLED |
11203 I40E_FLAG_FD_ATR_ENABLED |
11204 I40E_FLAG_VMDQ_ENABLED);
11205 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11206
11207
11208 i40e_determine_queue_usage(pf);
11209 }
11210 }
11211
11212 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11213 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
11214 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
11215 vectors = pci_enable_msi(pf->pdev);
11216 if (vectors < 0) {
11217 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
11218 vectors);
11219 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
11220 }
11221 vectors = 1;
11222 }
11223
11224 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
11225 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
11226
11227
11228 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
11229 pf->irq_pile = kzalloc(size, GFP_KERNEL);
11230 if (!pf->irq_pile)
11231 return -ENOMEM;
11232
11233 pf->irq_pile->num_entries = vectors;
11234 pf->irq_pile->search_hint = 0;
11235
11236
11237 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
11238
11239 return 0;
11240}
11241
11242
11243
11244
11245
11246
11247
11248
11249
11250static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
11251{
11252 int err, i;
11253
11254
11255
11256
11257
11258 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
11259
11260 err = i40e_init_interrupt_scheme(pf);
11261 if (err)
11262 return err;
11263
11264
11265
11266
11267 for (i = 0; i < pf->num_alloc_vsi; i++) {
11268 if (pf->vsi[i]) {
11269 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
11270 if (err)
11271 goto err_unwind;
11272 i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
11273 }
11274 }
11275
11276 err = i40e_setup_misc_vector(pf);
11277 if (err)
11278 goto err_unwind;
11279
11280 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
11281 i40e_client_update_msix_info(pf);
11282
11283 return 0;
11284
11285err_unwind:
11286 while (i--) {
11287 if (pf->vsi[i])
11288 i40e_vsi_free_q_vectors(pf->vsi[i]);
11289 }
11290
11291 return err;
11292}
11293
11294
11295
11296
11297
11298
11299
11300
11301
11302
11303
11304static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
11305{
11306 int err;
11307
11308 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11309 err = i40e_setup_misc_vector(pf);
11310
11311 if (err) {
11312 dev_info(&pf->pdev->dev,
11313 "MSI-X misc vector request failed, error %d\n",
11314 err);
11315 return err;
11316 }
11317 } else {
11318 u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
11319
11320 err = request_irq(pf->pdev->irq, i40e_intr, flags,
11321 pf->int_name, pf);
11322
11323 if (err) {
11324 dev_info(&pf->pdev->dev,
11325 "MSI/legacy misc vector request failed, error %d\n",
11326 err);
11327 return err;
11328 }
11329 i40e_enable_misc_int_causes(pf);
11330 i40e_irq_dynamic_enable_icr0(pf);
11331 }
11332
11333 return 0;
11334}
11335
11336
11337
11338
11339
11340
11341
11342
11343
11344static int i40e_setup_misc_vector(struct i40e_pf *pf)
11345{
11346 struct i40e_hw *hw = &pf->hw;
11347 int err = 0;
11348
11349
11350 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
11351 err = request_irq(pf->msix_entries[0].vector,
11352 i40e_intr, 0, pf->int_name, pf);
11353 if (err) {
11354 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
11355 dev_info(&pf->pdev->dev,
11356 "request_irq for %s failed: %d\n",
11357 pf->int_name, err);
11358 return -EFAULT;
11359 }
11360 }
11361
11362 i40e_enable_misc_int_causes(pf);
11363
11364
11365 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
11366 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
11367
11368 i40e_flush(hw);
11369
11370 i40e_irq_dynamic_enable_icr0(pf);
11371
11372 return err;
11373}
11374
11375
11376
11377
11378
11379
11380
11381
11382
11383
11384static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
11385 u8 *lut, u16 lut_size)
11386{
11387 struct i40e_pf *pf = vsi->back;
11388 struct i40e_hw *hw = &pf->hw;
11389 int ret = 0;
11390
11391 if (seed) {
11392 ret = i40e_aq_get_rss_key(hw, vsi->id,
11393 (struct i40e_aqc_get_set_rss_key_data *)seed);
11394 if (ret) {
11395 dev_info(&pf->pdev->dev,
11396 "Cannot get RSS key, err %s aq_err %s\n",
11397 i40e_stat_str(&pf->hw, ret),
11398 i40e_aq_str(&pf->hw,
11399 pf->hw.aq.asq_last_status));
11400 return ret;
11401 }
11402 }
11403
11404 if (lut) {
11405 bool pf_lut = vsi->type == I40E_VSI_MAIN;
11406
11407 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
11408 if (ret) {
11409 dev_info(&pf->pdev->dev,
11410 "Cannot get RSS lut, err %s aq_err %s\n",
11411 i40e_stat_str(&pf->hw, ret),
11412 i40e_aq_str(&pf->hw,
11413 pf->hw.aq.asq_last_status));
11414 return ret;
11415 }
11416 }
11417
11418 return ret;
11419}
11420
11421
11422
11423
11424
11425
11426
11427
11428
11429
11430static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
11431 const u8 *lut, u16 lut_size)
11432{
11433 struct i40e_pf *pf = vsi->back;
11434 struct i40e_hw *hw = &pf->hw;
11435 u16 vf_id = vsi->vf_id;
11436 u8 i;
11437
11438
11439 if (seed) {
11440 u32 *seed_dw = (u32 *)seed;
11441
11442 if (vsi->type == I40E_VSI_MAIN) {
11443 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
11444 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
11445 } else if (vsi->type == I40E_VSI_SRIOV) {
11446 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
11447 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
11448 } else {
11449 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
11450 }
11451 }
11452
11453 if (lut) {
11454 u32 *lut_dw = (u32 *)lut;
11455
11456 if (vsi->type == I40E_VSI_MAIN) {
11457 if (lut_size != I40E_HLUT_ARRAY_SIZE)
11458 return -EINVAL;
11459 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11460 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
11461 } else if (vsi->type == I40E_VSI_SRIOV) {
11462 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
11463 return -EINVAL;
11464 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
11465 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
11466 } else {
11467 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
11468 }
11469 }
11470 i40e_flush(hw);
11471
11472 return 0;
11473}
11474
11475
11476
11477
11478
11479
11480
11481
11482
11483
11484static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
11485 u8 *lut, u16 lut_size)
11486{
11487 struct i40e_pf *pf = vsi->back;
11488 struct i40e_hw *hw = &pf->hw;
11489 u16 i;
11490
11491 if (seed) {
11492 u32 *seed_dw = (u32 *)seed;
11493
11494 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
11495 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
11496 }
11497 if (lut) {
11498 u32 *lut_dw = (u32 *)lut;
11499
11500 if (lut_size != I40E_HLUT_ARRAY_SIZE)
11501 return -EINVAL;
11502 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11503 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
11504 }
11505
11506 return 0;
11507}
11508
11509
11510
11511
11512
11513
11514
11515
11516
11517
11518int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
11519{
11520 struct i40e_pf *pf = vsi->back;
11521
11522 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
11523 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
11524 else
11525 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
11526}
11527
11528
11529
11530
11531
11532
11533
11534
11535
11536
11537int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
11538{
11539 struct i40e_pf *pf = vsi->back;
11540
11541 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
11542 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
11543 else
11544 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
11545}
11546
11547
11548
11549
11550
11551
11552
11553
11554void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
11555 u16 rss_table_size, u16 rss_size)
11556{
11557 u16 i;
11558
11559 for (i = 0; i < rss_table_size; i++)
11560 lut[i] = i % rss_size;
11561}
11562
11563
11564
11565
11566
11567static int i40e_pf_config_rss(struct i40e_pf *pf)
11568{
11569 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11570 u8 seed[I40E_HKEY_ARRAY_SIZE];
11571 u8 *lut;
11572 struct i40e_hw *hw = &pf->hw;
11573 u32 reg_val;
11574 u64 hena;
11575 int ret;
11576
11577
11578 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
11579 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
11580 hena |= i40e_pf_get_default_rss_hena(pf);
11581
11582 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
11583 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
11584
11585
11586 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
11587 reg_val = (pf->rss_table_size == 512) ?
11588 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
11589 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
11590 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
11591
11592
11593 if (!vsi->rss_size) {
11594 u16 qcount;
11595
11596
11597
11598
11599
11600 qcount = vsi->num_queue_pairs /
11601 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
11602 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11603 }
11604 if (!vsi->rss_size)
11605 return -EINVAL;
11606
11607 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
11608 if (!lut)
11609 return -ENOMEM;
11610
11611
11612 if (vsi->rss_lut_user)
11613 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
11614 else
11615 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
11616
11617
11618
11619
11620 if (vsi->rss_hkey_user)
11621 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
11622 else
11623 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
11624 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
11625 kfree(lut);
11626
11627 return ret;
11628}
11629
11630
11631
11632
11633
11634
11635
11636
11637
11638
11639int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
11640{
11641 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11642 int new_rss_size;
11643
11644 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
11645 return 0;
11646
11647 queue_count = min_t(int, queue_count, num_online_cpus());
11648 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
11649
11650 if (queue_count != vsi->num_queue_pairs) {
11651 u16 qcount;
11652
11653 vsi->req_queue_pairs = queue_count;
11654 i40e_prep_for_reset(pf, true);
11655
11656 pf->alloc_rss_size = new_rss_size;
11657
11658 i40e_reset_and_rebuild(pf, true, true);
11659
11660
11661
11662
11663 if (queue_count < vsi->rss_size) {
11664 i40e_clear_rss_config_user(vsi);
11665 dev_dbg(&pf->pdev->dev,
11666 "discard user configured hash keys and lut\n");
11667 }
11668
11669
11670 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
11671 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11672
11673 i40e_pf_config_rss(pf);
11674 }
11675 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
11676 vsi->req_queue_pairs, pf->rss_size_max);
11677 return pf->alloc_rss_size;
11678}
11679
11680
11681
11682
11683
11684i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
11685{
11686 i40e_status status;
11687 bool min_valid, max_valid;
11688 u32 max_bw, min_bw;
11689
11690 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
11691 &min_valid, &max_valid);
11692
11693 if (!status) {
11694 if (min_valid)
11695 pf->min_bw = min_bw;
11696 if (max_valid)
11697 pf->max_bw = max_bw;
11698 }
11699
11700 return status;
11701}
11702
11703
11704
11705
11706
11707i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
11708{
11709 struct i40e_aqc_configure_partition_bw_data bw_data;
11710 i40e_status status;
11711
11712
11713 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
11714 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
11715 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
11716
11717
11718 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
11719
11720 return status;
11721}
11722
11723
11724
11725
11726
11727i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
11728{
11729
11730 enum i40e_admin_queue_err last_aq_status;
11731 i40e_status ret;
11732 u16 nvm_word;
11733
11734 if (pf->hw.partition_id != 1) {
11735 dev_info(&pf->pdev->dev,
11736 "Commit BW only works on partition 1! This is partition %d",
11737 pf->hw.partition_id);
11738 ret = I40E_NOT_SUPPORTED;
11739 goto bw_commit_out;
11740 }
11741
11742
11743 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
11744 last_aq_status = pf->hw.aq.asq_last_status;
11745 if (ret) {
11746 dev_info(&pf->pdev->dev,
11747 "Cannot acquire NVM for read access, err %s aq_err %s\n",
11748 i40e_stat_str(&pf->hw, ret),
11749 i40e_aq_str(&pf->hw, last_aq_status));
11750 goto bw_commit_out;
11751 }
11752
11753
11754 ret = i40e_aq_read_nvm(&pf->hw,
11755 I40E_SR_NVM_CONTROL_WORD,
11756 0x10, sizeof(nvm_word), &nvm_word,
11757 false, NULL);
11758
11759
11760
11761 last_aq_status = pf->hw.aq.asq_last_status;
11762 i40e_release_nvm(&pf->hw);
11763 if (ret) {
11764 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
11765 i40e_stat_str(&pf->hw, ret),
11766 i40e_aq_str(&pf->hw, last_aq_status));
11767 goto bw_commit_out;
11768 }
11769
11770
11771 msleep(50);
11772
11773
11774 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
11775 last_aq_status = pf->hw.aq.asq_last_status;
11776 if (ret) {
11777 dev_info(&pf->pdev->dev,
11778 "Cannot acquire NVM for write access, err %s aq_err %s\n",
11779 i40e_stat_str(&pf->hw, ret),
11780 i40e_aq_str(&pf->hw, last_aq_status));
11781 goto bw_commit_out;
11782 }
11783
11784
11785
11786
11787 ret = i40e_aq_update_nvm(&pf->hw,
11788 I40E_SR_NVM_CONTROL_WORD,
11789 0x10, sizeof(nvm_word),
11790 &nvm_word, true, 0, NULL);
11791
11792
11793
11794 last_aq_status = pf->hw.aq.asq_last_status;
11795 i40e_release_nvm(&pf->hw);
11796 if (ret)
11797 dev_info(&pf->pdev->dev,
11798 "BW settings NOT SAVED, err %s aq_err %s\n",
11799 i40e_stat_str(&pf->hw, ret),
11800 i40e_aq_str(&pf->hw, last_aq_status));
11801bw_commit_out:
11802
11803 return ret;
11804}
11805
11806
11807
11808
11809
11810
11811static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
11812{
11813#define I40E_TOTAL_PORT_SHUTDOWN_ENABLED BIT(4)
11814#define I40E_FEATURES_ENABLE_PTR 0x2A
11815#define I40E_CURRENT_SETTING_PTR 0x2B
11816#define I40E_LINK_BEHAVIOR_WORD_OFFSET 0x2D
11817#define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
11818#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
11819#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
11820 i40e_status read_status = I40E_SUCCESS;
11821 u16 sr_emp_sr_settings_ptr = 0;
11822 u16 features_enable = 0;
11823 u16 link_behavior = 0;
11824 bool ret = false;
11825
11826 read_status = i40e_read_nvm_word(&pf->hw,
11827 I40E_SR_EMP_SR_SETTINGS_PTR,
11828 &sr_emp_sr_settings_ptr);
11829 if (read_status)
11830 goto err_nvm;
11831 read_status = i40e_read_nvm_word(&pf->hw,
11832 sr_emp_sr_settings_ptr +
11833 I40E_FEATURES_ENABLE_PTR,
11834 &features_enable);
11835 if (read_status)
11836 goto err_nvm;
11837 if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
11838 read_status = i40e_read_nvm_module_data(&pf->hw,
11839 I40E_SR_EMP_SR_SETTINGS_PTR,
11840 I40E_CURRENT_SETTING_PTR,
11841 I40E_LINK_BEHAVIOR_WORD_OFFSET,
11842 I40E_LINK_BEHAVIOR_WORD_LENGTH,
11843 &link_behavior);
11844 if (read_status)
11845 goto err_nvm;
11846 link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
11847 ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
11848 }
11849 return ret;
11850
11851err_nvm:
11852 dev_warn(&pf->pdev->dev,
11853 "total-port-shutdown feature is off due to read nvm error: %s\n",
11854 i40e_stat_str(&pf->hw, read_status));
11855 return ret;
11856}
11857
11858
11859
11860
11861
11862
11863
11864
11865
11866static int i40e_sw_init(struct i40e_pf *pf)
11867{
11868 int err = 0;
11869 int size;
11870
11871
11872 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
11873 I40E_FLAG_MSI_ENABLED |
11874 I40E_FLAG_MSIX_ENABLED;
11875
11876
11877 pf->rx_itr_default = I40E_ITR_RX_DEF;
11878 pf->tx_itr_default = I40E_ITR_TX_DEF;
11879
11880
11881
11882
11883 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
11884 pf->alloc_rss_size = 1;
11885 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
11886 pf->rss_size_max = min_t(int, pf->rss_size_max,
11887 pf->hw.func_caps.num_tx_qp);
11888 if (pf->hw.func_caps.rss) {
11889 pf->flags |= I40E_FLAG_RSS_ENABLED;
11890 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
11891 num_online_cpus());
11892 }
11893
11894
11895 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
11896 pf->flags |= I40E_FLAG_MFP_ENABLED;
11897 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
11898 if (i40e_get_partition_bw_setting(pf)) {
11899 dev_warn(&pf->pdev->dev,
11900 "Could not get partition bw settings\n");
11901 } else {
11902 dev_info(&pf->pdev->dev,
11903 "Partition BW Min = %8.8x, Max = %8.8x\n",
11904 pf->min_bw, pf->max_bw);
11905
11906
11907 i40e_set_partition_bw_setting(pf);
11908 }
11909 }
11910
11911 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
11912 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
11913 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
11914 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
11915 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
11916 pf->hw.num_partitions > 1)
11917 dev_info(&pf->pdev->dev,
11918 "Flow Director Sideband mode Disabled in MFP mode\n");
11919 else
11920 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11921 pf->fdir_pf_filter_count =
11922 pf->hw.func_caps.fd_filters_guaranteed;
11923 pf->hw.fdir_shared_filter_count =
11924 pf->hw.func_caps.fd_filters_best_effort;
11925 }
11926
11927 if (pf->hw.mac.type == I40E_MAC_X722) {
11928 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
11929 I40E_HW_128_QP_RSS_CAPABLE |
11930 I40E_HW_ATR_EVICT_CAPABLE |
11931 I40E_HW_WB_ON_ITR_CAPABLE |
11932 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
11933 I40E_HW_NO_PCI_LINK_CHECK |
11934 I40E_HW_USE_SET_LLDP_MIB |
11935 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
11936 I40E_HW_PTP_L4_CAPABLE |
11937 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
11938 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
11939
11940#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
11941 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
11942 I40E_FDEVICT_PCTYPE_DEFAULT) {
11943 dev_warn(&pf->pdev->dev,
11944 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
11945 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
11946 }
11947 } else if ((pf->hw.aq.api_maj_ver > 1) ||
11948 ((pf->hw.aq.api_maj_ver == 1) &&
11949 (pf->hw.aq.api_min_ver > 4))) {
11950
11951 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
11952 }
11953
11954
11955 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
11956 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
11957
11958 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11959 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
11960 (pf->hw.aq.fw_maj_ver < 4))) {
11961 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
11962
11963 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
11964 }
11965
11966
11967 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11968 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
11969 (pf->hw.aq.fw_maj_ver < 4)))
11970 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
11971
11972
11973 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11974 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
11975 (pf->hw.aq.fw_maj_ver >= 5)))
11976 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
11977
11978
11979 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11980 pf->hw.aq.fw_maj_ver >= 6)
11981 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
11982
11983 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
11984 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
11985 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
11986 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
11987 }
11988
11989 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
11990 pf->flags |= I40E_FLAG_IWARP_ENABLED;
11991
11992 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
11993 }
11994
11995
11996
11997
11998
11999 if (pf->hw.mac.type == I40E_MAC_XL710 &&
12000 pf->hw.func_caps.npar_enable &&
12001 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
12002 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
12003
12004#ifdef CONFIG_PCI_IOV
12005 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
12006 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
12007 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
12008 pf->num_req_vfs = min_t(int,
12009 pf->hw.func_caps.num_vfs,
12010 I40E_MAX_VF_COUNT);
12011 }
12012#endif
12013 pf->eeprom_version = 0xDEAD;
12014 pf->lan_veb = I40E_NO_VEB;
12015 pf->lan_vsi = I40E_NO_VSI;
12016
12017
12018 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
12019
12020
12021 size = sizeof(struct i40e_lump_tracking)
12022 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
12023 pf->qp_pile = kzalloc(size, GFP_KERNEL);
12024 if (!pf->qp_pile) {
12025 err = -ENOMEM;
12026 goto sw_init_done;
12027 }
12028 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
12029 pf->qp_pile->search_hint = 0;
12030
12031 pf->tx_timeout_recovery_level = 1;
12032
12033 if (pf->hw.mac.type != I40E_MAC_X722 &&
12034 i40e_is_total_port_shutdown_enabled(pf)) {
12035
12036
12037
12038 pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
12039 I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
12040 dev_info(&pf->pdev->dev,
12041 "total-port-shutdown was enabled, link-down-on-close is forced on\n");
12042 }
12043 mutex_init(&pf->switch_mutex);
12044
12045sw_init_done:
12046 return err;
12047}
12048
12049
12050
12051
12052
12053
12054
12055
12056bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
12057{
12058 bool need_reset = false;
12059
12060
12061
12062
12063 if (features & NETIF_F_NTUPLE) {
12064
12065 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
12066 need_reset = true;
12067
12068
12069
12070 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
12071 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12072 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
12073 }
12074 } else {
12075
12076 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
12077 need_reset = true;
12078 i40e_fdir_filter_exit(pf);
12079 }
12080 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
12081 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
12082 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
12083
12084
12085 pf->fd_add_err = 0;
12086 pf->fd_atr_cnt = 0;
12087
12088 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
12089 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
12090 (I40E_DEBUG_FD & pf->hw.debug_mask))
12091 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
12092 }
12093 return need_reset;
12094}
12095
12096
12097
12098
12099
12100static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
12101{
12102 struct i40e_pf *pf = vsi->back;
12103 struct i40e_hw *hw = &pf->hw;
12104 u16 vf_id = vsi->vf_id;
12105 u8 i;
12106
12107 if (vsi->type == I40E_VSI_MAIN) {
12108 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12109 wr32(hw, I40E_PFQF_HLUT(i), 0);
12110 } else if (vsi->type == I40E_VSI_SRIOV) {
12111 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12112 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
12113 } else {
12114 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12115 }
12116}
12117
12118
12119
12120
12121
12122
12123
12124static int i40e_set_features(struct net_device *netdev,
12125 netdev_features_t features)
12126{
12127 struct i40e_netdev_priv *np = netdev_priv(netdev);
12128 struct i40e_vsi *vsi = np->vsi;
12129 struct i40e_pf *pf = vsi->back;
12130 bool need_reset;
12131
12132 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
12133 i40e_pf_config_rss(pf);
12134 else if (!(features & NETIF_F_RXHASH) &&
12135 netdev->features & NETIF_F_RXHASH)
12136 i40e_clear_rss_lut(vsi);
12137
12138 if (features & NETIF_F_HW_VLAN_CTAG_RX)
12139 i40e_vlan_stripping_enable(vsi);
12140 else
12141 i40e_vlan_stripping_disable(vsi);
12142
12143 if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
12144 dev_err(&pf->pdev->dev,
12145 "Offloaded tc filters active, can't turn hw_tc_offload off");
12146 return -EINVAL;
12147 }
12148
12149 if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
12150 i40e_del_all_macvlans(vsi);
12151
12152 need_reset = i40e_set_ntuple(pf, features);
12153
12154 if (need_reset)
12155 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12156
12157 return 0;
12158}
12159
12160static int i40e_udp_tunnel_set_port(struct net_device *netdev,
12161 unsigned int table, unsigned int idx,
12162 struct udp_tunnel_info *ti)
12163{
12164 struct i40e_netdev_priv *np = netdev_priv(netdev);
12165 struct i40e_hw *hw = &np->vsi->back->hw;
12166 u8 type, filter_index;
12167 i40e_status ret;
12168
12169 type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
12170 I40E_AQC_TUNNEL_TYPE_NGE;
12171
12172 ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
12173 NULL);
12174 if (ret) {
12175 netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
12176 i40e_stat_str(hw, ret),
12177 i40e_aq_str(hw, hw->aq.asq_last_status));
12178 return -EIO;
12179 }
12180
12181 udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
12182 return 0;
12183}
12184
12185static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
12186 unsigned int table, unsigned int idx,
12187 struct udp_tunnel_info *ti)
12188{
12189 struct i40e_netdev_priv *np = netdev_priv(netdev);
12190 struct i40e_hw *hw = &np->vsi->back->hw;
12191 i40e_status ret;
12192
12193 ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
12194 if (ret) {
12195 netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
12196 i40e_stat_str(hw, ret),
12197 i40e_aq_str(hw, hw->aq.asq_last_status));
12198 return -EIO;
12199 }
12200
12201 return 0;
12202}
12203
12204static int i40e_get_phys_port_id(struct net_device *netdev,
12205 struct netdev_phys_item_id *ppid)
12206{
12207 struct i40e_netdev_priv *np = netdev_priv(netdev);
12208 struct i40e_pf *pf = np->vsi->back;
12209 struct i40e_hw *hw = &pf->hw;
12210
12211 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
12212 return -EOPNOTSUPP;
12213
12214 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
12215 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
12216
12217 return 0;
12218}
12219
12220
12221
12222
12223
12224
12225
12226
12227
12228
12229
12230static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
12231 struct net_device *dev,
12232 const unsigned char *addr, u16 vid,
12233 u16 flags,
12234 struct netlink_ext_ack *extack)
12235{
12236 struct i40e_netdev_priv *np = netdev_priv(dev);
12237 struct i40e_pf *pf = np->vsi->back;
12238 int err = 0;
12239
12240 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
12241 return -EOPNOTSUPP;
12242
12243 if (vid) {
12244 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
12245 return -EINVAL;
12246 }
12247
12248
12249
12250
12251 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
12252 netdev_info(dev, "FDB only supports static addresses\n");
12253 return -EINVAL;
12254 }
12255
12256 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
12257 err = dev_uc_add_excl(dev, addr);
12258 else if (is_multicast_ether_addr(addr))
12259 err = dev_mc_add_excl(dev, addr);
12260 else
12261 err = -EINVAL;
12262
12263
12264 if (err == -EEXIST && !(flags & NLM_F_EXCL))
12265 err = 0;
12266
12267 return err;
12268}
12269
12270
12271
12272
12273
12274
12275
12276
12277
12278
12279
12280
12281
12282
12283
12284
12285
12286static int i40e_ndo_bridge_setlink(struct net_device *dev,
12287 struct nlmsghdr *nlh,
12288 u16 flags,
12289 struct netlink_ext_ack *extack)
12290{
12291 struct i40e_netdev_priv *np = netdev_priv(dev);
12292 struct i40e_vsi *vsi = np->vsi;
12293 struct i40e_pf *pf = vsi->back;
12294 struct i40e_veb *veb = NULL;
12295 struct nlattr *attr, *br_spec;
12296 int i, rem;
12297
12298
12299 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12300 return -EOPNOTSUPP;
12301
12302
12303 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12304 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12305 veb = pf->veb[i];
12306 }
12307
12308 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12309
12310 nla_for_each_nested(attr, br_spec, rem) {
12311 __u16 mode;
12312
12313 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12314 continue;
12315
12316 mode = nla_get_u16(attr);
12317 if ((mode != BRIDGE_MODE_VEPA) &&
12318 (mode != BRIDGE_MODE_VEB))
12319 return -EINVAL;
12320
12321
12322 if (!veb) {
12323 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
12324 vsi->tc_config.enabled_tc);
12325 if (veb) {
12326 veb->bridge_mode = mode;
12327 i40e_config_bridge_mode(veb);
12328 } else {
12329
12330 return -ENOENT;
12331 }
12332 break;
12333 } else if (mode != veb->bridge_mode) {
12334
12335 veb->bridge_mode = mode;
12336
12337 if (mode == BRIDGE_MODE_VEB)
12338 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
12339 else
12340 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
12341 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12342 break;
12343 }
12344 }
12345
12346 return 0;
12347}
12348
12349
12350
12351
12352
12353
12354
12355
12356
12357
12358
12359
12360
12361static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12362 struct net_device *dev,
12363 u32 __always_unused filter_mask,
12364 int nlflags)
12365{
12366 struct i40e_netdev_priv *np = netdev_priv(dev);
12367 struct i40e_vsi *vsi = np->vsi;
12368 struct i40e_pf *pf = vsi->back;
12369 struct i40e_veb *veb = NULL;
12370 int i;
12371
12372
12373 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12374 return -EOPNOTSUPP;
12375
12376
12377 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12378 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12379 veb = pf->veb[i];
12380 }
12381
12382 if (!veb)
12383 return 0;
12384
12385 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
12386 0, 0, nlflags, filter_mask, NULL);
12387}
12388
12389
12390
12391
12392
12393
12394
12395static netdev_features_t i40e_features_check(struct sk_buff *skb,
12396 struct net_device *dev,
12397 netdev_features_t features)
12398{
12399 size_t len;
12400
12401
12402
12403
12404
12405 if (skb->ip_summed != CHECKSUM_PARTIAL)
12406 return features;
12407
12408
12409
12410
12411 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
12412 features &= ~NETIF_F_GSO_MASK;
12413
12414
12415 len = skb_network_header(skb) - skb->data;
12416 if (len & ~(63 * 2))
12417 goto out_err;
12418
12419
12420 len = skb_transport_header(skb) - skb_network_header(skb);
12421 if (len & ~(127 * 4))
12422 goto out_err;
12423
12424 if (skb->encapsulation) {
12425
12426 len = skb_inner_network_header(skb) - skb_transport_header(skb);
12427 if (len & ~(127 * 2))
12428 goto out_err;
12429
12430
12431 len = skb_inner_transport_header(skb) -
12432 skb_inner_network_header(skb);
12433 if (len & ~(127 * 4))
12434 goto out_err;
12435 }
12436
12437
12438
12439
12440
12441
12442 return features;
12443out_err:
12444 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
12445}
12446
12447
12448
12449
12450
12451
12452static int i40e_xdp_setup(struct i40e_vsi *vsi,
12453 struct bpf_prog *prog)
12454{
12455 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
12456 struct i40e_pf *pf = vsi->back;
12457 struct bpf_prog *old_prog;
12458 bool need_reset;
12459 int i;
12460
12461
12462 if (frame_size > vsi->rx_buf_len)
12463 return -EINVAL;
12464
12465 if (!i40e_enabled_xdp_vsi(vsi) && !prog)
12466 return 0;
12467
12468
12469 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
12470
12471 if (need_reset)
12472 i40e_prep_for_reset(pf, true);
12473
12474 old_prog = xchg(&vsi->xdp_prog, prog);
12475
12476 if (need_reset) {
12477 if (!prog)
12478
12479 synchronize_rcu();
12480 i40e_reset_and_rebuild(pf, true, true);
12481 }
12482
12483 for (i = 0; i < vsi->num_queue_pairs; i++)
12484 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
12485
12486 if (old_prog)
12487 bpf_prog_put(old_prog);
12488
12489
12490
12491
12492 if (need_reset && prog)
12493 for (i = 0; i < vsi->num_queue_pairs; i++)
12494 if (vsi->xdp_rings[i]->xsk_pool)
12495 (void)i40e_xsk_wakeup(vsi->netdev, i,
12496 XDP_WAKEUP_RX);
12497
12498 return 0;
12499}
12500
12501
12502
12503
12504
12505
12506
12507static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
12508{
12509 struct i40e_pf *pf = vsi->back;
12510 int timeout = 50;
12511
12512 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
12513 timeout--;
12514 if (!timeout)
12515 return -EBUSY;
12516 usleep_range(1000, 2000);
12517 }
12518
12519 return 0;
12520}
12521
12522
12523
12524
12525
12526static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
12527{
12528 struct i40e_pf *pf = vsi->back;
12529
12530 clear_bit(__I40E_CONFIG_BUSY, pf->state);
12531}
12532
12533
12534
12535
12536
12537
12538static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
12539{
12540 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
12541 sizeof(vsi->rx_rings[queue_pair]->rx_stats));
12542 memset(&vsi->tx_rings[queue_pair]->stats, 0,
12543 sizeof(vsi->tx_rings[queue_pair]->stats));
12544 if (i40e_enabled_xdp_vsi(vsi)) {
12545 memset(&vsi->xdp_rings[queue_pair]->stats, 0,
12546 sizeof(vsi->xdp_rings[queue_pair]->stats));
12547 }
12548}
12549
12550
12551
12552
12553
12554
12555static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
12556{
12557 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
12558 if (i40e_enabled_xdp_vsi(vsi)) {
12559
12560
12561
12562 synchronize_rcu();
12563 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
12564 }
12565 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
12566}
12567
12568
12569
12570
12571
12572
12573
12574static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
12575 bool enable)
12576{
12577 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12578 struct i40e_q_vector *q_vector = rxr->q_vector;
12579
12580 if (!vsi->netdev)
12581 return;
12582
12583
12584 if (q_vector->rx.ring || q_vector->tx.ring) {
12585 if (enable)
12586 napi_enable(&q_vector->napi);
12587 else
12588 napi_disable(&q_vector->napi);
12589 }
12590}
12591
12592
12593
12594
12595
12596
12597
12598
12599
12600static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
12601 bool enable)
12602{
12603 struct i40e_pf *pf = vsi->back;
12604 int pf_q, ret = 0;
12605
12606 pf_q = vsi->base_queue + queue_pair;
12607 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
12608 false , enable);
12609 if (ret) {
12610 dev_info(&pf->pdev->dev,
12611 "VSI seid %d Tx ring %d %sable timeout\n",
12612 vsi->seid, pf_q, (enable ? "en" : "dis"));
12613 return ret;
12614 }
12615
12616 i40e_control_rx_q(pf, pf_q, enable);
12617 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
12618 if (ret) {
12619 dev_info(&pf->pdev->dev,
12620 "VSI seid %d Rx ring %d %sable timeout\n",
12621 vsi->seid, pf_q, (enable ? "en" : "dis"));
12622 return ret;
12623 }
12624
12625
12626
12627
12628 if (!enable)
12629 mdelay(50);
12630
12631 if (!i40e_enabled_xdp_vsi(vsi))
12632 return ret;
12633
12634 ret = i40e_control_wait_tx_q(vsi->seid, pf,
12635 pf_q + vsi->alloc_queue_pairs,
12636 true , enable);
12637 if (ret) {
12638 dev_info(&pf->pdev->dev,
12639 "VSI seid %d XDP Tx ring %d %sable timeout\n",
12640 vsi->seid, pf_q, (enable ? "en" : "dis"));
12641 }
12642
12643 return ret;
12644}
12645
12646
12647
12648
12649
12650
12651static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
12652{
12653 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12654 struct i40e_pf *pf = vsi->back;
12655 struct i40e_hw *hw = &pf->hw;
12656
12657
12658 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
12659 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
12660 else
12661 i40e_irq_dynamic_enable_icr0(pf);
12662
12663 i40e_flush(hw);
12664}
12665
12666
12667
12668
12669
12670
12671static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
12672{
12673 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12674 struct i40e_pf *pf = vsi->back;
12675 struct i40e_hw *hw = &pf->hw;
12676
12677
12678
12679
12680
12681
12682
12683 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
12684 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
12685
12686 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
12687 i40e_flush(hw);
12688 synchronize_irq(pf->msix_entries[intpf].vector);
12689 } else {
12690
12691 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
12692 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
12693 i40e_flush(hw);
12694 synchronize_irq(pf->pdev->irq);
12695 }
12696}
12697
12698
12699
12700
12701
12702
12703
12704
12705int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
12706{
12707 int err;
12708
12709 err = i40e_enter_busy_conf(vsi);
12710 if (err)
12711 return err;
12712
12713 i40e_queue_pair_disable_irq(vsi, queue_pair);
12714 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false );
12715 i40e_queue_pair_toggle_napi(vsi, queue_pair, false );
12716 i40e_queue_pair_clean_rings(vsi, queue_pair);
12717 i40e_queue_pair_reset_stats(vsi, queue_pair);
12718
12719 return err;
12720}
12721
12722
12723
12724
12725
12726
12727
12728
12729int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
12730{
12731 int err;
12732
12733 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
12734 if (err)
12735 return err;
12736
12737 if (i40e_enabled_xdp_vsi(vsi)) {
12738 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
12739 if (err)
12740 return err;
12741 }
12742
12743 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
12744 if (err)
12745 return err;
12746
12747 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true );
12748 i40e_queue_pair_toggle_napi(vsi, queue_pair, true );
12749 i40e_queue_pair_enable_irq(vsi, queue_pair);
12750
12751 i40e_exit_busy_conf(vsi);
12752
12753 return err;
12754}
12755
12756
12757
12758
12759
12760
12761static int i40e_xdp(struct net_device *dev,
12762 struct netdev_bpf *xdp)
12763{
12764 struct i40e_netdev_priv *np = netdev_priv(dev);
12765 struct i40e_vsi *vsi = np->vsi;
12766
12767 if (vsi->type != I40E_VSI_MAIN)
12768 return -EINVAL;
12769
12770 switch (xdp->command) {
12771 case XDP_SETUP_PROG:
12772 return i40e_xdp_setup(vsi, xdp->prog);
12773 case XDP_SETUP_XSK_POOL:
12774 return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
12775 xdp->xsk.queue_id);
12776 default:
12777 return -EINVAL;
12778 }
12779}
12780
12781static const struct net_device_ops i40e_netdev_ops = {
12782 .ndo_open = i40e_open,
12783 .ndo_stop = i40e_close,
12784 .ndo_start_xmit = i40e_lan_xmit_frame,
12785 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
12786 .ndo_set_rx_mode = i40e_set_rx_mode,
12787 .ndo_validate_addr = eth_validate_addr,
12788 .ndo_set_mac_address = i40e_set_mac,
12789 .ndo_change_mtu = i40e_change_mtu,
12790 .ndo_do_ioctl = i40e_ioctl,
12791 .ndo_tx_timeout = i40e_tx_timeout,
12792 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
12793 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
12794#ifdef CONFIG_NET_POLL_CONTROLLER
12795 .ndo_poll_controller = i40e_netpoll,
12796#endif
12797 .ndo_setup_tc = __i40e_setup_tc,
12798 .ndo_set_features = i40e_set_features,
12799 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
12800 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
12801 .ndo_get_vf_stats = i40e_get_vf_stats,
12802 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
12803 .ndo_get_vf_config = i40e_ndo_get_vf_config,
12804 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
12805 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
12806 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
12807 .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
12808 .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
12809 .ndo_get_phys_port_id = i40e_get_phys_port_id,
12810 .ndo_fdb_add = i40e_ndo_fdb_add,
12811 .ndo_features_check = i40e_features_check,
12812 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
12813 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
12814 .ndo_bpf = i40e_xdp,
12815 .ndo_xdp_xmit = i40e_xdp_xmit,
12816 .ndo_xsk_wakeup = i40e_xsk_wakeup,
12817 .ndo_dfwd_add_station = i40e_fwd_add,
12818 .ndo_dfwd_del_station = i40e_fwd_del,
12819};
12820
12821
12822
12823
12824
12825
12826
12827static int i40e_config_netdev(struct i40e_vsi *vsi)
12828{
12829 struct i40e_pf *pf = vsi->back;
12830 struct i40e_hw *hw = &pf->hw;
12831 struct i40e_netdev_priv *np;
12832 struct net_device *netdev;
12833 u8 broadcast[ETH_ALEN];
12834 u8 mac_addr[ETH_ALEN];
12835 int etherdev_size;
12836 netdev_features_t hw_enc_features;
12837 netdev_features_t hw_features;
12838
12839 etherdev_size = sizeof(struct i40e_netdev_priv);
12840 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
12841 if (!netdev)
12842 return -ENOMEM;
12843
12844 vsi->netdev = netdev;
12845 np = netdev_priv(netdev);
12846 np->vsi = vsi;
12847
12848 hw_enc_features = NETIF_F_SG |
12849 NETIF_F_IP_CSUM |
12850 NETIF_F_IPV6_CSUM |
12851 NETIF_F_HIGHDMA |
12852 NETIF_F_SOFT_FEATURES |
12853 NETIF_F_TSO |
12854 NETIF_F_TSO_ECN |
12855 NETIF_F_TSO6 |
12856 NETIF_F_GSO_GRE |
12857 NETIF_F_GSO_GRE_CSUM |
12858 NETIF_F_GSO_PARTIAL |
12859 NETIF_F_GSO_IPXIP4 |
12860 NETIF_F_GSO_IPXIP6 |
12861 NETIF_F_GSO_UDP_TUNNEL |
12862 NETIF_F_GSO_UDP_TUNNEL_CSUM |
12863 NETIF_F_GSO_UDP_L4 |
12864 NETIF_F_SCTP_CRC |
12865 NETIF_F_RXHASH |
12866 NETIF_F_RXCSUM |
12867 0;
12868
12869 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
12870 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
12871
12872 netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
12873
12874 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
12875
12876 netdev->hw_enc_features |= hw_enc_features;
12877
12878
12879 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
12880
12881
12882 netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
12883
12884 hw_features = hw_enc_features |
12885 NETIF_F_HW_VLAN_CTAG_TX |
12886 NETIF_F_HW_VLAN_CTAG_RX;
12887
12888 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
12889 hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
12890
12891 netdev->hw_features |= hw_features;
12892
12893 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
12894 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
12895
12896 if (vsi->type == I40E_VSI_MAIN) {
12897 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
12898 ether_addr_copy(mac_addr, hw->mac.perm_addr);
12899
12900
12901
12902
12903
12904
12905
12906
12907
12908
12909 i40e_rm_default_mac_filter(vsi, mac_addr);
12910 spin_lock_bh(&vsi->mac_filter_hash_lock);
12911 i40e_add_mac_filter(vsi, mac_addr);
12912 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12913 } else {
12914
12915
12916
12917
12918
12919 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
12920 IFNAMSIZ - 4,
12921 pf->vsi[pf->lan_vsi]->netdev->name);
12922 eth_random_addr(mac_addr);
12923
12924 spin_lock_bh(&vsi->mac_filter_hash_lock);
12925 i40e_add_mac_filter(vsi, mac_addr);
12926 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12927 }
12928
12929
12930
12931
12932
12933
12934
12935
12936
12937
12938
12939
12940
12941
12942 eth_broadcast_addr(broadcast);
12943 spin_lock_bh(&vsi->mac_filter_hash_lock);
12944 i40e_add_mac_filter(vsi, broadcast);
12945 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12946
12947 ether_addr_copy(netdev->dev_addr, mac_addr);
12948 ether_addr_copy(netdev->perm_addr, mac_addr);
12949
12950
12951 netdev->neigh_priv_len = sizeof(u32) * 4;
12952
12953 netdev->priv_flags |= IFF_UNICAST_FLT;
12954 netdev->priv_flags |= IFF_SUPP_NOFCS;
12955
12956 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
12957
12958 netdev->netdev_ops = &i40e_netdev_ops;
12959 netdev->watchdog_timeo = 5 * HZ;
12960 i40e_set_ethtool_ops(netdev);
12961
12962
12963 netdev->min_mtu = ETH_MIN_MTU;
12964 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
12965
12966 return 0;
12967}
12968
12969
12970
12971
12972
12973
12974
12975static void i40e_vsi_delete(struct i40e_vsi *vsi)
12976{
12977
12978 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
12979 return;
12980
12981 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
12982}
12983
12984
12985
12986
12987
12988
12989
12990int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
12991{
12992 struct i40e_veb *veb;
12993 struct i40e_pf *pf = vsi->back;
12994
12995
12996 if (vsi->veb_idx >= I40E_MAX_VEB)
12997 return 1;
12998
12999 veb = pf->veb[vsi->veb_idx];
13000 if (!veb) {
13001 dev_info(&pf->pdev->dev,
13002 "There is no veb associated with the bridge\n");
13003 return -ENOENT;
13004 }
13005
13006
13007 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
13008 return 0;
13009 } else {
13010
13011 return 1;
13012 }
13013
13014
13015 return 0;
13016}
13017
13018
13019
13020
13021
13022
13023
13024
13025static int i40e_add_vsi(struct i40e_vsi *vsi)
13026{
13027 int ret = -ENODEV;
13028 struct i40e_pf *pf = vsi->back;
13029 struct i40e_hw *hw = &pf->hw;
13030 struct i40e_vsi_context ctxt;
13031 struct i40e_mac_filter *f;
13032 struct hlist_node *h;
13033 int bkt;
13034
13035 u8 enabled_tc = 0x1;
13036 int f_count = 0;
13037
13038 memset(&ctxt, 0, sizeof(ctxt));
13039 switch (vsi->type) {
13040 case I40E_VSI_MAIN:
13041
13042
13043
13044
13045
13046 ctxt.seid = pf->main_vsi_seid;
13047 ctxt.pf_num = pf->hw.pf_id;
13048 ctxt.vf_num = 0;
13049 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
13050 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13051 if (ret) {
13052 dev_info(&pf->pdev->dev,
13053 "couldn't get PF vsi config, err %s aq_err %s\n",
13054 i40e_stat_str(&pf->hw, ret),
13055 i40e_aq_str(&pf->hw,
13056 pf->hw.aq.asq_last_status));
13057 return -ENOENT;
13058 }
13059 vsi->info = ctxt.info;
13060 vsi->info.valid_sections = 0;
13061
13062 vsi->seid = ctxt.seid;
13063 vsi->id = ctxt.vsi_number;
13064
13065 enabled_tc = i40e_pf_get_tc_map(pf);
13066
13067
13068
13069
13070
13071 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
13072 memset(&ctxt, 0, sizeof(ctxt));
13073 ctxt.seid = pf->main_vsi_seid;
13074 ctxt.pf_num = pf->hw.pf_id;
13075 ctxt.vf_num = 0;
13076 ctxt.info.valid_sections |=
13077 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13078 ctxt.info.switch_id =
13079 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
13080 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13081 if (ret) {
13082 dev_info(&pf->pdev->dev,
13083 "update vsi failed, err %s aq_err %s\n",
13084 i40e_stat_str(&pf->hw, ret),
13085 i40e_aq_str(&pf->hw,
13086 pf->hw.aq.asq_last_status));
13087 ret = -ENOENT;
13088 goto err;
13089 }
13090 }
13091
13092
13093 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
13094 !(pf->hw.func_caps.iscsi)) {
13095 memset(&ctxt, 0, sizeof(ctxt));
13096 ctxt.seid = pf->main_vsi_seid;
13097 ctxt.pf_num = pf->hw.pf_id;
13098 ctxt.vf_num = 0;
13099 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
13100 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13101 if (ret) {
13102 dev_info(&pf->pdev->dev,
13103 "update vsi failed, err %s aq_err %s\n",
13104 i40e_stat_str(&pf->hw, ret),
13105 i40e_aq_str(&pf->hw,
13106 pf->hw.aq.asq_last_status));
13107 ret = -ENOENT;
13108 goto err;
13109 }
13110
13111 i40e_vsi_update_queue_map(vsi, &ctxt);
13112 vsi->info.valid_sections = 0;
13113 } else {
13114
13115
13116
13117
13118
13119
13120 ret = i40e_vsi_config_tc(vsi, enabled_tc);
13121 if (ret) {
13122
13123
13124
13125 dev_info(&pf->pdev->dev,
13126 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
13127 enabled_tc,
13128 i40e_stat_str(&pf->hw, ret),
13129 i40e_aq_str(&pf->hw,
13130 pf->hw.aq.asq_last_status));
13131 }
13132 }
13133 break;
13134
13135 case I40E_VSI_FDIR:
13136 ctxt.pf_num = hw->pf_id;
13137 ctxt.vf_num = 0;
13138 ctxt.uplink_seid = vsi->uplink_seid;
13139 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13140 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13141 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
13142 (i40e_is_vsi_uplink_mode_veb(vsi))) {
13143 ctxt.info.valid_sections |=
13144 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13145 ctxt.info.switch_id =
13146 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13147 }
13148 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13149 break;
13150
13151 case I40E_VSI_VMDQ2:
13152 ctxt.pf_num = hw->pf_id;
13153 ctxt.vf_num = 0;
13154 ctxt.uplink_seid = vsi->uplink_seid;
13155 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13156 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
13157
13158
13159
13160
13161 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13162 ctxt.info.valid_sections |=
13163 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13164 ctxt.info.switch_id =
13165 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13166 }
13167
13168
13169 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13170 break;
13171
13172 case I40E_VSI_SRIOV:
13173 ctxt.pf_num = hw->pf_id;
13174 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
13175 ctxt.uplink_seid = vsi->uplink_seid;
13176 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13177 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
13178
13179
13180
13181
13182 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13183 ctxt.info.valid_sections |=
13184 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13185 ctxt.info.switch_id =
13186 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13187 }
13188
13189 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
13190 ctxt.info.valid_sections |=
13191 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
13192 ctxt.info.queueing_opt_flags |=
13193 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
13194 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
13195 }
13196
13197 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
13198 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
13199 if (pf->vf[vsi->vf_id].spoofchk) {
13200 ctxt.info.valid_sections |=
13201 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
13202 ctxt.info.sec_flags |=
13203 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
13204 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
13205 }
13206
13207 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13208 break;
13209
13210 case I40E_VSI_IWARP:
13211
13212 break;
13213
13214 default:
13215 return -ENODEV;
13216 }
13217
13218 if (vsi->type != I40E_VSI_MAIN) {
13219 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
13220 if (ret) {
13221 dev_info(&vsi->back->pdev->dev,
13222 "add vsi failed, err %s aq_err %s\n",
13223 i40e_stat_str(&pf->hw, ret),
13224 i40e_aq_str(&pf->hw,
13225 pf->hw.aq.asq_last_status));
13226 ret = -ENOENT;
13227 goto err;
13228 }
13229 vsi->info = ctxt.info;
13230 vsi->info.valid_sections = 0;
13231 vsi->seid = ctxt.seid;
13232 vsi->id = ctxt.vsi_number;
13233 }
13234
13235 vsi->active_filters = 0;
13236 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
13237 spin_lock_bh(&vsi->mac_filter_hash_lock);
13238
13239 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
13240 f->state = I40E_FILTER_NEW;
13241 f_count++;
13242 }
13243 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13244
13245 if (f_count) {
13246 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
13247 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
13248 }
13249
13250
13251 ret = i40e_vsi_get_bw_info(vsi);
13252 if (ret) {
13253 dev_info(&pf->pdev->dev,
13254 "couldn't get vsi bw info, err %s aq_err %s\n",
13255 i40e_stat_str(&pf->hw, ret),
13256 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13257
13258 ret = 0;
13259 }
13260
13261err:
13262 return ret;
13263}
13264
13265
13266
13267
13268
13269
13270
13271int i40e_vsi_release(struct i40e_vsi *vsi)
13272{
13273 struct i40e_mac_filter *f;
13274 struct hlist_node *h;
13275 struct i40e_veb *veb = NULL;
13276 struct i40e_pf *pf;
13277 u16 uplink_seid;
13278 int i, n, bkt;
13279
13280 pf = vsi->back;
13281
13282
13283 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
13284 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
13285 vsi->seid, vsi->uplink_seid);
13286 return -ENODEV;
13287 }
13288 if (vsi == pf->vsi[pf->lan_vsi] &&
13289 !test_bit(__I40E_DOWN, pf->state)) {
13290 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
13291 return -ENODEV;
13292 }
13293
13294 uplink_seid = vsi->uplink_seid;
13295 if (vsi->type != I40E_VSI_SRIOV) {
13296 if (vsi->netdev_registered) {
13297 vsi->netdev_registered = false;
13298 if (vsi->netdev) {
13299
13300 unregister_netdev(vsi->netdev);
13301 }
13302 } else {
13303 i40e_vsi_close(vsi);
13304 }
13305 i40e_vsi_disable_irq(vsi);
13306 }
13307
13308 spin_lock_bh(&vsi->mac_filter_hash_lock);
13309
13310
13311 if (vsi->netdev) {
13312 __dev_uc_unsync(vsi->netdev, NULL);
13313 __dev_mc_unsync(vsi->netdev, NULL);
13314 }
13315
13316
13317 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
13318 __i40e_del_filter(vsi, f);
13319
13320 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13321
13322 i40e_sync_vsi_filters(vsi);
13323
13324 i40e_vsi_delete(vsi);
13325 i40e_vsi_free_q_vectors(vsi);
13326 if (vsi->netdev) {
13327 free_netdev(vsi->netdev);
13328 vsi->netdev = NULL;
13329 }
13330 i40e_vsi_clear_rings(vsi);
13331 i40e_vsi_clear(vsi);
13332
13333
13334
13335
13336
13337
13338
13339
13340
13341 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
13342 if (pf->vsi[i] &&
13343 pf->vsi[i]->uplink_seid == uplink_seid &&
13344 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13345 n++;
13346 }
13347 }
13348 for (i = 0; i < I40E_MAX_VEB; i++) {
13349 if (!pf->veb[i])
13350 continue;
13351 if (pf->veb[i]->uplink_seid == uplink_seid)
13352 n++;
13353 if (pf->veb[i]->seid == uplink_seid)
13354 veb = pf->veb[i];
13355 }
13356 if (n == 0 && veb && veb->uplink_seid != 0)
13357 i40e_veb_release(veb);
13358
13359 return 0;
13360}
13361
13362
13363
13364
13365
13366
13367
13368
13369
13370
13371
13372static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
13373{
13374 int ret = -ENOENT;
13375 struct i40e_pf *pf = vsi->back;
13376
13377 if (vsi->q_vectors[0]) {
13378 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
13379 vsi->seid);
13380 return -EEXIST;
13381 }
13382
13383 if (vsi->base_vector) {
13384 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
13385 vsi->seid, vsi->base_vector);
13386 return -EEXIST;
13387 }
13388
13389 ret = i40e_vsi_alloc_q_vectors(vsi);
13390 if (ret) {
13391 dev_info(&pf->pdev->dev,
13392 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
13393 vsi->num_q_vectors, vsi->seid, ret);
13394 vsi->num_q_vectors = 0;
13395 goto vector_setup_out;
13396 }
13397
13398
13399
13400
13401 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
13402 return ret;
13403 if (vsi->num_q_vectors)
13404 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
13405 vsi->num_q_vectors, vsi->idx);
13406 if (vsi->base_vector < 0) {
13407 dev_info(&pf->pdev->dev,
13408 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
13409 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
13410 i40e_vsi_free_q_vectors(vsi);
13411 ret = -ENOENT;
13412 goto vector_setup_out;
13413 }
13414
13415vector_setup_out:
13416 return ret;
13417}
13418
13419
13420
13421
13422
13423
13424
13425
13426
13427
13428static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
13429{
13430 u16 alloc_queue_pairs;
13431 struct i40e_pf *pf;
13432 u8 enabled_tc;
13433 int ret;
13434
13435 if (!vsi)
13436 return NULL;
13437
13438 pf = vsi->back;
13439
13440 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
13441 i40e_vsi_clear_rings(vsi);
13442
13443 i40e_vsi_free_arrays(vsi, false);
13444 i40e_set_num_rings_in_vsi(vsi);
13445 ret = i40e_vsi_alloc_arrays(vsi, false);
13446 if (ret)
13447 goto err_vsi;
13448
13449 alloc_queue_pairs = vsi->alloc_queue_pairs *
13450 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
13451
13452 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
13453 if (ret < 0) {
13454 dev_info(&pf->pdev->dev,
13455 "failed to get tracking for %d queues for VSI %d err %d\n",
13456 alloc_queue_pairs, vsi->seid, ret);
13457 goto err_vsi;
13458 }
13459 vsi->base_queue = ret;
13460
13461
13462
13463
13464 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
13465 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
13466 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
13467 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
13468 if (vsi->type == I40E_VSI_MAIN)
13469 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
13470
13471
13472 ret = i40e_alloc_rings(vsi);
13473 if (ret)
13474 goto err_rings;
13475
13476
13477 i40e_vsi_map_rings_to_vectors(vsi);
13478 return vsi;
13479
13480err_rings:
13481 i40e_vsi_free_q_vectors(vsi);
13482 if (vsi->netdev_registered) {
13483 vsi->netdev_registered = false;
13484 unregister_netdev(vsi->netdev);
13485 free_netdev(vsi->netdev);
13486 vsi->netdev = NULL;
13487 }
13488 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
13489err_vsi:
13490 i40e_vsi_clear(vsi);
13491 return NULL;
13492}
13493
13494
13495
13496
13497
13498
13499
13500
13501
13502
13503
13504
13505
13506
13507struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
13508 u16 uplink_seid, u32 param1)
13509{
13510 struct i40e_vsi *vsi = NULL;
13511 struct i40e_veb *veb = NULL;
13512 u16 alloc_queue_pairs;
13513 int ret, i;
13514 int v_idx;
13515
13516
13517
13518
13519
13520
13521
13522
13523
13524
13525
13526
13527
13528
13529 for (i = 0; i < I40E_MAX_VEB; i++) {
13530 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
13531 veb = pf->veb[i];
13532 break;
13533 }
13534 }
13535
13536 if (!veb && uplink_seid != pf->mac_seid) {
13537
13538 for (i = 0; i < pf->num_alloc_vsi; i++) {
13539 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
13540 vsi = pf->vsi[i];
13541 break;
13542 }
13543 }
13544 if (!vsi) {
13545 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
13546 uplink_seid);
13547 return NULL;
13548 }
13549
13550 if (vsi->uplink_seid == pf->mac_seid)
13551 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
13552 vsi->tc_config.enabled_tc);
13553 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
13554 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
13555 vsi->tc_config.enabled_tc);
13556 if (veb) {
13557 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
13558 dev_info(&vsi->back->pdev->dev,
13559 "New VSI creation error, uplink seid of LAN VSI expected.\n");
13560 return NULL;
13561 }
13562
13563
13564
13565
13566 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
13567 veb->bridge_mode = BRIDGE_MODE_VEPA;
13568 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
13569 }
13570 i40e_config_bridge_mode(veb);
13571 }
13572 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
13573 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
13574 veb = pf->veb[i];
13575 }
13576 if (!veb) {
13577 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
13578 return NULL;
13579 }
13580
13581 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
13582 uplink_seid = veb->seid;
13583 }
13584
13585
13586 v_idx = i40e_vsi_mem_alloc(pf, type);
13587 if (v_idx < 0)
13588 goto err_alloc;
13589 vsi = pf->vsi[v_idx];
13590 if (!vsi)
13591 goto err_alloc;
13592 vsi->type = type;
13593 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
13594
13595 if (type == I40E_VSI_MAIN)
13596 pf->lan_vsi = v_idx;
13597 else if (type == I40E_VSI_SRIOV)
13598 vsi->vf_id = param1;
13599
13600 alloc_queue_pairs = vsi->alloc_queue_pairs *
13601 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
13602
13603 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
13604 if (ret < 0) {
13605 dev_info(&pf->pdev->dev,
13606 "failed to get tracking for %d queues for VSI %d err=%d\n",
13607 alloc_queue_pairs, vsi->seid, ret);
13608 goto err_vsi;
13609 }
13610 vsi->base_queue = ret;
13611
13612
13613 vsi->uplink_seid = uplink_seid;
13614 ret = i40e_add_vsi(vsi);
13615 if (ret)
13616 goto err_vsi;
13617
13618 switch (vsi->type) {
13619
13620 case I40E_VSI_MAIN:
13621 case I40E_VSI_VMDQ2:
13622 ret = i40e_config_netdev(vsi);
13623 if (ret)
13624 goto err_netdev;
13625 ret = register_netdev(vsi->netdev);
13626 if (ret)
13627 goto err_netdev;
13628 vsi->netdev_registered = true;
13629 netif_carrier_off(vsi->netdev);
13630#ifdef CONFIG_I40E_DCB
13631
13632 i40e_dcbnl_setup(vsi);
13633#endif
13634 fallthrough;
13635 case I40E_VSI_FDIR:
13636
13637 ret = i40e_vsi_setup_vectors(vsi);
13638 if (ret)
13639 goto err_msix;
13640
13641 ret = i40e_alloc_rings(vsi);
13642 if (ret)
13643 goto err_rings;
13644
13645
13646 i40e_vsi_map_rings_to_vectors(vsi);
13647
13648 i40e_vsi_reset_stats(vsi);
13649 break;
13650 default:
13651
13652 break;
13653 }
13654
13655 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
13656 (vsi->type == I40E_VSI_VMDQ2)) {
13657 ret = i40e_vsi_config_rss(vsi);
13658 }
13659 return vsi;
13660
13661err_rings:
13662 i40e_vsi_free_q_vectors(vsi);
13663err_msix:
13664 if (vsi->netdev_registered) {
13665 vsi->netdev_registered = false;
13666 unregister_netdev(vsi->netdev);
13667 free_netdev(vsi->netdev);
13668 vsi->netdev = NULL;
13669 }
13670err_netdev:
13671 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
13672err_vsi:
13673 i40e_vsi_clear(vsi);
13674err_alloc:
13675 return NULL;
13676}
13677
13678
13679
13680
13681
13682
13683
13684static int i40e_veb_get_bw_info(struct i40e_veb *veb)
13685{
13686 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
13687 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
13688 struct i40e_pf *pf = veb->pf;
13689 struct i40e_hw *hw = &pf->hw;
13690 u32 tc_bw_max;
13691 int ret = 0;
13692 int i;
13693
13694 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
13695 &bw_data, NULL);
13696 if (ret) {
13697 dev_info(&pf->pdev->dev,
13698 "query veb bw config failed, err %s aq_err %s\n",
13699 i40e_stat_str(&pf->hw, ret),
13700 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
13701 goto out;
13702 }
13703
13704 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
13705 &ets_data, NULL);
13706 if (ret) {
13707 dev_info(&pf->pdev->dev,
13708 "query veb bw ets config failed, err %s aq_err %s\n",
13709 i40e_stat_str(&pf->hw, ret),
13710 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
13711 goto out;
13712 }
13713
13714 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
13715 veb->bw_max_quanta = ets_data.tc_bw_max;
13716 veb->is_abs_credits = bw_data.absolute_credits_enable;
13717 veb->enabled_tc = ets_data.tc_valid_bits;
13718 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
13719 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
13720 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
13721 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
13722 veb->bw_tc_limit_credits[i] =
13723 le16_to_cpu(bw_data.tc_bw_limits[i]);
13724 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
13725 }
13726
13727out:
13728 return ret;
13729}
13730
13731
13732
13733
13734
13735
13736
13737
13738static int i40e_veb_mem_alloc(struct i40e_pf *pf)
13739{
13740 int ret = -ENOENT;
13741 struct i40e_veb *veb;
13742 int i;
13743
13744
13745 mutex_lock(&pf->switch_mutex);
13746
13747
13748
13749
13750
13751
13752
13753 i = 0;
13754 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
13755 i++;
13756 if (i >= I40E_MAX_VEB) {
13757 ret = -ENOMEM;
13758 goto err_alloc_veb;
13759 }
13760
13761 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
13762 if (!veb) {
13763 ret = -ENOMEM;
13764 goto err_alloc_veb;
13765 }
13766 veb->pf = pf;
13767 veb->idx = i;
13768 veb->enabled_tc = 1;
13769
13770 pf->veb[i] = veb;
13771 ret = i;
13772err_alloc_veb:
13773 mutex_unlock(&pf->switch_mutex);
13774 return ret;
13775}
13776
13777
13778
13779
13780
13781
13782
13783
13784static void i40e_switch_branch_release(struct i40e_veb *branch)
13785{
13786 struct i40e_pf *pf = branch->pf;
13787 u16 branch_seid = branch->seid;
13788 u16 veb_idx = branch->idx;
13789 int i;
13790
13791
13792 for (i = 0; i < I40E_MAX_VEB; i++) {
13793 if (!pf->veb[i])
13794 continue;
13795 if (pf->veb[i]->uplink_seid == branch->seid)
13796 i40e_switch_branch_release(pf->veb[i]);
13797 }
13798
13799
13800
13801
13802
13803
13804 for (i = 0; i < pf->num_alloc_vsi; i++) {
13805 if (!pf->vsi[i])
13806 continue;
13807 if (pf->vsi[i]->uplink_seid == branch_seid &&
13808 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13809 i40e_vsi_release(pf->vsi[i]);
13810 }
13811 }
13812
13813
13814
13815
13816
13817
13818 if (pf->veb[veb_idx])
13819 i40e_veb_release(pf->veb[veb_idx]);
13820}
13821
13822
13823
13824
13825
13826static void i40e_veb_clear(struct i40e_veb *veb)
13827{
13828 if (!veb)
13829 return;
13830
13831 if (veb->pf) {
13832 struct i40e_pf *pf = veb->pf;
13833
13834 mutex_lock(&pf->switch_mutex);
13835 if (pf->veb[veb->idx] == veb)
13836 pf->veb[veb->idx] = NULL;
13837 mutex_unlock(&pf->switch_mutex);
13838 }
13839
13840 kfree(veb);
13841}
13842
13843
13844
13845
13846
13847void i40e_veb_release(struct i40e_veb *veb)
13848{
13849 struct i40e_vsi *vsi = NULL;
13850 struct i40e_pf *pf;
13851 int i, n = 0;
13852
13853 pf = veb->pf;
13854
13855
13856 for (i = 0; i < pf->num_alloc_vsi; i++) {
13857 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
13858 n++;
13859 vsi = pf->vsi[i];
13860 }
13861 }
13862 if (n != 1) {
13863 dev_info(&pf->pdev->dev,
13864 "can't remove VEB %d with %d VSIs left\n",
13865 veb->seid, n);
13866 return;
13867 }
13868
13869
13870 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
13871 if (veb->uplink_seid) {
13872 vsi->uplink_seid = veb->uplink_seid;
13873 if (veb->uplink_seid == pf->mac_seid)
13874 vsi->veb_idx = I40E_NO_VEB;
13875 else
13876 vsi->veb_idx = veb->veb_idx;
13877 } else {
13878
13879 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
13880 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
13881 }
13882
13883 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
13884 i40e_veb_clear(veb);
13885}
13886
13887
13888
13889
13890
13891
13892static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
13893{
13894 struct i40e_pf *pf = veb->pf;
13895 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
13896 int ret;
13897
13898 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
13899 veb->enabled_tc, false,
13900 &veb->seid, enable_stats, NULL);
13901
13902
13903 if (ret) {
13904 dev_info(&pf->pdev->dev,
13905 "couldn't add VEB, err %s aq_err %s\n",
13906 i40e_stat_str(&pf->hw, ret),
13907 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13908 return -EPERM;
13909 }
13910
13911
13912 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
13913 &veb->stats_idx, NULL, NULL, NULL);
13914 if (ret) {
13915 dev_info(&pf->pdev->dev,
13916 "couldn't get VEB statistics idx, err %s aq_err %s\n",
13917 i40e_stat_str(&pf->hw, ret),
13918 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13919 return -EPERM;
13920 }
13921 ret = i40e_veb_get_bw_info(veb);
13922 if (ret) {
13923 dev_info(&pf->pdev->dev,
13924 "couldn't get VEB bw info, err %s aq_err %s\n",
13925 i40e_stat_str(&pf->hw, ret),
13926 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13927 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
13928 return -ENOENT;
13929 }
13930
13931 vsi->uplink_seid = veb->seid;
13932 vsi->veb_idx = veb->idx;
13933 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
13934
13935 return 0;
13936}
13937
13938
13939
13940
13941
13942
13943
13944
13945
13946
13947
13948
13949
13950
13951
13952
13953
13954struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
13955 u16 uplink_seid, u16 vsi_seid,
13956 u8 enabled_tc)
13957{
13958 struct i40e_veb *veb, *uplink_veb = NULL;
13959 int vsi_idx, veb_idx;
13960 int ret;
13961
13962
13963 if ((uplink_seid == 0 || vsi_seid == 0) &&
13964 (uplink_seid + vsi_seid != 0)) {
13965 dev_info(&pf->pdev->dev,
13966 "one, not both seid's are 0: uplink=%d vsi=%d\n",
13967 uplink_seid, vsi_seid);
13968 return NULL;
13969 }
13970
13971
13972 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
13973 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
13974 break;
13975 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
13976 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
13977 vsi_seid);
13978 return NULL;
13979 }
13980
13981 if (uplink_seid && uplink_seid != pf->mac_seid) {
13982 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
13983 if (pf->veb[veb_idx] &&
13984 pf->veb[veb_idx]->seid == uplink_seid) {
13985 uplink_veb = pf->veb[veb_idx];
13986 break;
13987 }
13988 }
13989 if (!uplink_veb) {
13990 dev_info(&pf->pdev->dev,
13991 "uplink seid %d not found\n", uplink_seid);
13992 return NULL;
13993 }
13994 }
13995
13996
13997 veb_idx = i40e_veb_mem_alloc(pf);
13998 if (veb_idx < 0)
13999 goto err_alloc;
14000 veb = pf->veb[veb_idx];
14001 veb->flags = flags;
14002 veb->uplink_seid = uplink_seid;
14003 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
14004 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
14005
14006
14007 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
14008 if (ret)
14009 goto err_veb;
14010 if (vsi_idx == pf->lan_vsi)
14011 pf->lan_veb = veb->idx;
14012
14013 return veb;
14014
14015err_veb:
14016 i40e_veb_clear(veb);
14017err_alloc:
14018 return NULL;
14019}
14020
14021
14022
14023
14024
14025
14026
14027
14028
14029
14030static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
14031 struct i40e_aqc_switch_config_element_resp *ele,
14032 u16 num_reported, bool printconfig)
14033{
14034 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
14035 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
14036 u8 element_type = ele->element_type;
14037 u16 seid = le16_to_cpu(ele->seid);
14038
14039 if (printconfig)
14040 dev_info(&pf->pdev->dev,
14041 "type=%d seid=%d uplink=%d downlink=%d\n",
14042 element_type, seid, uplink_seid, downlink_seid);
14043
14044 switch (element_type) {
14045 case I40E_SWITCH_ELEMENT_TYPE_MAC:
14046 pf->mac_seid = seid;
14047 break;
14048 case I40E_SWITCH_ELEMENT_TYPE_VEB:
14049
14050 if (uplink_seid != pf->mac_seid)
14051 break;
14052 if (pf->lan_veb >= I40E_MAX_VEB) {
14053 int v;
14054
14055
14056 for (v = 0; v < I40E_MAX_VEB; v++) {
14057 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
14058 pf->lan_veb = v;
14059 break;
14060 }
14061 }
14062 if (pf->lan_veb >= I40E_MAX_VEB) {
14063 v = i40e_veb_mem_alloc(pf);
14064 if (v < 0)
14065 break;
14066 pf->lan_veb = v;
14067 }
14068 }
14069 if (pf->lan_veb >= I40E_MAX_VEB)
14070 break;
14071
14072 pf->veb[pf->lan_veb]->seid = seid;
14073 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
14074 pf->veb[pf->lan_veb]->pf = pf;
14075 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
14076 break;
14077 case I40E_SWITCH_ELEMENT_TYPE_VSI:
14078 if (num_reported != 1)
14079 break;
14080
14081
14082
14083 pf->mac_seid = uplink_seid;
14084 pf->pf_seid = downlink_seid;
14085 pf->main_vsi_seid = seid;
14086 if (printconfig)
14087 dev_info(&pf->pdev->dev,
14088 "pf_seid=%d main_vsi_seid=%d\n",
14089 pf->pf_seid, pf->main_vsi_seid);
14090 break;
14091 case I40E_SWITCH_ELEMENT_TYPE_PF:
14092 case I40E_SWITCH_ELEMENT_TYPE_VF:
14093 case I40E_SWITCH_ELEMENT_TYPE_EMP:
14094 case I40E_SWITCH_ELEMENT_TYPE_BMC:
14095 case I40E_SWITCH_ELEMENT_TYPE_PE:
14096 case I40E_SWITCH_ELEMENT_TYPE_PA:
14097
14098 break;
14099 default:
14100 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
14101 element_type, seid);
14102 break;
14103 }
14104}
14105
14106
14107
14108
14109
14110
14111
14112
14113
14114int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
14115{
14116 struct i40e_aqc_get_switch_config_resp *sw_config;
14117 u16 next_seid = 0;
14118 int ret = 0;
14119 u8 *aq_buf;
14120 int i;
14121
14122 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
14123 if (!aq_buf)
14124 return -ENOMEM;
14125
14126 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
14127 do {
14128 u16 num_reported, num_total;
14129
14130 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
14131 I40E_AQ_LARGE_BUF,
14132 &next_seid, NULL);
14133 if (ret) {
14134 dev_info(&pf->pdev->dev,
14135 "get switch config failed err %s aq_err %s\n",
14136 i40e_stat_str(&pf->hw, ret),
14137 i40e_aq_str(&pf->hw,
14138 pf->hw.aq.asq_last_status));
14139 kfree(aq_buf);
14140 return -ENOENT;
14141 }
14142
14143 num_reported = le16_to_cpu(sw_config->header.num_reported);
14144 num_total = le16_to_cpu(sw_config->header.num_total);
14145
14146 if (printconfig)
14147 dev_info(&pf->pdev->dev,
14148 "header: %d reported %d total\n",
14149 num_reported, num_total);
14150
14151 for (i = 0; i < num_reported; i++) {
14152 struct i40e_aqc_switch_config_element_resp *ele =
14153 &sw_config->element[i];
14154
14155 i40e_setup_pf_switch_element(pf, ele, num_reported,
14156 printconfig);
14157 }
14158 } while (next_seid != 0);
14159
14160 kfree(aq_buf);
14161 return ret;
14162}
14163
14164
14165
14166
14167
14168
14169
14170
14171static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
14172{
14173 u16 flags = 0;
14174 int ret;
14175
14176
14177 ret = i40e_fetch_switch_configuration(pf, false);
14178 if (ret) {
14179 dev_info(&pf->pdev->dev,
14180 "couldn't fetch switch config, err %s aq_err %s\n",
14181 i40e_stat_str(&pf->hw, ret),
14182 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14183 return ret;
14184 }
14185 i40e_pf_reset_stats(pf);
14186
14187
14188
14189
14190
14191
14192
14193 if ((pf->hw.pf_id == 0) &&
14194 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
14195 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14196 pf->last_sw_conf_flags = flags;
14197 }
14198
14199 if (pf->hw.pf_id == 0) {
14200 u16 valid_flags;
14201
14202 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14203 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
14204 NULL);
14205 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
14206 dev_info(&pf->pdev->dev,
14207 "couldn't set switch config bits, err %s aq_err %s\n",
14208 i40e_stat_str(&pf->hw, ret),
14209 i40e_aq_str(&pf->hw,
14210 pf->hw.aq.asq_last_status));
14211
14212 }
14213 pf->last_sw_conf_valid_flags = valid_flags;
14214 }
14215
14216
14217 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
14218 struct i40e_vsi *vsi = NULL;
14219 u16 uplink_seid;
14220
14221
14222
14223
14224 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
14225 uplink_seid = pf->veb[pf->lan_veb]->seid;
14226 else
14227 uplink_seid = pf->mac_seid;
14228 if (pf->lan_vsi == I40E_NO_VSI)
14229 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
14230 else if (reinit)
14231 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
14232 if (!vsi) {
14233 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
14234 i40e_cloud_filter_exit(pf);
14235 i40e_fdir_teardown(pf);
14236 return -EAGAIN;
14237 }
14238 } else {
14239
14240 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
14241
14242 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
14243 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
14244 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
14245 }
14246 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
14247
14248 i40e_fdir_sb_setup(pf);
14249
14250
14251 ret = i40e_setup_pf_filter_control(pf);
14252 if (ret) {
14253 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
14254 ret);
14255
14256 }
14257
14258
14259
14260
14261 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
14262 i40e_pf_config_rss(pf);
14263
14264
14265 i40e_link_event(pf);
14266
14267
14268 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
14269 I40E_AQ_AN_COMPLETED) ? true : false);
14270
14271 i40e_ptp_init(pf);
14272
14273
14274 udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
14275
14276 return ret;
14277}
14278
14279
14280
14281
14282
14283static void i40e_determine_queue_usage(struct i40e_pf *pf)
14284{
14285 int queues_left;
14286 int q_max;
14287
14288 pf->num_lan_qps = 0;
14289
14290
14291
14292
14293
14294 queues_left = pf->hw.func_caps.num_tx_qp;
14295
14296 if ((queues_left == 1) ||
14297 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
14298
14299 queues_left = 0;
14300 pf->alloc_rss_size = pf->num_lan_qps = 1;
14301
14302
14303 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
14304 I40E_FLAG_IWARP_ENABLED |
14305 I40E_FLAG_FD_SB_ENABLED |
14306 I40E_FLAG_FD_ATR_ENABLED |
14307 I40E_FLAG_DCB_CAPABLE |
14308 I40E_FLAG_DCB_ENABLED |
14309 I40E_FLAG_SRIOV_ENABLED |
14310 I40E_FLAG_VMDQ_ENABLED);
14311 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14312 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
14313 I40E_FLAG_FD_SB_ENABLED |
14314 I40E_FLAG_FD_ATR_ENABLED |
14315 I40E_FLAG_DCB_CAPABLE))) {
14316
14317 pf->alloc_rss_size = pf->num_lan_qps = 1;
14318 queues_left -= pf->num_lan_qps;
14319
14320 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
14321 I40E_FLAG_IWARP_ENABLED |
14322 I40E_FLAG_FD_SB_ENABLED |
14323 I40E_FLAG_FD_ATR_ENABLED |
14324 I40E_FLAG_DCB_ENABLED |
14325 I40E_FLAG_VMDQ_ENABLED);
14326 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14327 } else {
14328
14329 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
14330 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
14331 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
14332 I40E_FLAG_DCB_ENABLED);
14333 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
14334 }
14335
14336
14337 q_max = max_t(int, pf->rss_size_max, num_online_cpus());
14338 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
14339 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
14340 pf->num_lan_qps = q_max;
14341
14342 queues_left -= pf->num_lan_qps;
14343 }
14344
14345 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
14346 if (queues_left > 1) {
14347 queues_left -= 1;
14348 } else {
14349 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
14350 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14351 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
14352 }
14353 }
14354
14355 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
14356 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
14357 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
14358 (queues_left / pf->num_vf_qps));
14359 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
14360 }
14361
14362 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
14363 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
14364 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
14365 (queues_left / pf->num_vmdq_qps));
14366 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
14367 }
14368
14369 pf->queues_left = queues_left;
14370 dev_dbg(&pf->pdev->dev,
14371 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
14372 pf->hw.func_caps.num_tx_qp,
14373 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
14374 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
14375 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
14376 queues_left);
14377}
14378
14379
14380
14381
14382
14383
14384
14385
14386
14387
14388
14389
14390static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
14391{
14392 struct i40e_filter_control_settings *settings = &pf->filter_settings;
14393
14394 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
14395
14396
14397 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
14398 settings->enable_fdir = true;
14399
14400
14401 settings->enable_ethtype = true;
14402 settings->enable_macvlan = true;
14403
14404 if (i40e_set_filter_control(&pf->hw, settings))
14405 return -ENOENT;
14406
14407 return 0;
14408}
14409
14410#define INFO_STRING_LEN 255
14411#define REMAIN(__x) (INFO_STRING_LEN - (__x))
14412static void i40e_print_features(struct i40e_pf *pf)
14413{
14414 struct i40e_hw *hw = &pf->hw;
14415 char *buf;
14416 int i;
14417
14418 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
14419 if (!buf)
14420 return;
14421
14422 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
14423#ifdef CONFIG_PCI_IOV
14424 i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
14425#endif
14426 i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
14427 pf->hw.func_caps.num_vsis,
14428 pf->vsi[pf->lan_vsi]->num_queue_pairs);
14429 if (pf->flags & I40E_FLAG_RSS_ENABLED)
14430 i += scnprintf(&buf[i], REMAIN(i), " RSS");
14431 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
14432 i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
14433 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
14434 i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
14435 i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
14436 }
14437 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
14438 i += scnprintf(&buf[i], REMAIN(i), " DCB");
14439 i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
14440 i += scnprintf(&buf[i], REMAIN(i), " Geneve");
14441 if (pf->flags & I40E_FLAG_PTP)
14442 i += scnprintf(&buf[i], REMAIN(i), " PTP");
14443 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
14444 i += scnprintf(&buf[i], REMAIN(i), " VEB");
14445 else
14446 i += scnprintf(&buf[i], REMAIN(i), " VEPA");
14447
14448 dev_info(&pf->pdev->dev, "%s\n", buf);
14449 kfree(buf);
14450 WARN_ON(i > INFO_STRING_LEN);
14451}
14452
14453
14454
14455
14456
14457
14458
14459
14460
14461
14462
14463static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
14464{
14465 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
14466 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
14467}
14468
14469
14470
14471
14472
14473
14474void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
14475{
14476 if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
14477 *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
14478 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
14479 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
14480 *flags |= I40E_FLAG_RS_FEC;
14481 *flags &= ~I40E_FLAG_BASE_R_FEC;
14482 }
14483 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
14484 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
14485 *flags |= I40E_FLAG_BASE_R_FEC;
14486 *flags &= ~I40E_FLAG_RS_FEC;
14487 }
14488 if (fec_cfg == 0)
14489 *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
14490}
14491
14492
14493
14494
14495
14496
14497
14498
14499
14500
14501static bool i40e_check_recovery_mode(struct i40e_pf *pf)
14502{
14503 u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
14504
14505 if (val & I40E_GL_FWSTS_FWS1B_MASK) {
14506 dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
14507 dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
14508 set_bit(__I40E_RECOVERY_MODE, pf->state);
14509
14510 return true;
14511 }
14512 if (test_bit(__I40E_RECOVERY_MODE, pf->state))
14513 dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");
14514
14515 return false;
14516}
14517
14518
14519
14520
14521
14522
14523
14524
14525
14526
14527
14528
14529
14530
14531
14532
14533
14534
14535
14536
14537
14538
14539static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
14540{
14541
14542 const unsigned long time_end = jiffies + 10 * HZ;
14543
14544 struct i40e_hw *hw = &pf->hw;
14545 i40e_status ret;
14546
14547 ret = i40e_pf_reset(hw);
14548 while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
14549 usleep_range(10000, 20000);
14550 ret = i40e_pf_reset(hw);
14551 }
14552
14553 if (ret == I40E_SUCCESS)
14554 pf->pfr_count++;
14555 else
14556 dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
14557
14558 return ret;
14559}
14560
14561
14562
14563
14564
14565
14566
14567
14568
14569
14570
14571
14572static bool i40e_check_fw_empr(struct i40e_pf *pf)
14573{
14574 const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
14575 I40E_GL_FWSTS_FWS1B_MASK;
14576 return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
14577 (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
14578}
14579
14580
14581
14582
14583
14584
14585
14586
14587
14588
14589
14590
14591static i40e_status i40e_handle_resets(struct i40e_pf *pf)
14592{
14593 const i40e_status pfr = i40e_pf_loop_reset(pf);
14594 const bool is_empr = i40e_check_fw_empr(pf);
14595
14596 if (is_empr || pfr != I40E_SUCCESS)
14597 dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
14598
14599 return is_empr ? I40E_ERR_RESET_FAILED : pfr;
14600}
14601
14602
14603
14604
14605
14606
14607
14608
14609
14610
14611
14612static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
14613{
14614 struct i40e_vsi *vsi;
14615 int err;
14616 int v_idx;
14617
14618 pci_save_state(pf->pdev);
14619
14620
14621 timer_setup(&pf->service_timer, i40e_service_timer, 0);
14622 pf->service_timer_period = HZ;
14623
14624 INIT_WORK(&pf->service_task, i40e_service_task);
14625 clear_bit(__I40E_SERVICE_SCHED, pf->state);
14626
14627 err = i40e_init_interrupt_scheme(pf);
14628 if (err)
14629 goto err_switch_setup;
14630
14631
14632
14633
14634
14635
14636 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
14637 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
14638 else
14639 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
14640
14641
14642 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
14643 GFP_KERNEL);
14644 if (!pf->vsi) {
14645 err = -ENOMEM;
14646 goto err_switch_setup;
14647 }
14648
14649
14650
14651
14652 v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
14653 if (v_idx < 0)
14654 goto err_switch_setup;
14655 pf->lan_vsi = v_idx;
14656 vsi = pf->vsi[v_idx];
14657 if (!vsi)
14658 goto err_switch_setup;
14659 vsi->alloc_queue_pairs = 1;
14660 err = i40e_config_netdev(vsi);
14661 if (err)
14662 goto err_switch_setup;
14663 err = register_netdev(vsi->netdev);
14664 if (err)
14665 goto err_switch_setup;
14666 vsi->netdev_registered = true;
14667 i40e_dbg_pf_init(pf);
14668
14669 err = i40e_setup_misc_vector_for_recovery_mode(pf);
14670 if (err)
14671 goto err_switch_setup;
14672
14673
14674 i40e_send_version(pf);
14675
14676
14677 mod_timer(&pf->service_timer,
14678 round_jiffies(jiffies + pf->service_timer_period));
14679
14680 return 0;
14681
14682err_switch_setup:
14683 i40e_reset_interrupt_capability(pf);
14684 del_timer_sync(&pf->service_timer);
14685 i40e_shutdown_adminq(hw);
14686 iounmap(hw->hw_addr);
14687 pci_disable_pcie_error_reporting(pf->pdev);
14688 pci_release_mem_regions(pf->pdev);
14689 pci_disable_device(pf->pdev);
14690 kfree(pf);
14691
14692 return err;
14693}
14694
14695
14696
14697
14698
14699
14700
14701
14702
14703
14704
14705
14706static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
14707{
14708 struct i40e_aq_get_phy_abilities_resp abilities;
14709 struct i40e_pf *pf;
14710 struct i40e_hw *hw;
14711 static u16 pfs_found;
14712 u16 wol_nvm_bits;
14713 u16 link_status;
14714 int err;
14715 u32 val;
14716 u32 i;
14717 u8 set_fc_aq_fail;
14718
14719 err = pci_enable_device_mem(pdev);
14720 if (err)
14721 return err;
14722
14723
14724 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
14725 if (err) {
14726 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
14727 if (err) {
14728 dev_err(&pdev->dev,
14729 "DMA configuration failed: 0x%x\n", err);
14730 goto err_dma;
14731 }
14732 }
14733
14734
14735 err = pci_request_mem_regions(pdev, i40e_driver_name);
14736 if (err) {
14737 dev_info(&pdev->dev,
14738 "pci_request_selected_regions failed %d\n", err);
14739 goto err_pci_reg;
14740 }
14741
14742 pci_enable_pcie_error_reporting(pdev);
14743 pci_set_master(pdev);
14744
14745
14746
14747
14748
14749
14750 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
14751 if (!pf) {
14752 err = -ENOMEM;
14753 goto err_pf_alloc;
14754 }
14755 pf->next_vsi = 0;
14756 pf->pdev = pdev;
14757 set_bit(__I40E_DOWN, pf->state);
14758
14759 hw = &pf->hw;
14760 hw->back = pf;
14761
14762 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
14763 I40E_MAX_CSR_SPACE);
14764
14765
14766
14767
14768
14769 if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
14770 dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
14771 pf->ioremap_len);
14772 err = -ENOMEM;
14773 goto err_ioremap;
14774 }
14775 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
14776 if (!hw->hw_addr) {
14777 err = -EIO;
14778 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
14779 (unsigned int)pci_resource_start(pdev, 0),
14780 pf->ioremap_len, err);
14781 goto err_ioremap;
14782 }
14783 hw->vendor_id = pdev->vendor;
14784 hw->device_id = pdev->device;
14785 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
14786 hw->subsystem_vendor_id = pdev->subsystem_vendor;
14787 hw->subsystem_device_id = pdev->subsystem_device;
14788 hw->bus.device = PCI_SLOT(pdev->devfn);
14789 hw->bus.func = PCI_FUNC(pdev->devfn);
14790 hw->bus.bus_id = pdev->bus->number;
14791 pf->instance = pfs_found;
14792
14793
14794
14795
14796 hw->switch_tag = 0xffff;
14797 hw->first_tag = ETH_P_8021AD;
14798 hw->second_tag = ETH_P_8021Q;
14799
14800 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
14801 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
14802 INIT_LIST_HEAD(&pf->ddp_old_prof);
14803
14804
14805
14806
14807 mutex_init(&hw->aq.asq_mutex);
14808 mutex_init(&hw->aq.arq_mutex);
14809
14810 pf->msg_enable = netif_msg_init(debug,
14811 NETIF_MSG_DRV |
14812 NETIF_MSG_PROBE |
14813 NETIF_MSG_LINK);
14814 if (debug < -1)
14815 pf->hw.debug_mask = debug;
14816
14817
14818 if (hw->revision_id == 0 &&
14819 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
14820 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
14821 i40e_flush(hw);
14822 msleep(200);
14823 pf->corer_count++;
14824
14825 i40e_clear_pxe_mode(hw);
14826 }
14827
14828
14829 i40e_clear_hw(hw);
14830
14831 err = i40e_set_mac_type(hw);
14832 if (err) {
14833 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
14834 err);
14835 goto err_pf_reset;
14836 }
14837
14838 err = i40e_handle_resets(pf);
14839 if (err)
14840 goto err_pf_reset;
14841
14842 i40e_check_recovery_mode(pf);
14843
14844 hw->aq.num_arq_entries = I40E_AQ_LEN;
14845 hw->aq.num_asq_entries = I40E_AQ_LEN;
14846 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
14847 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
14848 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
14849
14850 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
14851 "%s-%s:misc",
14852 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
14853
14854 err = i40e_init_shared_code(hw);
14855 if (err) {
14856 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
14857 err);
14858 goto err_pf_reset;
14859 }
14860
14861
14862 pf->hw.fc.requested_mode = I40E_FC_NONE;
14863
14864 err = i40e_init_adminq(hw);
14865 if (err) {
14866 if (err == I40E_ERR_FIRMWARE_API_VERSION)
14867 dev_info(&pdev->dev,
14868 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
14869 hw->aq.api_maj_ver,
14870 hw->aq.api_min_ver,
14871 I40E_FW_API_VERSION_MAJOR,
14872 I40E_FW_MINOR_VERSION(hw));
14873 else
14874 dev_info(&pdev->dev,
14875 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
14876
14877 goto err_pf_reset;
14878 }
14879 i40e_get_oem_version(hw);
14880
14881
14882 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
14883 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
14884 hw->aq.api_maj_ver, hw->aq.api_min_ver,
14885 i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
14886 hw->subsystem_vendor_id, hw->subsystem_device_id);
14887
14888 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
14889 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
14890 dev_info(&pdev->dev,
14891 "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
14892 hw->aq.api_maj_ver,
14893 hw->aq.api_min_ver,
14894 I40E_FW_API_VERSION_MAJOR,
14895 I40E_FW_MINOR_VERSION(hw));
14896 else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
14897 dev_info(&pdev->dev,
14898 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
14899 hw->aq.api_maj_ver,
14900 hw->aq.api_min_ver,
14901 I40E_FW_API_VERSION_MAJOR,
14902 I40E_FW_MINOR_VERSION(hw));
14903
14904 i40e_verify_eeprom(pf);
14905
14906
14907 if (hw->revision_id < 1)
14908 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
14909
14910 i40e_clear_pxe_mode(hw);
14911
14912 err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
14913 if (err)
14914 goto err_adminq_setup;
14915
14916 err = i40e_sw_init(pf);
14917 if (err) {
14918 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
14919 goto err_sw_init;
14920 }
14921
14922 if (test_bit(__I40E_RECOVERY_MODE, pf->state))
14923 return i40e_init_recovery_mode(pf, hw);
14924
14925 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
14926 hw->func_caps.num_rx_qp, 0, 0);
14927 if (err) {
14928 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
14929 goto err_init_lan_hmc;
14930 }
14931
14932 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
14933 if (err) {
14934 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
14935 err = -ENOENT;
14936 goto err_configure_lan_hmc;
14937 }
14938
14939
14940
14941
14942
14943 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
14944 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
14945 i40e_aq_stop_lldp(hw, true, false, NULL);
14946 }
14947
14948
14949 i40e_get_platform_mac_addr(pdev, pf);
14950
14951 if (!is_valid_ether_addr(hw->mac.addr)) {
14952 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
14953 err = -EIO;
14954 goto err_mac_addr;
14955 }
14956 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
14957 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
14958 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
14959 if (is_valid_ether_addr(hw->mac.port_addr))
14960 pf->hw_features |= I40E_HW_PORT_ID_VALID;
14961
14962 pci_set_drvdata(pdev, pf);
14963 pci_save_state(pdev);
14964
14965 dev_info(&pdev->dev,
14966 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
14967 "FW LLDP is disabled\n" :
14968 "FW LLDP is enabled\n");
14969
14970
14971 i40e_aq_set_dcb_parameters(hw, true, NULL);
14972
14973#ifdef CONFIG_I40E_DCB
14974 err = i40e_init_pf_dcb(pf);
14975 if (err) {
14976 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
14977 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
14978
14979 }
14980#endif
14981
14982
14983 timer_setup(&pf->service_timer, i40e_service_timer, 0);
14984 pf->service_timer_period = HZ;
14985
14986 INIT_WORK(&pf->service_task, i40e_service_task);
14987 clear_bit(__I40E_SERVICE_SCHED, pf->state);
14988
14989
14990 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
14991 if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1)
14992 pf->wol_en = false;
14993 else
14994 pf->wol_en = true;
14995 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
14996
14997
14998 i40e_determine_queue_usage(pf);
14999 err = i40e_init_interrupt_scheme(pf);
15000 if (err)
15001 goto err_switch_setup;
15002
15003 pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
15004 pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
15005 pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
15006 pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
15007 pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
15008 pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
15009 UDP_TUNNEL_TYPE_GENEVE;
15010
15011
15012
15013
15014
15015
15016 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
15017 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
15018 else
15019 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15020 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
15021 dev_warn(&pf->pdev->dev,
15022 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
15023 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
15024 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
15025 }
15026
15027
15028 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
15029 GFP_KERNEL);
15030 if (!pf->vsi) {
15031 err = -ENOMEM;
15032 goto err_switch_setup;
15033 }
15034
15035#ifdef CONFIG_PCI_IOV
15036
15037 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15038 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
15039 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15040 if (pci_num_vf(pdev))
15041 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
15042 }
15043#endif
15044 err = i40e_setup_pf_switch(pf, false);
15045 if (err) {
15046 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
15047 goto err_vsis;
15048 }
15049 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
15050
15051
15052 err = i40e_set_fc(hw, &set_fc_aq_fail, true);
15053 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
15054 dev_dbg(&pf->pdev->dev,
15055 "Set fc with err %s aq_err %s on get_phy_cap\n",
15056 i40e_stat_str(hw, err),
15057 i40e_aq_str(hw, hw->aq.asq_last_status));
15058 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
15059 dev_dbg(&pf->pdev->dev,
15060 "Set fc with err %s aq_err %s on set_phy_config\n",
15061 i40e_stat_str(hw, err),
15062 i40e_aq_str(hw, hw->aq.asq_last_status));
15063 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
15064 dev_dbg(&pf->pdev->dev,
15065 "Set fc with err %s aq_err %s on get_link_info\n",
15066 i40e_stat_str(hw, err),
15067 i40e_aq_str(hw, hw->aq.asq_last_status));
15068
15069
15070 for (i = 0; i < pf->num_alloc_vsi; i++) {
15071 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
15072 i40e_vsi_open(pf->vsi[i]);
15073 break;
15074 }
15075 }
15076
15077
15078
15079
15080 err = i40e_aq_set_phy_int_mask(&pf->hw,
15081 ~(I40E_AQ_EVENT_LINK_UPDOWN |
15082 I40E_AQ_EVENT_MEDIA_NA |
15083 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
15084 if (err)
15085 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
15086 i40e_stat_str(&pf->hw, err),
15087 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15088
15089
15090
15091
15092
15093 val = rd32(hw, I40E_REG_MSS);
15094 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
15095 val &= ~I40E_REG_MSS_MIN_MASK;
15096 val |= I40E_64BYTE_MSS;
15097 wr32(hw, I40E_REG_MSS, val);
15098 }
15099
15100 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
15101 msleep(75);
15102 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
15103 if (err)
15104 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
15105 i40e_stat_str(&pf->hw, err),
15106 i40e_aq_str(&pf->hw,
15107 pf->hw.aq.asq_last_status));
15108 }
15109
15110
15111
15112
15113 clear_bit(__I40E_DOWN, pf->state);
15114
15115
15116
15117
15118
15119
15120 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
15121 err = i40e_setup_misc_vector(pf);
15122 if (err) {
15123 dev_info(&pdev->dev,
15124 "setup of misc vector failed: %d\n", err);
15125 goto err_vsis;
15126 }
15127 }
15128
15129#ifdef CONFIG_PCI_IOV
15130
15131 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15132 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
15133 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15134
15135 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
15136 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
15137 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
15138 i40e_flush(hw);
15139
15140 if (pci_num_vf(pdev)) {
15141 dev_info(&pdev->dev,
15142 "Active VFs found, allocating resources.\n");
15143 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
15144 if (err)
15145 dev_info(&pdev->dev,
15146 "Error %d allocating resources for existing VFs\n",
15147 err);
15148 }
15149 }
15150#endif
15151
15152 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15153 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
15154 pf->num_iwarp_msix,
15155 I40E_IWARP_IRQ_PILE_ID);
15156 if (pf->iwarp_base_vector < 0) {
15157 dev_info(&pdev->dev,
15158 "failed to get tracking for %d vectors for IWARP err=%d\n",
15159 pf->num_iwarp_msix, pf->iwarp_base_vector);
15160 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
15161 }
15162 }
15163
15164 i40e_dbg_pf_init(pf);
15165
15166
15167 i40e_send_version(pf);
15168
15169
15170 mod_timer(&pf->service_timer,
15171 round_jiffies(jiffies + pf->service_timer_period));
15172
15173
15174 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15175 err = i40e_lan_add_device(pf);
15176 if (err)
15177 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
15178 err);
15179 }
15180
15181#define PCI_SPEED_SIZE 8
15182#define PCI_WIDTH_SIZE 8
15183
15184
15185
15186
15187 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
15188 char speed[PCI_SPEED_SIZE] = "Unknown";
15189 char width[PCI_WIDTH_SIZE] = "Unknown";
15190
15191
15192
15193
15194 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
15195 &link_status);
15196
15197 i40e_set_pci_config_data(hw, link_status);
15198
15199 switch (hw->bus.speed) {
15200 case i40e_bus_speed_8000:
15201 strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
15202 case i40e_bus_speed_5000:
15203 strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
15204 case i40e_bus_speed_2500:
15205 strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
15206 default:
15207 break;
15208 }
15209 switch (hw->bus.width) {
15210 case i40e_bus_width_pcie_x8:
15211 strlcpy(width, "8", PCI_WIDTH_SIZE); break;
15212 case i40e_bus_width_pcie_x4:
15213 strlcpy(width, "4", PCI_WIDTH_SIZE); break;
15214 case i40e_bus_width_pcie_x2:
15215 strlcpy(width, "2", PCI_WIDTH_SIZE); break;
15216 case i40e_bus_width_pcie_x1:
15217 strlcpy(width, "1", PCI_WIDTH_SIZE); break;
15218 default:
15219 break;
15220 }
15221
15222 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
15223 speed, width);
15224
15225 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
15226 hw->bus.speed < i40e_bus_speed_8000) {
15227 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
15228 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
15229 }
15230 }
15231
15232
15233 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
15234 if (err)
15235 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
15236 i40e_stat_str(&pf->hw, err),
15237 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15238 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
15239
15240
15241 i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
15242
15243
15244 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
15245 if (err)
15246 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
15247 i40e_stat_str(&pf->hw, err),
15248 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15249
15250
15251#define MAX_FRAME_SIZE_DEFAULT 0x2600
15252 val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
15253 I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
15254 if (val < MAX_FRAME_SIZE_DEFAULT)
15255 dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
15256 i, val);
15257
15258
15259
15260
15261
15262
15263
15264 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
15265 pf->main_vsi_seid);
15266
15267 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
15268 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
15269 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
15270 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
15271 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
15272
15273 i40e_print_features(pf);
15274
15275 return 0;
15276
15277
15278err_vsis:
15279 set_bit(__I40E_DOWN, pf->state);
15280 i40e_clear_interrupt_scheme(pf);
15281 kfree(pf->vsi);
15282err_switch_setup:
15283 i40e_reset_interrupt_capability(pf);
15284 del_timer_sync(&pf->service_timer);
15285err_mac_addr:
15286err_configure_lan_hmc:
15287 (void)i40e_shutdown_lan_hmc(hw);
15288err_init_lan_hmc:
15289 kfree(pf->qp_pile);
15290err_sw_init:
15291err_adminq_setup:
15292err_pf_reset:
15293 iounmap(hw->hw_addr);
15294err_ioremap:
15295 kfree(pf);
15296err_pf_alloc:
15297 pci_disable_pcie_error_reporting(pdev);
15298 pci_release_mem_regions(pdev);
15299err_pci_reg:
15300err_dma:
15301 pci_disable_device(pdev);
15302 return err;
15303}
15304
15305
15306
15307
15308
15309
15310
15311
15312
15313
15314static void i40e_remove(struct pci_dev *pdev)
15315{
15316 struct i40e_pf *pf = pci_get_drvdata(pdev);
15317 struct i40e_hw *hw = &pf->hw;
15318 i40e_status ret_code;
15319 int i;
15320
15321 i40e_dbg_pf_exit(pf);
15322
15323 i40e_ptp_stop(pf);
15324
15325
15326 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
15327 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
15328
15329 while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
15330 usleep_range(1000, 2000);
15331
15332 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
15333 set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
15334 i40e_free_vfs(pf);
15335 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
15336 }
15337
15338 set_bit(__I40E_SUSPENDED, pf->state);
15339 set_bit(__I40E_DOWN, pf->state);
15340 if (pf->service_timer.function)
15341 del_timer_sync(&pf->service_timer);
15342 if (pf->service_task.func)
15343 cancel_work_sync(&pf->service_task);
15344
15345 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
15346 struct i40e_vsi *vsi = pf->vsi[0];
15347
15348
15349
15350
15351
15352 unregister_netdev(vsi->netdev);
15353 free_netdev(vsi->netdev);
15354
15355 goto unmap;
15356 }
15357
15358
15359
15360
15361 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
15362
15363 i40e_fdir_teardown(pf);
15364
15365
15366
15367
15368 for (i = 0; i < I40E_MAX_VEB; i++) {
15369 if (!pf->veb[i])
15370 continue;
15371
15372 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
15373 pf->veb[i]->uplink_seid == 0)
15374 i40e_switch_branch_release(pf->veb[i]);
15375 }
15376
15377
15378
15379
15380 if (pf->vsi[pf->lan_vsi])
15381 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
15382
15383 i40e_cloud_filter_exit(pf);
15384
15385
15386 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15387 ret_code = i40e_lan_del_device(pf);
15388 if (ret_code)
15389 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
15390 ret_code);
15391 }
15392
15393
15394 if (hw->hmc.hmc_obj) {
15395 ret_code = i40e_shutdown_lan_hmc(hw);
15396 if (ret_code)
15397 dev_warn(&pdev->dev,
15398 "Failed to destroy the HMC resources: %d\n",
15399 ret_code);
15400 }
15401
15402unmap:
15403
15404 if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
15405 !(pf->flags & I40E_FLAG_MSIX_ENABLED))
15406 free_irq(pf->pdev->irq, pf);
15407
15408
15409 i40e_shutdown_adminq(hw);
15410
15411
15412 mutex_destroy(&hw->aq.arq_mutex);
15413 mutex_destroy(&hw->aq.asq_mutex);
15414
15415
15416 rtnl_lock();
15417 i40e_clear_interrupt_scheme(pf);
15418 for (i = 0; i < pf->num_alloc_vsi; i++) {
15419 if (pf->vsi[i]) {
15420 if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
15421 i40e_vsi_clear_rings(pf->vsi[i]);
15422 i40e_vsi_clear(pf->vsi[i]);
15423 pf->vsi[i] = NULL;
15424 }
15425 }
15426 rtnl_unlock();
15427
15428 for (i = 0; i < I40E_MAX_VEB; i++) {
15429 kfree(pf->veb[i]);
15430 pf->veb[i] = NULL;
15431 }
15432
15433 kfree(pf->qp_pile);
15434 kfree(pf->vsi);
15435
15436 iounmap(hw->hw_addr);
15437 kfree(pf);
15438 pci_release_mem_regions(pdev);
15439
15440 pci_disable_pcie_error_reporting(pdev);
15441 pci_disable_device(pdev);
15442}
15443
15444
15445
15446
15447
15448
15449
15450
15451
15452
15453static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
15454 pci_channel_state_t error)
15455{
15456 struct i40e_pf *pf = pci_get_drvdata(pdev);
15457
15458 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
15459
15460 if (!pf) {
15461 dev_info(&pdev->dev,
15462 "Cannot recover - error happened during device probe\n");
15463 return PCI_ERS_RESULT_DISCONNECT;
15464 }
15465
15466
15467 if (!test_bit(__I40E_SUSPENDED, pf->state))
15468 i40e_prep_for_reset(pf, false);
15469
15470
15471 return PCI_ERS_RESULT_NEED_RESET;
15472}
15473
15474
15475
15476
15477
15478
15479
15480
15481
15482
15483static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
15484{
15485 struct i40e_pf *pf = pci_get_drvdata(pdev);
15486 pci_ers_result_t result;
15487 u32 reg;
15488
15489 dev_dbg(&pdev->dev, "%s\n", __func__);
15490 if (pci_enable_device_mem(pdev)) {
15491 dev_info(&pdev->dev,
15492 "Cannot re-enable PCI device after reset.\n");
15493 result = PCI_ERS_RESULT_DISCONNECT;
15494 } else {
15495 pci_set_master(pdev);
15496 pci_restore_state(pdev);
15497 pci_save_state(pdev);
15498 pci_wake_from_d3(pdev, false);
15499
15500 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
15501 if (reg == 0)
15502 result = PCI_ERS_RESULT_RECOVERED;
15503 else
15504 result = PCI_ERS_RESULT_DISCONNECT;
15505 }
15506
15507 return result;
15508}
15509
15510
15511
15512
15513
15514static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
15515{
15516 struct i40e_pf *pf = pci_get_drvdata(pdev);
15517
15518 i40e_prep_for_reset(pf, false);
15519}
15520
15521
15522
15523
15524
15525static void i40e_pci_error_reset_done(struct pci_dev *pdev)
15526{
15527 struct i40e_pf *pf = pci_get_drvdata(pdev);
15528
15529 i40e_reset_and_rebuild(pf, false, false);
15530}
15531
15532
15533
15534
15535
15536
15537
15538
15539static void i40e_pci_error_resume(struct pci_dev *pdev)
15540{
15541 struct i40e_pf *pf = pci_get_drvdata(pdev);
15542
15543 dev_dbg(&pdev->dev, "%s\n", __func__);
15544 if (test_bit(__I40E_SUSPENDED, pf->state))
15545 return;
15546
15547 i40e_handle_reset_warning(pf, false);
15548}
15549
15550
15551
15552
15553
15554
15555static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
15556{
15557 struct i40e_hw *hw = &pf->hw;
15558 i40e_status ret;
15559 u8 mac_addr[6];
15560 u16 flags = 0;
15561
15562
15563 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
15564 ether_addr_copy(mac_addr,
15565 pf->vsi[pf->lan_vsi]->netdev->dev_addr);
15566 } else {
15567 dev_err(&pf->pdev->dev,
15568 "Failed to retrieve MAC address; using default\n");
15569 ether_addr_copy(mac_addr, hw->mac.addr);
15570 }
15571
15572
15573
15574
15575
15576 flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
15577
15578 if (hw->func_caps.flex10_enable && hw->partition_id != 1)
15579 flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
15580
15581 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
15582 if (ret) {
15583 dev_err(&pf->pdev->dev,
15584 "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
15585 return;
15586 }
15587
15588 flags = I40E_AQC_MC_MAG_EN
15589 | I40E_AQC_WOL_PRESERVE_ON_PFR
15590 | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
15591 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
15592 if (ret)
15593 dev_err(&pf->pdev->dev,
15594 "Failed to enable Multicast Magic Packet wake up\n");
15595}
15596
15597
15598
15599
15600
15601static void i40e_shutdown(struct pci_dev *pdev)
15602{
15603 struct i40e_pf *pf = pci_get_drvdata(pdev);
15604 struct i40e_hw *hw = &pf->hw;
15605
15606 set_bit(__I40E_SUSPENDED, pf->state);
15607 set_bit(__I40E_DOWN, pf->state);
15608
15609 del_timer_sync(&pf->service_timer);
15610 cancel_work_sync(&pf->service_task);
15611 i40e_cloud_filter_exit(pf);
15612 i40e_fdir_teardown(pf);
15613
15614
15615
15616
15617 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
15618
15619 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
15620 i40e_enable_mc_magic_wake(pf);
15621
15622 i40e_prep_for_reset(pf, false);
15623
15624 wr32(hw, I40E_PFPM_APM,
15625 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
15626 wr32(hw, I40E_PFPM_WUFC,
15627 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
15628
15629
15630 if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
15631 !(pf->flags & I40E_FLAG_MSIX_ENABLED))
15632 free_irq(pf->pdev->irq, pf);
15633
15634
15635
15636
15637
15638 rtnl_lock();
15639 i40e_clear_interrupt_scheme(pf);
15640 rtnl_unlock();
15641
15642 if (system_state == SYSTEM_POWER_OFF) {
15643 pci_wake_from_d3(pdev, pf->wol_en);
15644 pci_set_power_state(pdev, PCI_D3hot);
15645 }
15646}
15647
15648
15649
15650
15651
15652static int __maybe_unused i40e_suspend(struct device *dev)
15653{
15654 struct i40e_pf *pf = dev_get_drvdata(dev);
15655 struct i40e_hw *hw = &pf->hw;
15656
15657
15658 if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
15659 return 0;
15660
15661 set_bit(__I40E_DOWN, pf->state);
15662
15663
15664 del_timer_sync(&pf->service_timer);
15665 cancel_work_sync(&pf->service_task);
15666
15667
15668
15669
15670 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
15671
15672 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
15673 i40e_enable_mc_magic_wake(pf);
15674
15675
15676
15677
15678
15679 rtnl_lock();
15680
15681 i40e_prep_for_reset(pf, true);
15682
15683 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
15684 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
15685
15686
15687
15688
15689
15690
15691 i40e_clear_interrupt_scheme(pf);
15692
15693 rtnl_unlock();
15694
15695 return 0;
15696}
15697
15698
15699
15700
15701
15702static int __maybe_unused i40e_resume(struct device *dev)
15703{
15704 struct i40e_pf *pf = dev_get_drvdata(dev);
15705 int err;
15706
15707
15708 if (!test_bit(__I40E_SUSPENDED, pf->state))
15709 return 0;
15710
15711
15712
15713
15714 rtnl_lock();
15715
15716
15717
15718
15719 err = i40e_restore_interrupt_scheme(pf);
15720 if (err) {
15721 dev_err(dev, "Cannot restore interrupt scheme: %d\n",
15722 err);
15723 }
15724
15725 clear_bit(__I40E_DOWN, pf->state);
15726 i40e_reset_and_rebuild(pf, false, true);
15727
15728 rtnl_unlock();
15729
15730
15731 clear_bit(__I40E_SUSPENDED, pf->state);
15732
15733
15734 mod_timer(&pf->service_timer,
15735 round_jiffies(jiffies + pf->service_timer_period));
15736
15737 return 0;
15738}
15739
15740static const struct pci_error_handlers i40e_err_handler = {
15741 .error_detected = i40e_pci_error_detected,
15742 .slot_reset = i40e_pci_error_slot_reset,
15743 .reset_prepare = i40e_pci_error_reset_prepare,
15744 .reset_done = i40e_pci_error_reset_done,
15745 .resume = i40e_pci_error_resume,
15746};
15747
15748static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
15749
15750static struct pci_driver i40e_driver = {
15751 .name = i40e_driver_name,
15752 .id_table = i40e_pci_tbl,
15753 .probe = i40e_probe,
15754 .remove = i40e_remove,
15755 .driver = {
15756 .pm = &i40e_pm_ops,
15757 },
15758 .shutdown = i40e_shutdown,
15759 .err_handler = &i40e_err_handler,
15760 .sriov_configure = i40e_pci_sriov_configure,
15761};
15762
15763
15764
15765
15766
15767
15768
15769static int __init i40e_init_module(void)
15770{
15771 pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
15772 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
15773
15774
15775
15776
15777
15778
15779
15780
15781 i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
15782 if (!i40e_wq) {
15783 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
15784 return -ENOMEM;
15785 }
15786
15787 i40e_dbg_init();
15788 return pci_register_driver(&i40e_driver);
15789}
15790module_init(i40e_init_module);
15791
15792
15793
15794
15795
15796
15797
15798static void __exit i40e_exit_module(void)
15799{
15800 pci_unregister_driver(&i40e_driver);
15801 destroy_workqueue(i40e_wq);
15802 i40e_dbg_exit();
15803}
15804module_exit(i40e_exit_module);
15805