#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>
#include <generated/utsrelease.h>
#include <linux/crash_dump.h>

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include "i40e_xsk.h"
#include <net/udp_tunnel.h>
#include <net/xdp_sock_drv.h>
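/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */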
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";

static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf);
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
static bool i40e_check_recovery_mode(struct i40e_pf *pf);
static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type);
static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);
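
/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */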
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL v2");

static struct workqueue_struct *i40e_wq;
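
/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/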
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}
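
/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/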
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}
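
/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/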
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}
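
/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/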
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}
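
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/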
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%s needed=%d id=0x%04x\n",
			 pile ? "<valid>" : "<null>", needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
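
/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/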
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
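
/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/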
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}
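
/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/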
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if ((!test_bit(__I40E_DOWN, pf->state) &&
	     !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
	      test_bit(__I40E_RECOVERY_MODE, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}
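
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number timing out
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/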
static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i;
	u32 head, val;

	pf->tx_timeout_count++;

	/* with txqueue index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (txqueue ==
			    vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	/* don't kick off another recovery if one is already pending */
	if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
		return;

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
					tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
		    pf->tx_timeout_recovery_level, txqueue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
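
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/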
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}
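
/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to read stats from
 * @stats: stats entry to be updated
 **/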
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes += bytes;
}
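
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev
 * @netdev: network interface device structure
 * @stats: data structure to store statistics
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/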
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	struct i40e_ring *ring;
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		ring = READ_ONCE(vsi->tx_rings[i]);
		if (!ring)
			continue;
		i40e_get_netdev_stats_struct_tx(ring, stats);

		if (i40e_enabled_xdp_vsi(vsi)) {
			ring = READ_ONCE(vsi->xdp_rings[i]);
			if (!ring)
				continue;
			i40e_get_netdev_stats_struct_tx(ring, stats);
		}

		ring = READ_ONCE(vsi->rx_rings[i]);
		if (!ring)
			continue;
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_dropped = vsi_stats->rx_dropped;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;
}
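
/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/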
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}
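
/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/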
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			memset(&pf->veb[i]->tc_stats, 0,
			       sizeof(pf->veb[i]->tc_stats));
			memset(&pf->veb[i]->tc_stats_offsets, 0,
			       sizeof(pf->veb[i]->tc_stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}
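
/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/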
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
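
/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/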
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}
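
/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/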
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
	u32 new_data = rd32(hw, reg);

	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
	*stat += new_data;
}
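
/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/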
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}
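
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/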
void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}
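
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * multiple consumers (netdev stats and ethtool) that need the
 * same values filled out.
 **/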
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on its own from the hardware
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = READ_ONCE(vsi->tx_rings[q]);
		if (!p)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;

		/* locate Rx ring */
		p = READ_ONCE(vsi->rx_rings[q]);
		if (!p)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;

		if (i40e_enabled_xdp_vsi(vsi)) {
			/* locate XDP ring */
			p = READ_ONCE(vsi->xdp_rings[q]);
			if (!p)
				continue;

			do {
				start = u64_stats_fetch_begin_irq(&p->syncp);
				packets = p->stats.packets;
				bytes = p->stats.bytes;
			} while (u64_stats_fetch_retry_irq(&p->syncp, start));
			tx_b += bytes;
			tx_p += packets;
			tx_restart += p->tx_stats.restart_queue;
			tx_busy += p->tx_stats.tx_busy;
			tx_linearize += p->tx_stats.tx_linearize;
			tx_force_wb += p->tx_stats.tx_force_wb;
		}
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
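
/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/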
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	/* Flow control (LFC) stats */
	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
			&nsd->fd_sb_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}
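
/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/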
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
}
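
/**
 * i40e_count_filters - counts VSI mac filters
 * @vsi: the VSI to be searched
 *
 * Returns count of mac filters
 **/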
int i40e_count_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;
	int cnt = 0;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		++cnt;

	return cnt;
}
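
/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL
 **/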
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}
	return NULL;
}
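
/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/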
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}
	return NULL;
}
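
/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be checked
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/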
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a PVID, always operate in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ANY. Could be due to legacy operation
	 * or via i40e_sync_filters_subtask.
	 *
	 * Rather than scanning the filter hash here, we track whether any
	 * VLAN filters exist in vsi->has_vlan_filter, which is kept up to
	 * date as filters are added and removed.
	 */
	return vsi->has_vlan_filter;
}
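
/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: VSI to sync the filter list with
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update all non-VLAN filters to use the correct VLAN, depending on
 * whether a PVID is assigned and whether any VLAN filters are active.
 * Filters that need a different VLAN are re-created via the tmp lists.
 *
 * NOTE: This function expects to be called while the mac_filter_hash_lock
 * is held.
 **/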
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are.
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */

	/* Update the filters about to be added in place */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
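
/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/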
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}
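
/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/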
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place.
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}
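
/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/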
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
	if (!f)
		return;

	/* If the filter was never added to firmware then we can just delete it
	 * directly and we don't want to set the status to remove or else an
	 * admin queue command will unnecessarily fire.
	 */
	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
	}

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
}
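
/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/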
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan);
	__i40e_del_filter(vsi, f);
}
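
/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY.
 * Otherwise, search and add a filter for each active VLAN, skipping
 * filters that are slated for removal.
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/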
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
					    const u8 *macaddr)
{
	struct i40e_mac_filter *f, *add = NULL;
	struct hlist_node *h;
	int bkt;

	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	if (!i40e_is_vsi_in_vlan(vsi))
		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}

	return add;
}
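
/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/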
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	lockdep_assert_held(&vsi->mac_filter_hash_lock);
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
}
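
/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/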
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	/* Copy the address first, so that we avoid a possible race with
	 * .set_rx_mode().
	 * - Remove old address from MAC filter
	 * - Copy new address
	 * - Add new address to MAC filter
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	i40e_add_mac_filter(vsi, netdev->dev_addr);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(pf);
	return 0;
}
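
/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: pointer to lookup table of lut_size
 * @lut_size: size of the lookup table
 **/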
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *seed_dw =
			(struct i40e_aqc_get_set_rss_key_data *)seed;
		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	return ret;
}
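
/**
 * i40e_vsi_config_rss - Prepare for RSS
 * @vsi: VSI structure
 **/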
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	int ret;

	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
		return 0;
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use the user configured hash keys and lookup table if there is one,
	 * otherwise use default
	 */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);
	return ret;
}
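
/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured,
 * @ctxt: VSI context structure
 * @enabled_tc: number of traffic classes to enable
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
 **/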
static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
					   struct i40e_vsi_context *ctxt,
					   u8 enabled_tc)
{
	u16 qcount = 0, max_qcount, qmap, sections = 0;
	int i, override_q, pow, num_qps, ret;
	u8 netdev_tc = 0, offset = 0;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;
	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	num_qps = vsi->mqprio_qopt.qopt.count[0];

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(num_qps);
	if (!is_power_of_2(num_qps))
		pow++;
	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

	/* Setup queue offset/count for all TCs for given VSI */
	max_qcount = vsi->mqprio_qopt.qopt.count[0];
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount = vsi->mqprio_qopt.qopt.count[i];
			if (qcount > max_qcount)
				max_qcount = qcount;
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;
			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;
		}
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset + qcount;

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);

	/* Reconfigure RSS for main VSI with the max number of queues */
	vsi->rss_size = max_qcount;
	ret = i40e_vsi_config_rss(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to reconfig rss for num_queues (%u)\n",
			 max_qcount);
		return ret;
	}
	vsi->reconfig_rss = true;
	dev_dbg(&vsi->back->pdev->dev,
		"Reconfigured rss with num_queues (%u)\n", max_qcount);

	/* Find queue count available for channel VSIs and starting offset
	 * for channel VSIs
	 */
	override_q = vsi->mqprio_qopt.qopt.count[0];
	if (override_q && override_q < vsi->num_queue_pairs) {
		vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
		vsi->next_base_queue = override_q;
	}
	return 0;
}
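
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/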
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 1;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	/* Number of queues per enabled TC */
	num_tc_qps = vsi->alloc_queue_pairs;
	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
		num_tc_qps = num_tc_qps / numtc;
		num_tc_qps = min_t(int, num_tc_qps,
				   i40e_pf_get_max_q_per_tc(pf));
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;

	/* Do not allow use more TC queue pairs than MSI-X vectors exist */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
				    I40E_FLAG_FD_ATR_ENABLED)) ||
				    vsi->tc_config.enabled_tc != 1) {
					qcount = min_t(int, pf->alloc_rss_size,
						       num_tc_qps);
					break;
				}
				fallthrough;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
				cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
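
/**
 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/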
static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (i40e_add_mac_filter(vsi, addr))
		return 0;
	else
		return -ENOMEM;
}
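
/**
 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/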
static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	i40e_del_mac_filter(vsi, addr);

	return 0;
}
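
/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/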
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	spin_lock_bh(&vsi->mac_filter_hash_lock);

	__dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
	__dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
	}
}
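
/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to VSI struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries needs to be undone.
 *
 * MAC filter entries from this list were slated for deletion.
 **/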
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;

	hlist_for_each_entry_safe(f, h, from, hlist) {
		u64 key = i40e_addr_to_hkey(f->macaddr);

		/* Move the element back into MAC filter list */
		hlist_del(&f->hlist);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);
	}
}
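
/**
 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to vsi struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries needs to be undone.
 *
 * MAC filter entries from this list were slated for addition.
 **/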
static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;

	hlist_for_each_entry_safe(new, h, from, hlist) {
		/* We can simply free the wrapper structure */
		hlist_del(&new->hlist);
		kfree(new);
	}
}
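
/**
 * i40e_next_filter - Get the next non-broadcast filter from a list
 * @next: pointer to filter in list
 *
 * Returns the next non-broadcast filter in the list. Required so that we
 * ignore broadcast filters within the list, since these are not handled via
 * the normal firmware update path.
 **/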
static
struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
{
	hlist_for_each_entry_continue(next, hlist) {
		if (!is_broadcast_ether_addr(next->f->macaddr))
			return next;
	}

	return NULL;
}
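
/**
 * i40e_update_filter_state - Update filter state based on return data
 * from firmware
 * @count: Number of filters added
 * @add_list: return data from fw
 * @add_head: pointer to first filter in current batch
 *
 * MAC filter entries from list were slated to be added to device. Returns
 * number of successful filters. Note that 0 does NOT mean that the filter
 * list was empty; this tells whether a given filter was successful or not.
 **/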
static int
i40e_update_filter_state(int count,
			 struct i40e_aqc_add_macvlan_element_data *add_list,
			 struct i40e_new_mac_filter *add_head)
{
	int retval = 0;
	int i;

	for (i = 0; i < count; i++) {
		/* Always check status of each filter. We don't need to check
		 * the firmware return status because we pre-set the filter
		 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
		 * request to the adminq. Thus, if it no longer matches then
		 * we know the filter is active.
		 */
		if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
			add_head->state = I40E_FILTER_FAILED;
		} else {
			add_head->state = I40E_FILTER_ACTIVE;
			retval++;
		}

		add_head = i40e_next_filter(add_head);
		if (!add_head)
			break;
	}

	return retval;
}
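
/**
 * i40e_aqc_del_filters - Request firmware to delete a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @num_del: the number of filters to delete
 * @retval: Set to -EIO on failure to delete
 *
 * Send a request to firmware via AdminQ to delete a set of filters. Uses
 * *retval instead of a return value so that callers can ignore the error
 * on deletion.
 **/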
static
void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_remove_macvlan_element_data *list,
			  int num_del, int *retval)
{
	struct i40e_hw *hw = &vsi->back->hw;
	i40e_status aq_ret;
	int aq_err;

	aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
	aq_err = hw->aq.asq_last_status;

	/* Explicitly ignore and do not report when firmware returns ENOENT */
	if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
		*retval = -EIO;
		dev_info(&vsi->back->pdev->dev,
			 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
			 vsi_name, i40e_stat_str(hw, aq_ret),
			 i40e_aq_str(hw, aq_err));
	}
}
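
/**
 * i40e_aqc_add_filters - Request firmware to add a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @add_head: Position in the add hlist
 * @num_add: the number of filters to add
 *
 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
 * space for more filters.
 **/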
static
void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_add_macvlan_element_data *list,
			  struct i40e_new_mac_filter *add_head,
			  int num_add)
{
	struct i40e_hw *hw = &vsi->back->hw;
	int aq_err, fcnt;

	i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
	aq_err = hw->aq.asq_last_status;
	fcnt = i40e_update_filter_state(num_add, list, add_head);

	if (fcnt != num_add) {
		if (vsi->type == I40E_VSI_MAIN) {
			set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
			dev_warn(&vsi->back->pdev->dev,
				 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
				 i40e_aq_str(hw, aq_err), vsi_name);
		} else if (vsi->type == I40E_VSI_SRIOV ||
			   vsi->type == I40E_VSI_VMDQ1 ||
			   vsi->type == I40E_VSI_VMDQ2) {
			dev_warn(&vsi->back->pdev->dev,
				 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
				 i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
		} else {
			dev_warn(&vsi->back->pdev->dev,
				 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
				 i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
		}
	}
}
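
/**
 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
 * @vsi: pointer to the VSI
 * @vsi_name: the VSI name
 * @f: filter data
 *
 * This function sets or clears the promiscuous broadcast flags for VLAN
 * filters in order to properly receive broadcast frames. Assumes that only
 * broadcast filters are passed.
 *
 * Returns status indicating success or failure;
 **/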
static i40e_status
i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_mac_filter *f)
{
	bool enable = f->state == I40E_FILTER_NEW;
	struct i40e_hw *hw = &vsi->back->hw;
	i40e_status aq_ret;

	if (f->vlan == I40E_VLAN_ANY) {
		aq_ret = i40e_aq_set_vsi_broadcast(hw,
						   vsi->seid,
						   enable,
						   NULL);
	} else {
		aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
							    vsi->seid,
							    enable,
							    f->vlan,
							    NULL);
	}

	if (aq_ret) {
		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		dev_warn(&vsi->back->pdev->dev,
			 "Error %s, forcing overflow promiscuous on %s\n",
			 i40e_aq_str(hw, hw->aq.asq_last_status),
			 vsi_name);
	}

	return aq_ret;
}
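
/**
 * i40e_set_promiscuous - set promiscuous mode
 * @pf: board private structure
 * @promisc: promisc on or off
 *
 * There are different ways of setting promiscuous mode on a PF depending on
 * what state/environment we're in.
 **/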
static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret;

	if (vsi->type == I40E_VSI_MAIN &&
	    pf->lan_veb != I40E_NO_VEB &&
	    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
		/* set defport ON for Main VSI instead of true promisc
		 * this way we will get all unicast/multicast and VLAN
		 * promisc behavior but will not get VF or VMDq traffic
		 * replicated on the Main VSI.
		 */
		if (promisc)
			aq_ret = i40e_aq_set_default_vsi(hw,
							 vsi->seid,
							 NULL);
		else
			aq_ret = i40e_aq_clear_default_vsi(hw,
							   vsi->seid,
							   NULL);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 "Set default VSI failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	} else {
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
						  hw,
						  vsi->seid,
						  promisc, NULL,
						  true);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 "set unicast promisc failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
						  hw,
						  vsi->seid,
						  promisc, NULL);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 "set multicast promisc failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}

	if (!aq_ret)
		pf->cur_promisc = promisc;

	return aq_ret;
}
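
/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/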
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2285{
2286 struct hlist_head tmp_add_list, tmp_del_list;
2287 struct i40e_mac_filter *f;
2288 struct i40e_new_mac_filter *new, *add_head = NULL;
2289 struct i40e_hw *hw = &vsi->back->hw;
2290 bool old_overflow, new_overflow;
2291 unsigned int failed_filters = 0;
2292 unsigned int vlan_filters = 0;
2293 char vsi_name[16] = "PF";
2294 int filter_list_len = 0;
2295 i40e_status aq_ret = 0;
2296 u32 changed_flags = 0;
2297 struct hlist_node *h;
2298 struct i40e_pf *pf;
2299 int num_add = 0;
2300 int num_del = 0;
2301 int retval = 0;
2302 u16 cmd_flags;
2303 int list_size;
2304 int bkt;
2305
2306
2307 struct i40e_aqc_add_macvlan_element_data *add_list;
2308 struct i40e_aqc_remove_macvlan_element_data *del_list;
2309
2310 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2311 usleep_range(1000, 2000);
2312 pf = vsi->back;
2313
2314 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2315
2316 if (vsi->netdev) {
2317 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2318 vsi->current_netdev_flags = vsi->netdev->flags;
2319 }
2320
2321 INIT_HLIST_HEAD(&tmp_add_list);
2322 INIT_HLIST_HEAD(&tmp_del_list);
2323
2324 if (vsi->type == I40E_VSI_SRIOV)
2325 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2326 else if (vsi->type != I40E_VSI_MAIN)
2327 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2328
2329 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2330 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2331
2332 spin_lock_bh(&vsi->mac_filter_hash_lock);
2333
2334 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2335 if (f->state == I40E_FILTER_REMOVE) {
2336
2337 hash_del(&f->hlist);
2338 hlist_add_head(&f->hlist, &tmp_del_list);
2339
2340
2341 continue;
2342 }
2343 if (f->state == I40E_FILTER_NEW) {
2344
2345 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2346 if (!new)
2347 goto err_no_memory_locked;
2348
2349
2350 new->f = f;
2351 new->state = f->state;
2352
2353
2354 hlist_add_head(&new->hlist, &tmp_add_list);
2355 }
2356
2357
2358
2359
2360
2361 if (f->vlan > 0)
2362 vlan_filters++;
2363 }
2364
2365 retval = i40e_correct_mac_vlan_filters(vsi,
2366 &tmp_add_list,
2367 &tmp_del_list,
2368 vlan_filters);
2369 if (retval)
2370 goto err_no_memory_locked;
2371
2372 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2373 }
2374
2375
2376 if (!hlist_empty(&tmp_del_list)) {
2377 filter_list_len = hw->aq.asq_buf_size /
2378 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2379 list_size = filter_list_len *
2380 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2381 del_list = kzalloc(list_size, GFP_ATOMIC);
2382 if (!del_list)
2383 goto err_no_memory;
2384
2385 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2386 cmd_flags = 0;
2387
2388
2389
2390
2391 if (is_broadcast_ether_addr(f->macaddr)) {
2392 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2393
2394 hlist_del(&f->hlist);
2395 kfree(f);
2396 continue;
2397 }
2398
2399
2400 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2401 if (f->vlan == I40E_VLAN_ANY) {
2402 del_list[num_del].vlan_tag = 0;
2403 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2404 } else {
2405 del_list[num_del].vlan_tag =
2406 cpu_to_le16((u16)(f->vlan));
2407 }
2408
2409 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2410 del_list[num_del].flags = cmd_flags;
2411 num_del++;
2412
2413
2414 if (num_del == filter_list_len) {
2415 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2416 num_del, &retval);
2417 memset(del_list, 0, list_size);
2418 num_del = 0;
2419 }
2420
2421
2422
2423 hlist_del(&f->hlist);
2424 kfree(f);
2425 }
2426
2427 if (num_del) {
2428 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2429 num_del, &retval);
2430 }
2431
2432 kfree(del_list);
2433 del_list = NULL;
2434 }
2435
2436 if (!hlist_empty(&tmp_add_list)) {
2437
2438 filter_list_len = hw->aq.asq_buf_size /
2439 sizeof(struct i40e_aqc_add_macvlan_element_data);
2440 list_size = filter_list_len *
2441 sizeof(struct i40e_aqc_add_macvlan_element_data);
2442 add_list = kzalloc(list_size, GFP_ATOMIC);
2443 if (!add_list)
2444 goto err_no_memory;
2445
2446 num_add = 0;
2447 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2448
2449
2450
2451 if (is_broadcast_ether_addr(new->f->macaddr)) {
2452 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2453 new->f))
2454 new->state = I40E_FILTER_FAILED;
2455 else
2456 new->state = I40E_FILTER_ACTIVE;
2457 continue;
2458 }
2459
2460
2461 if (num_add == 0)
2462 add_head = new;
2463 cmd_flags = 0;
2464 ether_addr_copy(add_list[num_add].mac_addr,
2465 new->f->macaddr);
2466 if (new->f->vlan == I40E_VLAN_ANY) {
2467 add_list[num_add].vlan_tag = 0;
2468 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2469 } else {
2470 add_list[num_add].vlan_tag =
2471 cpu_to_le16((u16)(new->f->vlan));
2472 }
2473 add_list[num_add].queue_number = 0;
2474
2475 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2476 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2477 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2478 num_add++;
2479
2480
2481 if (num_add == filter_list_len) {
2482 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2483 add_head, num_add);
2484 memset(add_list, 0, list_size);
2485 num_add = 0;
2486 }
2487 }
2488 if (num_add) {
2489 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2490 num_add);
2491 }
2492
2493
2494
2495 spin_lock_bh(&vsi->mac_filter_hash_lock);
2496 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2497
2498 if (new->f->state == I40E_FILTER_NEW)
2499 new->f->state = new->state;
2500 hlist_del(&new->hlist);
2501 kfree(new);
2502 }
2503 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2504 kfree(add_list);
2505 add_list = NULL;
2506 }
2507
2508
2509 spin_lock_bh(&vsi->mac_filter_hash_lock);
2510 vsi->active_filters = 0;
2511 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2512 if (f->state == I40E_FILTER_ACTIVE)
2513 vsi->active_filters++;
2514 else if (f->state == I40E_FILTER_FAILED)
2515 failed_filters++;
2516 }
2517 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2518
2519
2520
2521
2522
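 /* Check if we are able to exit overflow promiscuous mode. We can
 * safely exit if we didn't just enter, we no longer have any failed
 * filters, and we have reduced filters below the threshold value.
 */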
2523 if (old_overflow && !failed_filters &&
2524 vsi->active_filters < vsi->promisc_threshold) {
2525 dev_info(&pf->pdev->dev,
2526 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2527 vsi_name);
2528 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2529 vsi->promisc_threshold = 0;
2530 }
2531
2532
2533 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2534 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2535 goto out;
2536 }
2537
2538 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2539
2540
2541
2542
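 /* If we are entering overflow promiscuous, we need to calculate a new
 * threshold for when we are safe to exit
 */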
2543 if (!old_overflow && new_overflow)
2544 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
2545
2546
2547 if (changed_flags & IFF_ALLMULTI) {
2548 bool cur_multipromisc;
2549
2550 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2551 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2552 vsi->seid,
2553 cur_multipromisc,
2554 NULL);
2555 if (aq_ret) {
2556 retval = i40e_aq_rc_to_posix(aq_ret,
2557 hw->aq.asq_last_status);
2558 dev_info(&pf->pdev->dev,
2559 "set multi promisc failed on %s, err %s aq_err %s\n",
2560 vsi_name,
2561 i40e_stat_str(hw, aq_ret),
2562 i40e_aq_str(hw, hw->aq.asq_last_status));
2563 } else {
2564 dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
2565 cur_multipromisc ? "entering" : "leaving");
2566 }
2567 }
2568
2569 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2570 bool cur_promisc;
2571
2572 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2573 new_overflow);
2574 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2575 if (aq_ret) {
2576 retval = i40e_aq_rc_to_posix(aq_ret,
2577 hw->aq.asq_last_status);
2578 dev_info(&pf->pdev->dev,
2579 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2580 cur_promisc ? "on" : "off",
2581 vsi_name,
2582 i40e_stat_str(hw, aq_ret),
2583 i40e_aq_str(hw, hw->aq.asq_last_status));
2584 }
2585 }
2586out:
2587
2588 if (retval)
2589 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2590
2591 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2592 return retval;
2593
2594err_no_memory:
2595
2596 spin_lock_bh(&vsi->mac_filter_hash_lock);
2597err_no_memory_locked:
2598 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2599 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2600 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2601
2602 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2603 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2604 return -ENOMEM;
2605}
2606
2607
2608
2609
2610
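/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/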
2611static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2612{
2613 int v;
2614
2615 if (!pf)
2616 return;
2617 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2618 return;
2619 if (test_bit(__I40E_VF_DISABLE, pf->state)) {
2620 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2621 return;
2622 }
2623
2624 for (v = 0; v < pf->num_alloc_vsi; v++) {
2625 if (pf->vsi[v] &&
2626 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2627 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2628
2629 if (ret) {
2630
2631 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2632 pf->state);
2633 break;
2634 }
2635 }
2636 }
2637}
2638
2639
2640
2641
2642
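/**
 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
 * @vsi: the vsi
 **/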
2643static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2644{
2645 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2646 return I40E_RXBUFFER_2048;
2647 else
2648 return I40E_RXBUFFER_3072;
2649}
2650
2651
2652
2653
2654
2655
2656
2657
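/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/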
2658static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2659{
2660 struct i40e_netdev_priv *np = netdev_priv(netdev);
2661 struct i40e_vsi *vsi = np->vsi;
2662 struct i40e_pf *pf = vsi->back;
2663
2664 if (i40e_enabled_xdp_vsi(vsi)) {
2665 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2666
2667 if (frame_size > i40e_max_xdp_frame_size(vsi))
2668 return -EINVAL;
2669 }
2670
2671 netdev_dbg(netdev, "changing MTU from %d to %d\n",
2672 netdev->mtu, new_mtu);
2673 netdev->mtu = new_mtu;
2674 if (netif_running(netdev))
2675 i40e_vsi_reinit_locked(vsi);
2676 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2677 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2678 return 0;
2679}
2680
2681
2682
2683
2684
2685
2686
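/**
 * i40e_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/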
2687int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2688{
2689 struct i40e_netdev_priv *np = netdev_priv(netdev);
2690 struct i40e_pf *pf = np->vsi->back;
2691
2692 switch (cmd) {
2693 case SIOCGHWTSTAMP:
2694 return i40e_ptp_get_ts_config(pf, ifr);
2695 case SIOCSHWTSTAMP:
2696 return i40e_ptp_set_ts_config(pf, ifr);
2697 default:
2698 return -EOPNOTSUPP;
2699 }
2700}
2701
2702
2703
2704
2705
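/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/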
2706void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2707{
2708 struct i40e_vsi_context ctxt;
2709 i40e_status ret;
2710
2711
2712 if (vsi->info.pvid)
2713 return;
2714
2715 if ((vsi->info.valid_sections &
2716 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2717 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2718 return;
2719
2720 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2721 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2722 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2723
2724 ctxt.seid = vsi->seid;
2725 ctxt.info = vsi->info;
2726 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2727 if (ret) {
2728 dev_info(&vsi->back->pdev->dev,
2729 "update vlan stripping failed, err %s aq_err %s\n",
2730 i40e_stat_str(&vsi->back->hw, ret),
2731 i40e_aq_str(&vsi->back->hw,
2732 vsi->back->hw.aq.asq_last_status));
2733 }
2734}
2735
2736
2737
2738
2739
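/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/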
2740void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2741{
2742 struct i40e_vsi_context ctxt;
2743 i40e_status ret;
2744
2745
2746 if (vsi->info.pvid)
2747 return;
2748
2749 if ((vsi->info.valid_sections &
2750 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2751 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2752 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2753 return;
2754
2755 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2756 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2757 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2758
2759 ctxt.seid = vsi->seid;
2760 ctxt.info = vsi->info;
2761 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2762 if (ret) {
2763 dev_info(&vsi->back->pdev->dev,
2764 "update vlan stripping failed, err %s aq_err %s\n",
2765 i40e_stat_str(&vsi->back->hw, ret),
2766 i40e_aq_str(&vsi->back->hw,
2767 vsi->back->hw.aq.asq_last_status));
2768 }
2769}
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
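/**
 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only, -1 = any)
 *
 * This is a helper function for adding a new MAC/VLAN filter with the
 * specified VLAN for each existing MAC address already in the hash table.
 * This function does *not* perform any accounting to update filters based on
 * VLAN mode.
 *
 * NOTE: this function expects to be called while under the
 * mac_filter_hash_lock
 **/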
2784int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2785{
2786 struct i40e_mac_filter *f, *add_f;
2787 struct hlist_node *h;
2788 int bkt;
2789
2790 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2791 if (f->state == I40E_FILTER_REMOVE)
2792 continue;
2793 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2794 if (!add_f) {
2795 dev_info(&vsi->back->pdev->dev,
2796 "Could not add vlan filter %d for %pM\n",
2797 vid, f->macaddr);
2798 return -ENOMEM;
2799 }
2800 }
2801
2802 return 0;
2803}
2804
2805
2806
2807
2808
2809
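/**
 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be added
 **/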
2810int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2811{
2812 int err;
2813
2814 if (vsi->info.pvid)
2815 return -EINVAL;
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825 if (!vid)
2826 return 0;
2827
2828
2829 spin_lock_bh(&vsi->mac_filter_hash_lock);
2830 err = i40e_add_vlan_all_mac(vsi, vid);
2831 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2832 if (err)
2833 return err;
2834
2835
2836
2837
2838 i40e_service_event_schedule(vsi->back);
2839 return 0;
2840}
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
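/**
 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
 *
 * NOTE: this function expects to be called while under the
 * mac_filter_hash_lock
 **/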
2855void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2856{
2857 struct i40e_mac_filter *f;
2858 struct hlist_node *h;
2859 int bkt;
2860
2861 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2862 if (f->vlan == vid)
2863 __i40e_del_filter(vsi, f);
2864 }
2865}
2866
2867
2868
2869
2870
2871
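/**
 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be removed
 **/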
2872void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2873{
2874 if (!vid || vsi->info.pvid)
2875 return;
2876
2877 spin_lock_bh(&vsi->mac_filter_hash_lock);
2878 i40e_rm_vlan_all_mac(vsi, vid);
2879 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2880
2881
2882
2883
2884 i40e_service_event_schedule(vsi->back);
2885}
2886
2887
2888
2889
2890
2891
2892
2893
2894
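/**
 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol value
 * @vid: vlan id to be added
 *
 * net_device_ops implementation for adding vlan ids
 **/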
2895static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2896 __always_unused __be16 proto, u16 vid)
2897{
2898 struct i40e_netdev_priv *np = netdev_priv(netdev);
2899 struct i40e_vsi *vsi = np->vsi;
2900 int ret = 0;
2901
2902 if (vid >= VLAN_N_VID)
2903 return -EINVAL;
2904
2905 ret = i40e_vsi_add_vlan(vsi, vid);
2906 if (!ret)
2907 set_bit(vid, vsi->active_vlans);
2908
2909 return ret;
2910}
2911
2912
2913
2914
2915
2916
2917
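/**
 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
 * @netdev: network interface to be adjusted
 * @proto: unused protocol value
 * @vid: vlan id to be added
 **/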
2918static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
2919 __always_unused __be16 proto, u16 vid)
2920{
2921 struct i40e_netdev_priv *np = netdev_priv(netdev);
2922 struct i40e_vsi *vsi = np->vsi;
2923
2924 if (vid >= VLAN_N_VID)
2925 return;
2926 set_bit(vid, vsi->active_vlans);
2927}
2928
2929
2930
2931
2932
2933
2934
2935
2936
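/**
 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol value
 * @vid: vlan id to be removed
 *
 * net_device_ops implementation for removing vlan ids
 **/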
2937static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2938 __always_unused __be16 proto, u16 vid)
2939{
2940 struct i40e_netdev_priv *np = netdev_priv(netdev);
2941 struct i40e_vsi *vsi = np->vsi;
2942
2943
2944
2945
2946
2947 i40e_vsi_kill_vlan(vsi, vid);
2948
2949 clear_bit(vid, vsi->active_vlans);
2950
2951 return 0;
2952}
2953
2954
2955
2956
2957
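/**
 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
 * @vsi: the vsi being adjusted
 **/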
2958static void i40e_restore_vlan(struct i40e_vsi *vsi)
2959{
2960 u16 vid;
2961
2962 if (!vsi->netdev)
2963 return;
2964
2965 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2966 i40e_vlan_stripping_enable(vsi);
2967 else
2968 i40e_vlan_stripping_disable(vsi);
2969
2970 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2971 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
2972 vid);
2973}
2974
2975
2976
2977
2978
2979
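/**
 * i40e_vsi_add_pvid - Add pvid for the VSI
 * @vsi: the vsi being adjusted
 * @vid: the vlan id to set as a PVID
 **/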
2980int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2981{
2982 struct i40e_vsi_context ctxt;
2983 i40e_status ret;
2984
2985 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2986 vsi->info.pvid = cpu_to_le16(vid);
2987 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2988 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2989 I40E_AQ_VSI_PVLAN_EMOD_STR;
2990
2991 ctxt.seid = vsi->seid;
2992 ctxt.info = vsi->info;
2993 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2994 if (ret) {
2995 dev_info(&vsi->back->pdev->dev,
2996 "add pvid failed, err %s aq_err %s\n",
2997 i40e_stat_str(&vsi->back->hw, ret),
2998 i40e_aq_str(&vsi->back->hw,
2999 vsi->back->hw.aq.asq_last_status));
3000 return -ENOENT;
3001 }
3002
3003 return 0;
3004}
3005
3006
3007
3008
3009
3010
3011
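/**
 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
 * @vsi: the vsi being adjusted
 *
 * Clear the PVID and disable VLAN stripping on the VSI.
 **/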
3012void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
3013{
3014 vsi->info.pvid = 0;
3015
3016 i40e_vlan_stripping_disable(vsi);
3017}
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
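/**
 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/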
3029static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
3030{
3031 int i, err = 0;
3032
3033 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3034 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3035
3036 if (!i40e_enabled_xdp_vsi(vsi))
3037 return err;
3038
3039 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3040 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3041
3042 return err;
3043}
3044
3045
3046
3047
3048
3049
3050
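/**
 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free VSI's transmit software resources
 **/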
3051static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3052{
3053 int i;
3054
3055 if (vsi->tx_rings) {
3056 for (i = 0; i < vsi->num_queue_pairs; i++)
3057 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3058 i40e_free_tx_resources(vsi->tx_rings[i]);
3059 }
3060
3061 if (vsi->xdp_rings) {
3062 for (i = 0; i < vsi->num_queue_pairs; i++)
3063 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3064 i40e_free_tx_resources(vsi->xdp_rings[i]);
3065 }
3066}
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
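/**
 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/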
3078static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3079{
3080 int i, err = 0;
3081
3082 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3083 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3084 return err;
3085}
3086
3087
3088
3089
3090
3091
3092
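/**
 * i40e_vsi_free_rx_resources - Free Rx resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free all receive software resources
 **/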
3093static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3094{
3095 int i;
3096
3097 if (!vsi->rx_rings)
3098 return;
3099
3100 for (i = 0; i < vsi->num_queue_pairs; i++)
3101 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3102 i40e_free_rx_resources(vsi->rx_rings[i]);
3103}
3104
3105
3106
3107
3108
3109
3110
3111
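/**
 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * Map the ring's queue to a CPU via XPS, spreading vectors across
 * local CPUs. Skipped for rings that belong to a channel and only
 * done once per ring.
 **/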
3112static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3113{
3114 int cpu;
3115
3116 if (!ring->q_vector || !ring->netdev || ring->ch)
3117 return;
3118
3119
3120 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3121 return;
3122
3123 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3124 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3125 ring->queue_index);
3126}
3127
3128
3129
3130
3131
3132
3133
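/**
 * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC is enabled
 * @ring: The Tx or Rx ring
 *
 * Returns the AF_XDP buffer pool or NULL.
 **/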
3134static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
3135{
3136 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3137 int qid = ring->queue_index;
3138
3139 if (ring_is_xdp(ring))
3140 qid -= ring->vsi->alloc_queue_pairs;
3141
3142 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3143 return NULL;
3144
3145 return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
3146}
3147
3148
3149
3150
3151
3152
3153
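/**
 * i40e_configure_tx_ring - Configure a transmit ring context and rest
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
 **/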
3154static int i40e_configure_tx_ring(struct i40e_ring *ring)
3155{
3156 struct i40e_vsi *vsi = ring->vsi;
3157 u16 pf_q = vsi->base_queue + ring->queue_index;
3158 struct i40e_hw *hw = &vsi->back->hw;
3159 struct i40e_hmc_obj_txq tx_ctx;
3160 i40e_status err = 0;
3161 u32 qtx_ctl = 0;
3162
3163 if (ring_is_xdp(ring))
3164 ring->xsk_pool = i40e_xsk_pool(ring);
3165
3166
3167 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3168 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3169 ring->atr_count = 0;
3170 } else {
3171 ring->atr_sample_rate = 0;
3172 }
3173
3174
3175 i40e_config_xps_tx_ring(ring);
3176
3177
3178 memset(&tx_ctx, 0, sizeof(tx_ctx));
3179
3180 tx_ctx.new_context = 1;
3181 tx_ctx.base = (ring->dma / 128);
3182 tx_ctx.qlen = ring->count;
3183 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3184 I40E_FLAG_FD_ATR_ENABLED));
3185 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3186
3187 if (vsi->type != I40E_VSI_FDIR)
3188 tx_ctx.head_wb_ena = 1;
3189 tx_ctx.head_wb_addr = ring->dma +
3190 (ring->count * sizeof(struct i40e_tx_desc));
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203 if (ring->ch)
3204 tx_ctx.rdylist =
3205 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3206
3207 else
3208 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3209
3210 tx_ctx.rdylist_act = 0;
3211
3212
3213 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3214 if (err) {
3215 dev_info(&vsi->back->pdev->dev,
3216 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3217 ring->queue_index, pf_q, err);
3218 return -ENOMEM;
3219 }
3220
3221
3222 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3223 if (err) {
3224 dev_info(&vsi->back->pdev->dev,
3225 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3226 ring->queue_index, pf_q, err);
3227 return -ENOMEM;
3228 }
3229
3230
3231 if (ring->ch) {
3232 if (ring->ch->type == I40E_VSI_VMDQ2)
3233 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3234 else
3235 return -EINVAL;
3236
3237 qtx_ctl |= (ring->ch->vsi_number <<
3238 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3239 I40E_QTX_CTL_VFVM_INDX_MASK;
3240 } else {
3241 if (vsi->type == I40E_VSI_VMDQ2) {
3242 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3243 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3244 I40E_QTX_CTL_VFVM_INDX_MASK;
3245 } else {
3246 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3247 }
3248 }
3249
3250 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3251 I40E_QTX_CTL_PF_INDX_MASK);
3252 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3253 i40e_flush(hw);
3254
3255
3256 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3257
3258 return 0;
3259}
3260
3261
3262
3263
3264
3265
3266
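/**
 * i40e_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 **/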
3267static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
3268{
3269 return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
3270}
3271
3272
3273
3274
3275
3276
3277
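/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
 **/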
3278static int i40e_configure_rx_ring(struct i40e_ring *ring)
3279{
3280 struct i40e_vsi *vsi = ring->vsi;
3281 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3282 u16 pf_q = vsi->base_queue + ring->queue_index;
3283 struct i40e_hw *hw = &vsi->back->hw;
3284 struct i40e_hmc_obj_rxq rx_ctx;
3285 i40e_status err = 0;
3286 bool ok;
3287 int ret;
3288
3289 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3290
3291
3292 memset(&rx_ctx, 0, sizeof(rx_ctx));
3293
3294 if (ring->vsi->type == I40E_VSI_MAIN)
3295 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3296
3297 kfree(ring->rx_bi);
3298 ring->xsk_pool = i40e_xsk_pool(ring);
3299 if (ring->xsk_pool) {
3300 ret = i40e_alloc_rx_bi_zc(ring);
3301 if (ret)
3302 return ret;
3303 ring->rx_buf_len =
3304 xsk_pool_get_rx_frame_size(ring->xsk_pool);
3305
3306
3307
3308
3309 chain_len = 1;
3310 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3311 MEM_TYPE_XSK_BUFF_POOL,
3312 NULL);
3313 if (ret)
3314 return ret;
3315 dev_info(&vsi->back->pdev->dev,
3316 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
3317 ring->queue_index);
3318
3319 } else {
3320 ret = i40e_alloc_rx_bi(ring);
3321 if (ret)
3322 return ret;
3323 ring->rx_buf_len = vsi->rx_buf_len;
3324 if (ring->vsi->type == I40E_VSI_MAIN) {
3325 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3326 MEM_TYPE_PAGE_SHARED,
3327 NULL);
3328 if (ret)
3329 return ret;
3330 }
3331 }
3332
3333 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3334 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3335
3336 rx_ctx.base = (ring->dma / 128);
3337 rx_ctx.qlen = ring->count;
3338
3339
3340 rx_ctx.dsize = 0;
3341
3342
3343
3344
3345 rx_ctx.hsplit_0 = 0;
3346
3347 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3348 if (hw->revision_id == 0)
3349 rx_ctx.lrxqthresh = 0;
3350 else
3351 rx_ctx.lrxqthresh = 1;
3352 rx_ctx.crcstrip = 1;
3353 rx_ctx.l2tsel = 1;
3354
3355 rx_ctx.showiv = 0;
3356
3357 rx_ctx.prefena = 1;
3358
3359
3360 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3361 if (err) {
3362 dev_info(&vsi->back->pdev->dev,
3363 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3364 ring->queue_index, pf_q, err);
3365 return -ENOMEM;
3366 }
3367
3368
3369 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3370 if (err) {
3371 dev_info(&vsi->back->pdev->dev,
3372 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3373 ring->queue_index, pf_q, err);
3374 return -ENOMEM;
3375 }
3376
3377
3378 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3379 clear_ring_build_skb_enabled(ring);
3380 else
3381 set_ring_build_skb_enabled(ring);
3382
3383 ring->rx_offset = i40e_rx_offset(ring);
3384
3385
3386 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3387 writel(0, ring->tail);
3388
3389 if (ring->xsk_pool) {
3390 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
3391 ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
3392 } else {
3393 ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3394 }
3395 if (!ok) {
3396
3397
3398
3399 dev_info(&vsi->back->pdev->dev,
3400 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3401 ring->xsk_pool ? "AF_XDP ZC enabled " : "",
3402 ring->queue_index, pf_q);
3403 }
3404
3405 return 0;
3406}
3407
3408
3409
3410
3411
3412
3413
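/**
 * i40e_vsi_configure_tx - Configure the VSI for Tx
 * @vsi: VSI structure describing this set of rings and resources
 *
 * Configure the Tx VSI for operation.
 **/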
3414static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3415{
3416 int err = 0;
3417 u16 i;
3418
3419 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3420 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3421
3422 if (err || !i40e_enabled_xdp_vsi(vsi))
3423 return err;
3424
3425 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3426 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3427
3428 return err;
3429}
3430
3431
3432
3433
3434
3435
3436
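/**
 * i40e_vsi_configure_rx - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Configure the Rx VSI for operation.
 **/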
3437static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3438{
3439 int err = 0;
3440 u16 i;
3441
3442 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3443 vsi->max_frame = I40E_MAX_RXBUFFER;
3444 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3445#if (PAGE_SIZE < 8192)
3446 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3447 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3448 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3449 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3450#endif
3451 } else {
3452 vsi->max_frame = I40E_MAX_RXBUFFER;
3453 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3454 I40E_RXBUFFER_2048;
3455 }
3456
3457
3458 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3459 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3460
3461 return err;
3462}
3463
3464
3465
3466
3467
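/**
 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
 * @vsi: ptr to the VSI
 **/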
3468static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3469{
3470 struct i40e_ring *tx_ring, *rx_ring;
3471 u16 qoffset, qcount;
3472 int i, n;
3473
3474 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3475
3476 for (i = 0; i < vsi->num_queue_pairs; i++) {
3477 rx_ring = vsi->rx_rings[i];
3478 tx_ring = vsi->tx_rings[i];
3479 rx_ring->dcb_tc = 0;
3480 tx_ring->dcb_tc = 0;
3481 }
3482 return;
3483 }
3484
3485 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3486 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3487 continue;
3488
3489 qoffset = vsi->tc_config.tc_info[n].qoffset;
3490 qcount = vsi->tc_config.tc_info[n].qcount;
3491 for (i = qoffset; i < (qoffset + qcount); i++) {
3492 rx_ring = vsi->rx_rings[i];
3493 tx_ring = vsi->tx_rings[i];
3494 rx_ring->dcb_tc = n;
3495 tx_ring->dcb_tc = n;
3496 }
3497 }
3498}
3499
3500
3501
3502
3503
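/**
 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
 * @vsi: ptr to the VSI
 **/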
3504static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3505{
3506 if (vsi->netdev)
3507 i40e_set_rx_mode(vsi->netdev);
3508}
3509
3510
3511
3512
3513
3514
3515
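/**
 * i40e_reset_fdir_filter_cnt - Reset flow director filter counters
 * @pf: Pointer to the targeted PF
 *
 * Set all flow director counters to zero.
 **/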
3516static void i40e_reset_fdir_filter_cnt(struct i40e_pf *pf)
3517{
3518 pf->fd_tcp4_filter_cnt = 0;
3519 pf->fd_udp4_filter_cnt = 0;
3520 pf->fd_sctp4_filter_cnt = 0;
3521 pf->fd_ip4_filter_cnt = 0;
3522 pf->fd_tcp6_filter_cnt = 0;
3523 pf->fd_udp6_filter_cnt = 0;
3524 pf->fd_sctp6_filter_cnt = 0;
3525 pf->fd_ip6_filter_cnt = 0;
3526}
3527
3528
3529
3530
3531
3532
3533
3534
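/**
 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
 * @vsi: Pointer to the targeted VSI
 *
 * This function replays the hlist on the hw where all the SB Flow Director
 * filters were saved.
 **/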
3535static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3536{
3537 struct i40e_fdir_filter *filter;
3538 struct i40e_pf *pf = vsi->back;
3539 struct hlist_node *node;
3540
3541 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3542 return;
3543
3544
3545 i40e_reset_fdir_filter_cnt(pf);
3546
3547 hlist_for_each_entry_safe(filter, node,
3548 &pf->fdir_filter_list, fdir_node) {
3549 i40e_add_del_fdir(vsi, filter, true);
3550 }
3551}
3552
3553
3554
3555
3556
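/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 **/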
3557static int i40e_vsi_configure(struct i40e_vsi *vsi)
3558{
3559 int err;
3560
3561 i40e_set_vsi_rx_mode(vsi);
3562 i40e_restore_vlan(vsi);
3563 i40e_vsi_config_dcb_rings(vsi);
3564 err = i40e_vsi_configure_tx(vsi);
3565 if (!err)
3566 err = i40e_vsi_configure_rx(vsi);
3567
3568 return err;
3569}
3570
3571
3572
3573
3574
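/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 **/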
3575static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3576{
3577 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3578 struct i40e_pf *pf = vsi->back;
3579 struct i40e_hw *hw = &pf->hw;
3580 u16 vector;
3581 int i, q;
3582 u32 qp;
3583
3584
3585
3586
3587
3588 qp = vsi->base_queue;
3589 vector = vsi->base_vector;
3590 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3591 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3592
3593 q_vector->rx.next_update = jiffies + 1;
3594 q_vector->rx.target_itr =
3595 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3596 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3597 q_vector->rx.target_itr >> 1);
3598 q_vector->rx.current_itr = q_vector->rx.target_itr;
3599
3600 q_vector->tx.next_update = jiffies + 1;
3601 q_vector->tx.target_itr =
3602 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3603 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3604 q_vector->tx.target_itr >> 1);
3605 q_vector->tx.current_itr = q_vector->tx.target_itr;
3606
3607 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3608 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3609
3610
3611 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3612 for (q = 0; q < q_vector->num_ringpairs; q++) {
3613 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
3614 u32 val;
3615
3616 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3617 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3618 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3619 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3620 (I40E_QUEUE_TYPE_TX <<
3621 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3622
3623 wr32(hw, I40E_QINT_RQCTL(qp), val);
3624
3625 if (has_xdp) {
3626 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3627 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3628 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3629 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3630 (I40E_QUEUE_TYPE_TX <<
3631 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3632
3633 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3634 }
3635
3636 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3637 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3638 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3639 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3640 (I40E_QUEUE_TYPE_RX <<
3641 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3642
3643
3644 if (q == (q_vector->num_ringpairs - 1))
3645 val |= (I40E_QUEUE_END_OF_LIST <<
3646 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3647
3648 wr32(hw, I40E_QINT_TQCTL(qp), val);
3649 qp++;
3650 }
3651 }
3652
3653 i40e_flush(hw);
3654}
3655
3656
3657
3658
3659
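/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: pointer to private device data structure
 **/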
3660static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3661{
3662 struct i40e_hw *hw = &pf->hw;
3663 u32 val;
3664
3665
3666 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3667 rd32(hw, I40E_PFINT_ICR0);
3668
3669 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3670 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3671 I40E_PFINT_ICR0_ENA_GRST_MASK |
3672 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3673 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3674 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3675 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3676 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3677
3678 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3679 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3680
3681 if (pf->flags & I40E_FLAG_PTP)
3682 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3683
3684 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3685
3686
3687 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3688 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3689
3690
3691 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3692}
3693
3694
3695
3696
3697
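/**
 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
 * @vsi: the VSI being configured
 **/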
3698static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3699{
3700 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3701 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3702 struct i40e_pf *pf = vsi->back;
3703 struct i40e_hw *hw = &pf->hw;
3704 u32 val;
3705
3706
3707 q_vector->rx.next_update = jiffies + 1;
3708 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3709 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
3710 q_vector->rx.current_itr = q_vector->rx.target_itr;
3711 q_vector->tx.next_update = jiffies + 1;
3712 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3713 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
3714 q_vector->tx.current_itr = q_vector->tx.target_itr;
3715
3716 i40e_enable_misc_int_causes(pf);
3717
3718
3719 wr32(hw, I40E_PFINT_LNKLST0, 0);
3720
3721
3722 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3723 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3724 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3725 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3726
3727 wr32(hw, I40E_QINT_RQCTL(0), val);
3728
3729 if (i40e_enabled_xdp_vsi(vsi)) {
3730 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3731 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3732 (I40E_QUEUE_TYPE_TX
3733 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3734
3735 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3736 }
3737
3738 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3739 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3740 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3741
3742 wr32(hw, I40E_QINT_TQCTL(0), val);
3743 i40e_flush(hw);
3744}
3745
3746
3747
3748
3749
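/**
 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
 * @pf: board private structure
 **/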
3750void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3751{
3752 struct i40e_hw *hw = &pf->hw;
3753
3754 wr32(hw, I40E_PFINT_DYN_CTL0,
3755 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3756 i40e_flush(hw);
3757}
3758
3759
3760
3761
3762
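/**
 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
 * @pf: board private structure
 **/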
3763void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3764{
3765 struct i40e_hw *hw = &pf->hw;
3766 u32 val;
3767
3768 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3769 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3770 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3771
3772 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3773 i40e_flush(hw);
3774}
3775
3776
3777
3778
3779
3780
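/**
 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/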
3781static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3782{
3783 struct i40e_q_vector *q_vector = data;
3784
3785 if (!q_vector->tx.ring && !q_vector->rx.ring)
3786 return IRQ_HANDLED;
3787
3788 napi_schedule_irqoff(&q_vector->napi);
3789
3790 return IRQ_HANDLED;
3791}
3792
3793
3794
3795
3796
3797
3798
3799
3800
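/**
 * i40e_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/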
3801static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3802 const cpumask_t *mask)
3803{
3804 struct i40e_q_vector *q_vector =
3805 container_of(notify, struct i40e_q_vector, affinity_notify);
3806
3807 cpumask_copy(&q_vector->affinity_mask, mask);
3808}
3809
3810
3811
3812
3813
3814
3815
3816
3817
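/**
 * i40e_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/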
3818static void i40e_irq_affinity_release(struct kref *ref) {}
3819
3820
3821
3822
3823
3824
3825
3826
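/**
 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
 * @vsi: the VSI being configured
 * @basename: name for the vector
 *
 * Allocates MSI-X vectors and requests interrupts from the kernel.
 **/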
3827static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3828{
3829 int q_vectors = vsi->num_q_vectors;
3830 struct i40e_pf *pf = vsi->back;
3831 int base = vsi->base_vector;
3832 int rx_int_idx = 0;
3833 int tx_int_idx = 0;
3834 int vector, err;
3835 int irq_num;
3836 int cpu;
3837
3838 for (vector = 0; vector < q_vectors; vector++) {
3839 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3840
3841 irq_num = pf->msix_entries[base + vector].vector;
3842
3843 if (q_vector->tx.ring && q_vector->rx.ring) {
3844 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3845 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3846 tx_int_idx++;
3847 } else if (q_vector->rx.ring) {
3848 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3849 "%s-%s-%d", basename, "rx", rx_int_idx++);
3850 } else if (q_vector->tx.ring) {
3851 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3852 "%s-%s-%d", basename, "tx", tx_int_idx++);
3853 } else {
3854
3855 continue;
3856 }
3857 err = request_irq(irq_num,
3858 vsi->irq_handler,
3859 0,
3860 q_vector->name,
3861 q_vector);
3862 if (err) {
3863 dev_info(&pf->pdev->dev,
3864 "MSIX request_irq failed, error: %d\n", err);
3865 goto free_queue_irqs;
3866 }
3867
3868
3869 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3870 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3871 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3872
3873
3874
3875
3876
3877
3878 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3879 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
3880 }
3881
3882 vsi->irqs_ready = true;
3883 return 0;
3884
3885free_queue_irqs:
3886 while (vector) {
3887 vector--;
3888 irq_num = pf->msix_entries[base + vector].vector;
3889 irq_set_affinity_notifier(irq_num, NULL);
3890 irq_set_affinity_hint(irq_num, NULL);
 /* dev_id must match the q_vector pointer passed to request_irq() */
3891 free_irq(irq_num, vsi->q_vectors[vector]);
3892 }
3893 return err;
3894}
3895
3896
3897
3898
3899
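/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 **/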
3900static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3901{
3902 struct i40e_pf *pf = vsi->back;
3903 struct i40e_hw *hw = &pf->hw;
3904 int base = vsi->base_vector;
3905 int i;
3906
3907
3908 for (i = 0; i < vsi->num_queue_pairs; i++) {
3909 u32 val;
3910
3911 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3912 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3913 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3914
3915 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3916 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3917 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3918
3919 if (!i40e_enabled_xdp_vsi(vsi))
3920 continue;
3921 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3922 }
3923
3924
3925 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3926 for (i = vsi->base_vector;
3927 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3928 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3929
3930 i40e_flush(hw);
3931 for (i = 0; i < vsi->num_q_vectors; i++)
3932 synchronize_irq(pf->msix_entries[i + base].vector);
3933 } else {
3934
3935 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3936 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3937 i40e_flush(hw);
3938 synchronize_irq(pf->pdev->irq);
3939 }
3940}
3941
3942
3943
3944
3945
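/**
 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 **/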
3946static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3947{
3948 struct i40e_pf *pf = vsi->back;
3949 int i;
3950
3951 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3952 for (i = 0; i < vsi->num_q_vectors; i++)
3953 i40e_irq_dynamic_enable(vsi, i);
3954 } else {
3955 i40e_irq_dynamic_enable_icr0(pf);
3956 }
3957
3958 i40e_flush(&pf->hw);
3959 return 0;
3960}
3961
3962
3963
3964
3965
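/**
 * i40e_free_misc_vector - Free the vector that handles non-queue events
 * @pf: board private structure
 **/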
3966static void i40e_free_misc_vector(struct i40e_pf *pf)
3967{
3968
3969 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3970 i40e_flush(&pf->hw);
3971
3972 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
3973 synchronize_irq(pf->msix_entries[0].vector);
3974 free_irq(pf->msix_entries[0].vector, pf);
3975 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
3976 }
3977}
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
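/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts.  This is also used in
 * MSIX mode to handle the non-queue interrupts.
 **/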
3988static irqreturn_t i40e_intr(int irq, void *data)
3989{
3990 struct i40e_pf *pf = (struct i40e_pf *)data;
3991 struct i40e_hw *hw = &pf->hw;
3992 irqreturn_t ret = IRQ_NONE;
3993 u32 icr0, icr0_remaining;
3994 u32 val, ena_mask;
3995
3996 icr0 = rd32(hw, I40E_PFINT_ICR0);
3997 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3998
3999
4000 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
4001 goto enable_intr;
4002
4003
4004 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
4005 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
4006 pf->sw_int_count++;
4007
4008 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
4009 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
4010 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
4011 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
4012 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
4013 }
4014
4015
4016 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
4017 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
4018 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
4019
4020
4021
4022
4023
4024
4025
4026 if (!test_bit(__I40E_DOWN, pf->state))
4027 napi_schedule_irqoff(&q_vector->napi);
4028 }
4029
4030 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4031 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4032 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
4033 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
4034 }
4035
4036 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
4037 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4038 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
4039 }
4040
4041 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4042
4043 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
4044 u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4045
4046 reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
4047 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4048 } else {
4049 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
4050 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4051 }
4052 }
4053
4054 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
4055 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4056 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
4057 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
4058 val = rd32(hw, I40E_GLGEN_RSTAT);
4059 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
4060 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
4061 if (val == I40E_RESET_CORER) {
4062 pf->corer_count++;
4063 } else if (val == I40E_RESET_GLOBR) {
4064 pf->globr_count++;
4065 } else if (val == I40E_RESET_EMPR) {
4066 pf->empr_count++;
4067 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
4068 }
4069 }
4070
4071 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
4072 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
4073 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
4074 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
4075 rd32(hw, I40E_PFHMC_ERRORINFO),
4076 rd32(hw, I40E_PFHMC_ERRORDATA));
4077 }
4078
4079 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
4080 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
4081
4082 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
4083 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
4084 i40e_ptp_tx_hwtstamp(pf);
4085 }
4086 }
4087
4088
4089
4090
4091
4092 icr0_remaining = icr0 & ena_mask;
4093 if (icr0_remaining) {
4094 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4095 icr0_remaining);
4096 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4097 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4098 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4099 dev_info(&pf->pdev->dev, "device will be reset\n");
4100 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4101 i40e_service_event_schedule(pf);
4102 }
4103 ena_mask &= ~icr0_remaining;
4104 }
4105 ret = IRQ_HANDLED;
4106
4107enable_intr:
4108
4109 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4110 if (!test_bit(__I40E_DOWN, pf->state) ||
4111 test_bit(__I40E_RECOVERY_MODE, pf->state)) {
4112 i40e_service_event_schedule(pf);
4113 i40e_irq_dynamic_enable_icr0(pf);
4114 }
4115
4116 return ret;
4117}
4118
4119
4120
4121
4122
4123
4124
4125
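/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/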
4126static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4127{
4128 struct i40e_vsi *vsi = tx_ring->vsi;
4129 u16 i = tx_ring->next_to_clean;
4130 struct i40e_tx_buffer *tx_buf;
4131 struct i40e_tx_desc *tx_desc;
4132
4133 tx_buf = &tx_ring->tx_bi[i];
4134 tx_desc = I40E_TX_DESC(tx_ring, i);
4135 i -= tx_ring->count;
4136
4137 do {
4138 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4139
4140
4141 if (!eop_desc)
4142 break;
4143
4144
4145 smp_rmb();
4146
4147
4148 if (!(eop_desc->cmd_type_offset_bsz &
4149 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4150 break;
4151
4152
4153 tx_buf->next_to_watch = NULL;
4154
4155 tx_desc->buffer_addr = 0;
4156 tx_desc->cmd_type_offset_bsz = 0;
4157
4158 tx_buf++;
4159 tx_desc++;
4160 i++;
4161 if (unlikely(!i)) {
4162 i -= tx_ring->count;
4163 tx_buf = tx_ring->tx_bi;
4164 tx_desc = I40E_TX_DESC(tx_ring, 0);
4165 }
4166
4167 dma_unmap_single(tx_ring->dev,
4168 dma_unmap_addr(tx_buf, dma),
4169 dma_unmap_len(tx_buf, len),
4170 DMA_TO_DEVICE);
4171 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4172 kfree(tx_buf->raw_buf);
4173
4174 tx_buf->raw_buf = NULL;
4175 tx_buf->tx_flags = 0;
4176 tx_buf->next_to_watch = NULL;
4177 dma_unmap_len_set(tx_buf, len, 0);
4178 tx_desc->buffer_addr = 0;
4179 tx_desc->cmd_type_offset_bsz = 0;
4180
4181
4182 tx_buf++;
4183 tx_desc++;
4184 i++;
4185 if (unlikely(!i)) {
4186 i -= tx_ring->count;
4187 tx_buf = tx_ring->tx_bi;
4188 tx_desc = I40E_TX_DESC(tx_ring, 0);
4189 }
4190
4191
4192 budget--;
4193 } while (likely(budget));
4194
4195 i += tx_ring->count;
4196 tx_ring->next_to_clean = i;
4197
4198 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4199 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4200
4201 return budget > 0;
4202}
4203
4204
4205
4206
4207
4208
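/**
 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/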
4209static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4210{
4211 struct i40e_q_vector *q_vector = data;
4212 struct i40e_vsi *vsi;
4213
4214 if (!q_vector->tx.ring)
4215 return IRQ_HANDLED;
4216
4217 vsi = q_vector->tx.ring->vsi;
4218 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4219
4220 return IRQ_HANDLED;
4221}
4222
4223
4224
4225
4226
4227
4228
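/**
 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
 * @vsi: the VSI being configured
 * @v_idx: vector index
 * @qp_idx: queue pair index
 **/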
4229static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4230{
4231 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4232 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4233 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4234
4235 tx_ring->q_vector = q_vector;
4236 tx_ring->next = q_vector->tx.ring;
4237 q_vector->tx.ring = tx_ring;
4238 q_vector->tx.count++;
4239
4240
4241 if (i40e_enabled_xdp_vsi(vsi)) {
4242 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4243
4244 xdp_ring->q_vector = q_vector;
4245 xdp_ring->next = q_vector->tx.ring;
4246 q_vector->tx.ring = xdp_ring;
4247 q_vector->tx.count++;
4248 }
4249
4250 rx_ring->q_vector = q_vector;
4251 rx_ring->next = q_vector->rx.ring;
4252 q_vector->rx.ring = rx_ring;
4253 q_vector->rx.count++;
4254}
4255
4256
4257
4258
4259
4260
4261
4262
4263
4264
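/**
 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per queue pair, but on a constrained vector budget, we
 * group the queue pairs as "efficiently" as possible.
 **/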
4265static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4266{
4267 int qp_remaining = vsi->num_queue_pairs;
4268 int q_vectors = vsi->num_q_vectors;
4269 int num_ringpairs;
4270 int v_start = 0;
4271 int qp_idx = 0;
4272
4273
4274
4275
4276
4277
4278
4279
4280 for (; v_start < q_vectors; v_start++) {
4281 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4282
4283 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4284
4285 q_vector->num_ringpairs = num_ringpairs;
4286 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4287
4288 q_vector->rx.count = 0;
4289 q_vector->tx.count = 0;
4290 q_vector->rx.ring = NULL;
4291 q_vector->tx.ring = NULL;
4292
4293 while (num_ringpairs--) {
4294 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4295 qp_idx++;
4296 qp_remaining--;
4297 }
4298 }
4299}
4300
4301
4302
4303
4304
4305
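/**
 * i40e_vsi_request_irq - Request IRQ from the OS
 * @vsi: the VSI being configured
 * @basename: name for the vector
 **/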
4306static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4307{
4308 struct i40e_pf *pf = vsi->back;
4309 int err;
4310
4311 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4312 err = i40e_vsi_request_irq_msix(vsi, basename);
4313 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4314 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4315 pf->int_name, pf);
4316 else
4317 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4318 pf->int_name, pf);
4319
4320 if (err)
4321 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4322
4323 return err;
4324}
4325
4326#ifdef CONFIG_NET_POLL_CONTROLLER
4327
4328
4329
4330
4331
4332
4333
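/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts.  It's not called while the normal interrupt routine is executing.
 **/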
4334static void i40e_netpoll(struct net_device *netdev)
4335{
4336 struct i40e_netdev_priv *np = netdev_priv(netdev);
4337 struct i40e_vsi *vsi = np->vsi;
4338 struct i40e_pf *pf = vsi->back;
4339 int i;
4340
4341
4342 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4343 return;
4344
4345 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4346 for (i = 0; i < vsi->num_q_vectors; i++)
4347 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4348 } else {
4349 i40e_intr(pf->pdev->irq, netdev);
4350 }
4351}
4352#endif
4353
4354#define I40E_QTX_ENA_WAIT_COUNT 50
4355
4356
4357
4358
4359
4360
4361
4362
4363
4364
4365
4366
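/**
 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Tx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state within
 * multiple retries; else will return 0 in case of success.
 **/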
4367static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4368{
4369 int i;
4370 u32 tx_reg;
4371
4372 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4373 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4374 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4375 break;
4376
4377 usleep_range(10, 20);
4378 }
4379 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4380 return -ETIMEDOUT;
4381
4382 return 0;
4383}
4384
4385
4386
4387
4388
4389
4390
4391
4392
4393
4394
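/**
 * i40e_control_tx_q - Start or stop a particular Tx queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @enable: start or stop the queue
 *
 * This function enables or disables a single queue. Note that any delay
 * required after the operation is expected to be handled by the caller of
 * this function.
 **/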
4395static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4396{
4397 struct i40e_hw *hw = &pf->hw;
4398 u32 tx_reg;
4399 int i;
4400
4401
4402 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4403 if (!enable)
4404 usleep_range(10, 20);
4405
4406 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4407 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4408 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4409 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4410 break;
4411 usleep_range(1000, 2000);
4412 }
4413
4414
4415 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4416 return;
4417
4418
4419 if (enable) {
4420 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4421 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4422 } else {
4423 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4424 }
4425
4426 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4427}
4428
4429
4430
4431
4432
4433
4434
4435
4436
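/**
 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
 * @seid: VSI SEID
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @is_xdp: true if the queue is used for XDP
 * @enable: start or stop the queue
 **/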
4437int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4438 bool is_xdp, bool enable)
4439{
4440 int ret;
4441
4442 i40e_control_tx_q(pf, pf_q, enable);
4443
4444
4445 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4446 if (ret) {
4447 dev_info(&pf->pdev->dev,
4448 "VSI seid %d %sTx ring %d %sable timeout\n",
4449 seid, (is_xdp ? "XDP " : ""), pf_q,
4450 (enable ? "en" : "dis"));
4451 }
4452
4453 return ret;
4454}
4455
4456
4457
4458
4459
4460
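/**
 * i40e_vsi_control_tx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/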
4461static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
4462{
4463 struct i40e_pf *pf = vsi->back;
4464 int i, pf_q, ret = 0;
4465
4466 pf_q = vsi->base_queue;
4467 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4468 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4469 pf_q,
4470 false /* is_xdp */, enable);
4471 if (ret)
4472 break;
4473
4474 if (!i40e_enabled_xdp_vsi(vsi))
4475 continue;
4476
4477 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4478 pf_q + vsi->alloc_queue_pairs,
4479 true /* is_xdp */, enable);
4480 if (ret)
4481 break;
4482 }
4483 return ret;
4484}
4485
4486
4487
4488
4489
4490
4491
4492
4493
4494
4495
4496
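/**
 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state within
 * multiple retries; else will return 0 in case of success.
 **/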
4497static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4498{
4499 int i;
4500 u32 rx_reg;
4501
4502 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4503 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4504 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4505 break;
4506
4507 usleep_range(10, 20);
4508 }
4509 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4510 return -ETIMEDOUT;
4511
4512 return 0;
4513}
4514
4515
4516
4517
4518
4519
4520
4521
4522
4523
4524
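/**
 * i40e_control_rx_q - Start or stop a particular Rx queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @enable: start or stop the queue
 *
 * This function enables or disables a single queue. Note that
 * any delay required after the operation is expected to be
 * handled by the caller of this function.
 **/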
4525static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4526{
4527 struct i40e_hw *hw = &pf->hw;
4528 u32 rx_reg;
4529 int i;
4530
4531 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4532 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4533 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4534 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4535 break;
4536 usleep_range(1000, 2000);
4537 }
4538
4539
4540 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4541 return;
4542
4543
4544 if (enable)
4545 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4546 else
4547 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4548
4549 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4550}
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
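/**
 * i40e_control_wait_rx_q - Start/stop Rx queue and wait for completion
 * @pf: the PF structure
 * @pf_q: queue being configured
 * @enable: start or stop the rings
 *
 * This function enables or disables a single queue along with waiting
 * for the change to finish.
 * Returns -ETIMEDOUT in case of failing to reach the requested state within
 * multiple retries; else will return 0 in case of success.
 **/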
4562int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4563{
4564 int ret = 0;
4565
4566 i40e_control_rx_q(pf, pf_q, enable);
4567
4568
4569 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4570 if (ret)
4571 return ret;
4572
4573 return ret;
4574}
4575
4576
4577
4578
4579
4580
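/**
 * i40e_vsi_control_rx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/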
4581static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4582{
4583 struct i40e_pf *pf = vsi->back;
4584 int i, pf_q, ret = 0;
4585
4586 pf_q = vsi->base_queue;
4587 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4588 ret = i40e_control_wait_rx_q(pf, pf_q, enable);
4589 if (ret) {
4590 dev_info(&pf->pdev->dev,
4591 "VSI seid %d Rx ring %d %sable timeout\n",
4592 vsi->seid, pf_q, (enable ? "en" : "dis"));
4593 break;
4594 }
4595 }
4596
4597
4598
4599
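 /* Due to HW errata, on Rx disable only, the register can
 * indicate done before it really is. Needs 50ms to be sure
 */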
4600 if (!enable)
4601 mdelay(50);
4602
4603 return ret;
4604}
4605
4606
4607
4608
4609
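/**
 * i40e_vsi_start_rings - Start a VSI's rings
 * @vsi: the VSI being configured
 **/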
4610int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4611{
4612 int ret = 0;
4613
4614
4615 ret = i40e_vsi_control_rx(vsi, true);
4616 if (ret)
4617 return ret;
4618 ret = i40e_vsi_control_tx(vsi, true);
4619
4620 return ret;
4621}
4622
4623
4624
4625
4626
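/**
 * i40e_vsi_stop_rings - Stop a VSI's rings
 * @vsi: the VSI being configured
 **/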
4627void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4628{
4629
4630 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4631 return i40e_vsi_stop_rings_no_wait(vsi);
4632
4633
4634
4635
4636 i40e_vsi_control_tx(vsi, false);
4637 i40e_vsi_control_rx(vsi, false);
4638}
4639
4640
4641
4642
4643
4644
4645
4646
4647
4648
4649
4650
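/**
 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
 * @vsi: the VSI being shutdown
 *
 * This function stops all the rings for a VSI but does not delay to verify
 * that rings have been disabled. It is expected that the caller is shutting
 * down multiple VSIs at once and will delay together for all the VSIs after
 * this function is called.
 **/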
4651void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4652{
4653 struct i40e_pf *pf = vsi->back;
4654 int i, pf_q;
4655
4656 pf_q = vsi->base_queue;
4657 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4658 i40e_control_tx_q(pf, pf_q, false);
4659 i40e_control_rx_q(pf, pf_q, false);
4660 }
4661}
4662
4663
4664
4665
4666
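/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 **/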
4667static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4668{
4669 struct i40e_pf *pf = vsi->back;
4670 struct i40e_hw *hw = &pf->hw;
4671 int base = vsi->base_vector;
4672 u32 val, qp;
4673 int i;
4674
4675 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4676 if (!vsi->q_vectors)
4677 return;
4678
4679 if (!vsi->irqs_ready)
4680 return;
4681
4682 vsi->irqs_ready = false;
4683 for (i = 0; i < vsi->num_q_vectors; i++) {
4684 int irq_num;
4685 u16 vector;
4686
4687 vector = i + base;
4688 irq_num = pf->msix_entries[vector].vector;
4689
4690
4691 if (!vsi->q_vectors[i] ||
4692 !vsi->q_vectors[i]->num_ringpairs)
4693 continue;
4694
4695
4696 irq_set_affinity_notifier(irq_num, NULL);
4697
4698 irq_set_affinity_hint(irq_num, NULL);
4699 synchronize_irq(irq_num);
4700 free_irq(irq_num, vsi->q_vectors[i]);
4701
4702
4703
4704
4705
4706
4707
4708
4709 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4710 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4711 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4712 val |= I40E_QUEUE_END_OF_LIST
4713 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4714 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4715
4716 while (qp != I40E_QUEUE_END_OF_LIST) {
4717 u32 next;
4718
4719 val = rd32(hw, I40E_QINT_RQCTL(qp));
4720
4721 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4722 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4723 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4724 I40E_QINT_RQCTL_INTEVENT_MASK);
4725
4726 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4727 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4728
4729 wr32(hw, I40E_QINT_RQCTL(qp), val);
4730
4731 val = rd32(hw, I40E_QINT_TQCTL(qp));
4732
4733 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4734 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4735
4736 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4737 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4738 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4739 I40E_QINT_TQCTL_INTEVENT_MASK);
4740
4741 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4742 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4743
4744 wr32(hw, I40E_QINT_TQCTL(qp), val);
4745 qp = next;
4746 }
4747 }
4748 } else {
4749 free_irq(pf->pdev->irq, pf);
4750
4751 val = rd32(hw, I40E_PFINT_LNKLST0);
4752 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4753 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4754 val |= I40E_QUEUE_END_OF_LIST
4755 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4756 wr32(hw, I40E_PFINT_LNKLST0, val);
4757
4758 val = rd32(hw, I40E_QINT_RQCTL(qp));
4759 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4760 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4761 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4762 I40E_QINT_RQCTL_INTEVENT_MASK);
4763
4764 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4765 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4766
4767 wr32(hw, I40E_QINT_RQCTL(qp), val);
4768
4769 val = rd32(hw, I40E_QINT_TQCTL(qp));
4770
4771 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4772 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4773 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4774 I40E_QINT_TQCTL_INTEVENT_MASK);
4775
4776 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4777 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4778
4779 wr32(hw, I40E_QINT_TQCTL(qp), val);
4780 }
4781}
4782
4783
4784
4785
4786
4787
4788
4789
4790
4791
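/**
 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
 * @vsi: VSI structure
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/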
4792static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4793{
4794 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4795 struct i40e_ring *ring;
4796
4797 if (!q_vector)
4798 return;
4799
4800
4801 i40e_for_each_ring(ring, q_vector->tx)
4802 ring->q_vector = NULL;
4803
4804 i40e_for_each_ring(ring, q_vector->rx)
4805 ring->q_vector = NULL;
4806
4807
4808 if (vsi->netdev)
4809 netif_napi_del(&q_vector->napi);
4810
4811 vsi->q_vectors[v_idx] = NULL;
4812
4813 kfree_rcu(q_vector, rcu);
4814}
4815
4816
4817
4818
4819
4820
4821
4822
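/**
 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI being un-configured
 *
 * This frees the memory allocated to the q_vectors and
 * deletes references to the NAPI struct.
 **/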
4823static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4824{
4825 int v_idx;
4826
4827 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4828 i40e_free_q_vector(vsi, v_idx);
4829}
4830
4831
4832
4833
4834
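/**
 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
 * @pf: board private structure
 **/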
4835static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4836{
4837
4838 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4839 pci_disable_msix(pf->pdev);
4840 kfree(pf->msix_entries);
4841 pf->msix_entries = NULL;
4842 kfree(pf->irq_pile);
4843 pf->irq_pile = NULL;
4844 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4845 pci_disable_msi(pf->pdev);
4846 }
4847 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4848}
4849
4850
4851
4852
4853
4854
4855
4856
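/**
 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @pf: board private structure
 *
 * Frees the misc vector, returns the IRQ lumps to the pile, frees all VSI
 * q_vectors, and resets the interrupt capability.
 **/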
4857static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4858{
4859 int i;
4860
4861 i40e_free_misc_vector(pf);
4862
4863 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4864 I40E_IWARP_IRQ_PILE_ID);
4865
4866 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4867 for (i = 0; i < pf->num_alloc_vsi; i++)
4868 if (pf->vsi[i])
4869 i40e_vsi_free_q_vectors(pf->vsi[i]);
4870 i40e_reset_interrupt_capability(pf);
4871}
4872
4873
4874
4875
4876
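/**
 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 **/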
4877static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4878{
4879 int q_idx;
4880
4881 if (!vsi->netdev)
4882 return;
4883
4884 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4885 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4886
4887 if (q_vector->rx.ring || q_vector->tx.ring)
4888 napi_enable(&q_vector->napi);
4889 }
4890}
4891
4892
4893
4894
4895
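/**
 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 **/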
4896static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4897{
4898 int q_idx;
4899
4900 if (!vsi->netdev)
4901 return;
4902
4903 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4904 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4905
4906 if (q_vector->rx.ring || q_vector->tx.ring)
4907 napi_disable(&q_vector->napi);
4908 }
4909}
4910
4911
4912
4913
4914
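/**
 * i40e_vsi_close - Shut down a VSI
 * @vsi: the vsi to be quelled
 **/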
4915static void i40e_vsi_close(struct i40e_vsi *vsi)
4916{
4917 struct i40e_pf *pf = vsi->back;

4918 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
4919 i40e_down(vsi);
4920 i40e_vsi_free_irq(vsi);
4921 i40e_vsi_free_tx_resources(vsi);
4922 i40e_vsi_free_rx_resources(vsi);
4923 vsi->current_netdev_flags = 0;
4924 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
4925 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4926 set_bit(__I40E_CLIENT_RESET, pf->state);
4927}
4928
4929
4930
4931
4932
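/**
 * i40e_quiesce_vsi - Pause a given VSI
 * @vsi: the VSI being paused
 **/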
4933static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4934{
4935 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4936 return;
4937
4938 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
4939 if (vsi->netdev && netif_running(vsi->netdev))
4940 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4941 else
4942 i40e_vsi_close(vsi);
4943}
4944
4945
4946
4947
4948
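/**
 * i40e_unquiesce_vsi - Resume a given VSI
 * @vsi: the VSI being resumed
 **/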
4949static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4950{
4951 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
4952 return;
4953
4954 if (vsi->netdev && netif_running(vsi->netdev))
4955 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4956 else
4957 i40e_vsi_open(vsi);
4958}
4959
4960
4961
4962
4963
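/**
 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 **/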
4964static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4965{
4966 int v;
4967
4968 for (v = 0; v < pf->num_alloc_vsi; v++) {
4969 if (pf->vsi[v])
4970 i40e_quiesce_vsi(pf->vsi[v]);
4971 }
4972}
4973
4974
4975
4976
4977
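/**
 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
 * @pf: the PF
 **/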
4978static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4979{
4980 int v;
4981
4982 for (v = 0; v < pf->num_alloc_vsi; v++) {
4983 if (pf->vsi[v])
4984 i40e_unquiesce_vsi(pf->vsi[v]);
4985 }
4986}
4987
4988
4989
4990
4991
4992
4993
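/**
 * i40e_vsi_wait_queues_disabled - Wait for a VSI's queues to be disabled
 * @vsi: the VSI being configured
 *
 * Wait until all queues on a given VSI have been disabled.
 **/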
4994int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4995{
4996 struct i40e_pf *pf = vsi->back;
4997 int i, pf_q, ret;
4998
4999 pf_q = vsi->base_queue;
5000 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
5001
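 /* Check and wait for the Tx queue */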
5002 ret = i40e_pf_txq_wait(pf, pf_q, false);
5003 if (ret) {
5004 dev_info(&pf->pdev->dev,
5005 "VSI seid %d Tx ring %d disable timeout\n",
5006 vsi->seid, pf_q);
5007 return ret;
5008 }
5009
5010 if (!i40e_enabled_xdp_vsi(vsi))
5011 goto wait_rx;
5012
5013
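 /* Check and wait for the XDP Tx queue */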
5014 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
5015 false);
5016 if (ret) {
5017 dev_info(&pf->pdev->dev,
5018 "VSI seid %d XDP Tx ring %d disable timeout\n",
5019 vsi->seid, pf_q);
5020 return ret;
5021 }
5022wait_rx:
5023
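 /* Check and wait for the Rx queue */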
5024 ret = i40e_pf_rxq_wait(pf, pf_q, false);
5025 if (ret) {
5026 dev_info(&pf->pdev->dev,
5027 "VSI seid %d Rx ring %d disable timeout\n",
5028 vsi->seid, pf_q);
5029 return ret;
5030 }
5031 }
5032
5033 return 0;
5034}
5035
5036#ifdef CONFIG_I40E_DCB
5037
5038
5039
5040
5041
5042
5043
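/**
 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
 * @pf: the PF
 *
 * Wait until all queues of all VSIs that are managed by this PF are in
 * the disabled state.
 **/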
5044static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
5045{
5046 int v, ret = 0;
5047
5048 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
5049 if (pf->vsi[v]) {
5050 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
5051 if (ret)
5052 break;
5053 }
5054 }
5055
5056 return ret;
5057}
5058
5059#endif
5060
5061
5062
5063
5064
5065
5066
5067
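/**
 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
 * @pf: pointer to PF
 *
 * Get the TC map for an iSCSI-capable PF; it includes the iSCSI TC
 * as well as LAN TC0.
 **/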
5068static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
5069{
5070 struct i40e_dcb_app_priority_table app;
5071 struct i40e_hw *hw = &pf->hw;
5072 u8 enabled_tc = 1;
5073 u8 tc, i;
5074
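 /* Get the iSCSI APP TLV from the DCBX config */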
5075 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5076
5077 for (i = 0; i < dcbcfg->numapps; i++) {
5078 app = dcbcfg->app[i];
5079 if (app.selector == I40E_APP_SEL_TCPIP &&
5080 app.protocolid == I40E_APP_PROTOID_ISCSI) {
5081 tc = dcbcfg->etscfg.prioritytable[app.priority];
5082 enabled_tc |= BIT(tc);
5083 break;
5084 }
5085 }
5086
5087 return enabled_tc;
5088}
5089
5090
5091
5092
5093
5094
5095
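/**
 * i40e_dcb_get_num_tc - Get the number of TCs from DCBX config
 * @dcbcfg: the corresponding DCBX configuration structure
 *
 * Return the number of traffic classes from the given DCBX configuration.
 **/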
5096static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
5097{
5098 int i, tc_unused = 0;
5099 u8 num_tc = 0;
5100 u8 ret = 0;
5101
5102
5103
5104
5105
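 /* Scan the ETS Config Priority Table to find traffic classes
  * enabled and create a bitmask of enabled TCs
  */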
5106 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5107 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
5108
5109
5110
5111
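 /* Now scan the bitmask to check for contiguous TCs starting with TC0 */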
5112 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5113 if (num_tc & BIT(i)) {
5114 if (!tc_unused) {
5115 ret++;
5116 } else {
5117 pr_err("Non-contiguous TC - Disabling DCB\n");
5118 return 1;
5119 }
5120 } else {
5121 tc_unused = 1;
5122 }
5123 }
5124
5125
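 /* There is always at least one TC */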
5126 if (!ret)
5127 ret = 1;
5128
5129 return ret;
5130}
5131
5132
5133
5134
5135
5136
5137
5138
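/**
 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
 * @dcbcfg: the corresponding DCBX configuration structure
 *
 * Return a bitmap of the traffic classes enabled in the given DCBX
 * configuration; TC0 is always enabled.
 **/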
5139static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5140{
5141 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5142 u8 enabled_tc = 1;
5143 u8 i;
5144
5145 for (i = 0; i < num_tc; i++)
5146 enabled_tc |= BIT(i);
5147
5148 return enabled_tc;
5149}
5150
5151
5152
5153
5154
5155
5156
5157
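/**
 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
 * @pf: PF being queried
 *
 * Query the current MQPRIO configuration and return a bitmap of the
 * enabled traffic classes.
 **/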
5158static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5159{
5160 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5161 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5162 u8 enabled_tc = 1, i;
5163
5164 for (i = 1; i < num_tc; i++)
5165 enabled_tc |= BIT(i);
5166 return enabled_tc;
5167}
5168
5169
5170
5171
5172
5173
5174
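/**
 * i40e_pf_get_num_tc - Get enabled traffic classes for the PF
 * @pf: PF being queried
 *
 * Return the number of traffic classes enabled for the given PF.
 **/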
5175static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5176{
5177 struct i40e_hw *hw = &pf->hw;
5178 u8 i, enabled_tc = 1;
5179 u8 num_tc = 0;
5180 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5181
5182 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5183 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5184
5185
5186 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5187 return 1;
5188
5189
5190 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5191 return i40e_dcb_get_num_tc(dcbcfg);
5192
5193
5194 if (pf->hw.func_caps.iscsi)
5195 enabled_tc = i40e_get_iscsi_tc_map(pf);
5196 else
5197 return 1;
5198
5199 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5200 if (enabled_tc & BIT(i))
5201 num_tc++;
5202 }
5203 return num_tc;
5204}
5205
5206
5207
5208
5209
5210
5211
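/**
 * i40e_pf_get_tc_map - Get a bitmap of the enabled traffic classes
 * @pf: PF being queried
 **/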
5212static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5213{
5214 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5215 return i40e_mqprio_get_enabled_tc(pf);
5216
5217
5218
5219
5220 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5221 return I40E_DEFAULT_TRAFFIC_CLASS;
5222
5223
5224 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5225 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5226
5227
5228 if (pf->hw.func_caps.iscsi)
5229 return i40e_get_iscsi_tc_map(pf);
5230 else
5231 return I40E_DEFAULT_TRAFFIC_CLASS;
5232}
5233
5234
5235
5236
5237
5238
5239
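/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Returns 0 on success, negative value on failure.
 **/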
5240static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5241{
5242 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5243 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5244 struct i40e_pf *pf = vsi->back;
5245 struct i40e_hw *hw = &pf->hw;
5246 i40e_status ret;
5247 u32 tc_bw_max;
5248 int i;
5249
5250
5251 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5252 if (ret) {
5253 dev_info(&pf->pdev->dev,
5254 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5255 i40e_stat_str(&pf->hw, ret),
5256 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5257 return -EINVAL;
5258 }
5259
5260
5261 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5262 NULL);
5263 if (ret) {
5264 dev_info(&pf->pdev->dev,
5265 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5266 i40e_stat_str(&pf->hw, ret),
5267 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5268 return -EINVAL;
5269 }
5270
5271 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5272 dev_info(&pf->pdev->dev,
5273 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5274 bw_config.tc_valid_bits,
5275 bw_ets_config.tc_valid_bits);
5276
5277 }
5278
5279 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5280 vsi->bw_max_quanta = bw_config.max_bw;
5281 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5282 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5283 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5284 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5285 vsi->bw_ets_limit_credits[i] =
5286 le16_to_cpu(bw_ets_config.credits[i]);
5287
5288 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
5289 }
5290
5291 return 0;
5292}
5293
5294
5295
5296
5297
5298
5299
5300
5301
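/**
 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
 * @vsi: the VSI being configured
 * @enabled_tc: TC bitmap
 * @bw_share: BW shared credits per TC
 *
 * Returns 0 on success, negative value on failure.
 **/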
5302static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5303 u8 *bw_share)
5304{
5305 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5306 struct i40e_pf *pf = vsi->back;
5307 i40e_status ret;
5308 int i;
5309
5310
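 /* There is no need to reset BW when mqprio mode is on. */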
5311 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5312 return 0;
5313 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5314 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5315 if (ret)
5316 dev_info(&pf->pdev->dev,
5317 "Failed to reset tx rate for vsi->seid %u\n",
5318 vsi->seid);
5319 return ret;
5320 }
5321 memset(&bw_data, 0, sizeof(bw_data));
5322 bw_data.tc_valid_bits = enabled_tc;
5323 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5324 bw_data.tc_bw_credits[i] = bw_share[i];
5325
5326 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5327 if (ret) {
5328 dev_info(&pf->pdev->dev,
5329 "AQ command Config VSI BW allocation per TC failed = %d\n",
5330 pf->hw.aq.asq_last_status);
5331 return -EINVAL;
5332 }
5333
5334 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5335 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5336
5337 return 0;
5338}
5339
5340
5341
5342
5343
5344
5345
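/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 **/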
5346static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5347{
5348 struct net_device *netdev = vsi->netdev;
5349 struct i40e_pf *pf = vsi->back;
5350 struct i40e_hw *hw = &pf->hw;
5351 u8 netdev_tc = 0;
5352 int i;
5353 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5354
5355 if (!netdev)
5356 return;
5357
5358 if (!enabled_tc) {
5359 netdev_reset_tc(netdev);
5360 return;
5361 }
5362
5363
5364 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5365 return;
5366
5367
5368 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5369
5370
5371
5372
5373
5374
5375
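 /* Only set TC queues for enabled tcs
  *
  * e.g. For a VSI that has TC0 and TC3 enabled the
  * enabled_tc bitmap would be 0x00001001; the driver
  * will set the numtc for netdev as 2 that will be
  * referenced by the netdev layer as TC 0 and 1.
  */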
5376 if (vsi->tc_config.enabled_tc & BIT(i))
5377 netdev_set_tc_queue(netdev,
5378 vsi->tc_config.tc_info[i].netdev_tc,
5379 vsi->tc_config.tc_info[i].qcount,
5380 vsi->tc_config.tc_info[i].qoffset);
5381 }
5382
5383 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5384 return;
5385
5386
5387 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5388
5389 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5390
5391 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5392 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5393 }
5394}
5395
5396
5397
5398
5399
5400
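/**
 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 **/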
5401static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5402 struct i40e_vsi_context *ctxt)
5403{
5404
5405
5406
5407
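 /* copy just the sections touched, not the entire info,
  * since not all sections are valid as returned by
  * update vsi params
  */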
5408 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5409 memcpy(&vsi->info.queue_mapping,
5410 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5411 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5412 sizeof(vsi->info.tc_mapping));
5413}
5414
5415
5416
5417
5418
5419
5420
5421
5422
5423
5424
5425
5426
5427
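/**
 * i40e_vsi_config_tc - Configure VSI Tx scheduler for the given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * Configures the VSI for the TCs in the given bitmap: sets per-TC BW
 * allocation, updates the VSI queue map in firmware, and propagates the
 * resulting TC configuration to the netdev.
 **/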
5428static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5429{
5430 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5431 struct i40e_pf *pf = vsi->back;
5432 struct i40e_hw *hw = &pf->hw;
5433 struct i40e_vsi_context ctxt;
5434 int ret = 0;
5435 int i;
5436
5437
5438 if (vsi->tc_config.enabled_tc == enabled_tc &&
5439 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5440 return ret;
5441
5442
5443 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5444 if (enabled_tc & BIT(i))
5445 bw_share[i] = 1;
5446 }
5447
5448 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5449 if (ret) {
5450 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5451
5452 dev_info(&pf->pdev->dev,
5453 "Failed configuring TC map %d for VSI %d\n",
5454 enabled_tc, vsi->seid);
5455 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5456 &bw_config, NULL);
5457 if (ret) {
5458 dev_info(&pf->pdev->dev,
5459 "Failed querying vsi bw info, err %s aq_err %s\n",
5460 i40e_stat_str(hw, ret),
5461 i40e_aq_str(hw, hw->aq.asq_last_status));
5462 goto out;
5463 }
5464 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5465 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5466
5467 if (!valid_tc)
5468 valid_tc = bw_config.tc_valid_bits;
5469
5470 valid_tc |= 1;
5471 dev_info(&pf->pdev->dev,
5472 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5473 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5474 enabled_tc = valid_tc;
5475 }
5476
5477 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5478 if (ret) {
5479 dev_err(&pf->pdev->dev,
5480 "Unable to configure TC map %d for VSI %d\n",
5481 enabled_tc, vsi->seid);
5482 goto out;
5483 }
5484 }
5485
5486
5487 ctxt.seid = vsi->seid;
5488 ctxt.pf_num = vsi->back->hw.pf_id;
5489 ctxt.vf_num = 0;
5490 ctxt.uplink_seid = vsi->uplink_seid;
5491 ctxt.info = vsi->info;
5492 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5493 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5494 if (ret)
5495 goto out;
5496 } else {
5497 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5498 }
5499
5500
5501
5502
5503 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5504 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5505 vsi->num_queue_pairs);
5506 ret = i40e_vsi_config_rss(vsi);
5507 if (ret) {
5508 dev_info(&vsi->back->pdev->dev,
5509 "Failed to reconfig rss for num_queues\n");
5510 return ret;
5511 }
5512 vsi->reconfig_rss = false;
5513 }
5514 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5515 ctxt.info.valid_sections |=
5516 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5517 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5518 }
5519
5520
5521
5522
5523 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5524 if (ret) {
5525 dev_info(&pf->pdev->dev,
5526 "Update vsi tc config failed, err %s aq_err %s\n",
5527 i40e_stat_str(hw, ret),
5528 i40e_aq_str(hw, hw->aq.asq_last_status));
5529 goto out;
5530 }
5531
5532 i40e_vsi_update_queue_map(vsi, &ctxt);
5533 vsi->info.valid_sections = 0;
5534
5535
5536 ret = i40e_vsi_get_bw_info(vsi);
5537 if (ret) {
5538 dev_info(&pf->pdev->dev,
5539 "Failed updating vsi bw info, err %s aq_err %s\n",
5540 i40e_stat_str(hw, ret),
5541 i40e_aq_str(hw, hw->aq.asq_last_status));
5542 goto out;
5543 }
5544
5545
5546 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5547out:
5548 return ret;
5549}
5550
5551
5552
5553
5554
5555
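/**
 * i40e_get_link_speed - Returns link speed for the interface
 * @vsi: VSI to be queried
 **/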
5556static int i40e_get_link_speed(struct i40e_vsi *vsi)
5557{
5558 struct i40e_pf *pf = vsi->back;
5559
5560 switch (pf->hw.phy.link_info.link_speed) {
5561 case I40E_LINK_SPEED_40GB:
5562 return 40000;
5563 case I40E_LINK_SPEED_25GB:
5564 return 25000;
5565 case I40E_LINK_SPEED_20GB:
5566 return 20000;
5567 case I40E_LINK_SPEED_10GB:
5568 return 10000;
5569 case I40E_LINK_SPEED_1GB:
5570 return 1000;
5571 default:
5572 return -EINVAL;
5573 }
5574}
5575
5576
5577
5578
5579
5580
5581
5582
5583
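/**
 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
 * @vsi: VSI to be configured
 * @seid: seid of the channel/VSI
 * @max_tx_rate: max TX rate (in Mbps) to be configured as BW limit
 **/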
5584int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5585{
5586 struct i40e_pf *pf = vsi->back;
5587 u64 credits = 0;
5588 int speed = 0;
5589 int ret = 0;
5590
5591 speed = i40e_get_link_speed(vsi);
5592 if (max_tx_rate > speed) {
5593 dev_err(&pf->pdev->dev,
5594 "Invalid max tx rate %llu specified for VSI seid %d.",
5595 max_tx_rate, seid);
5596 return -EINVAL;
5597 }
5598 if (max_tx_rate && max_tx_rate < 50) {
5599 dev_warn(&pf->pdev->dev,
5600 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5601 max_tx_rate = 50;
5602 }
5603
5604
5605 credits = max_tx_rate;
5606 do_div(credits, I40E_BW_CREDIT_DIVISOR);
5607 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5608 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5609 if (ret)
5610 dev_err(&pf->pdev->dev,
5611 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5612 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5613 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5614 return ret;
5615}
5616
5617
5618
5619
5620
5621
5622
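/**
 * i40e_remove_queue_channels - Remove queue channels for the TCs
 * @vsi: VSI to be configured
 *
 * Remove the queue channels (and their cloud filters) for the TCs.
 **/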
5623static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5624{
5625 enum i40e_admin_queue_err last_aq_status;
5626 struct i40e_cloud_filter *cfilter;
5627 struct i40e_channel *ch, *ch_tmp;
5628 struct i40e_pf *pf = vsi->back;
5629 struct hlist_node *node;
5630 int ret, i;
5631
5632
5633
5634
5635 vsi->current_rss_size = 0;
5636
5637
5638 if (list_empty(&vsi->ch_list))
5639 return;
5640
5641 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5642 struct i40e_vsi *p_vsi;
5643
5644 list_del(&ch->list);
5645 p_vsi = ch->parent_vsi;
5646 if (!p_vsi || !ch->initialized) {
5647 kfree(ch);
5648 continue;
5649 }
5650
5651 for (i = 0; i < ch->num_queue_pairs; i++) {
5652 struct i40e_ring *tx_ring, *rx_ring;
5653 u16 pf_q;
5654
5655 pf_q = ch->base_queue + i;
5656 tx_ring = vsi->tx_rings[pf_q];
5657 tx_ring->ch = NULL;
5658
5659 rx_ring = vsi->rx_rings[pf_q];
5660 rx_ring->ch = NULL;
5661 }
5662
5663
5664 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5665 if (ret)
5666 dev_info(&vsi->back->pdev->dev,
5667 "Failed to reset tx rate for ch->seid %u\n",
5668 ch->seid);
5669
5670
5671 hlist_for_each_entry_safe(cfilter, node,
5672 &pf->cloud_filter_list, cloud_node) {
5673 if (cfilter->seid != ch->seid)
5674 continue;
5675
5676 hash_del(&cfilter->cloud_node);
5677 if (cfilter->dst_port)
5678 ret = i40e_add_del_cloud_filter_big_buf(vsi,
5679 cfilter,
5680 false);
5681 else
5682 ret = i40e_add_del_cloud_filter(vsi, cfilter,
5683 false);
5684 last_aq_status = pf->hw.aq.asq_last_status;
5685 if (ret)
5686 dev_info(&pf->pdev->dev,
5687 "Failed to delete cloud filter, err %s aq_err %s\n",
5688 i40e_stat_str(&pf->hw, ret),
5689 i40e_aq_str(&pf->hw, last_aq_status));
5690 kfree(cfilter);
5691 }
5692
5693
5694 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
5695 NULL);
5696 if (ret)
5697 dev_err(&vsi->back->pdev->dev,
5698 "unable to remove channel (%d) for parent VSI(%d)\n",
5699 ch->seid, p_vsi->seid);
5700 kfree(ch);
5701 }
5702 INIT_LIST_HEAD(&vsi->ch_list);
5703}
5704
5705
5706
5707
5708
5709
5710
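/**
 * i40e_is_any_channel - channel exist or not
 * @vsi: ptr to VSI to which channels are associated with
 *
 * Returns true if any channel exists for the given VSI, else false.
 **/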
5711static bool i40e_is_any_channel(struct i40e_vsi *vsi)
5712{
5713 struct i40e_channel *ch, *ch_tmp;
5714
5715 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5716 if (ch->initialized)
5717 return true;
5718 }
5719
5720 return false;
5721}
5722
5723
5724
5725
5726
5727
5728
5729
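/**
 * i40e_get_max_queues_for_channel - max queue count used by any channel
 * @vsi: ptr to VSI to which channels are associated with
 *
 * Helper function which returns the maximum value among the queue counts
 * set on the channels/TCs created.
 **/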
5730static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
5731{
5732 struct i40e_channel *ch, *ch_tmp;
5733 int max = 0;
5734
5735 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5736 if (!ch->initialized)
5737 continue;
5738 if (ch->num_queue_pairs > max)
5739 max = ch->num_queue_pairs;
5740 }
5741
5742 return max;
5743}
5744
5745
5746
5747
5748
5749
5750
5751
5752
5753
5754
5755
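/**
 * i40e_validate_num_queues - validate num_queues w.r.t. channel
 * @pf: ptr to PF device
 * @num_queues: number of queues
 * @vsi: the parent VSI
 * @reconfig_rss: indicates whether RSS needs to be reconfigured
 *
 * Validate the number of queues in the context of a new channel which is
 * about to be created; sets *reconfig_rss when RSS must be reprogrammed.
 **/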
5756static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
5757 struct i40e_vsi *vsi, bool *reconfig_rss)
5758{
5759 int max_ch_queues;
5760
5761 if (!reconfig_rss)
5762 return -EINVAL;
5763
5764 *reconfig_rss = false;
5765 if (vsi->current_rss_size) {
5766 if (num_queues > vsi->current_rss_size) {
5767 dev_dbg(&pf->pdev->dev,
5768 "Error: num_queues (%d) > vsi's current_size(%d)\n",
5769 num_queues, vsi->current_rss_size);
5770 return -EINVAL;
5771 } else if ((num_queues < vsi->current_rss_size) &&
5772 (!is_power_of_2(num_queues))) {
5773 dev_dbg(&pf->pdev->dev,
5774 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
5775 num_queues, vsi->current_rss_size);
5776 return -EINVAL;
5777 }
5778 }
5779
5780 if (!is_power_of_2(num_queues)) {
5781
5782
5783
5784
5785
5786 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
5787 if (num_queues < max_ch_queues) {
5788 dev_dbg(&pf->pdev->dev,
5789 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
5790 num_queues, max_ch_queues);
5791 return -EINVAL;
5792 }
5793 *reconfig_rss = true;
5794 }
5795
5796 return 0;
5797}
5798
5799
5800
5801
5802
5803
5804
5805
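/**
 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
 * @vsi: the VSI being setup
 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
 *
 * Reconfigures RSS by reprogramming the LUT using 'rss_size'.
 **/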
5806static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
5807{
5808 struct i40e_pf *pf = vsi->back;
5809 u8 seed[I40E_HKEY_ARRAY_SIZE];
5810 struct i40e_hw *hw = &pf->hw;
5811 int local_rss_size;
5812 u8 *lut;
5813 int ret;
5814
5815 if (!vsi->rss_size)
5816 return -EINVAL;
5817
5818 if (rss_size > vsi->rss_size)
5819 return -EINVAL;
5820
5821 local_rss_size = min_t(int, vsi->rss_size, rss_size);
5822 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
5823 if (!lut)
5824 return -ENOMEM;
5825
5826
5827 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
5828
5829
5830
5831
5832 if (vsi->rss_hkey_user)
5833 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
5834 else
5835 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
5836
5837 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
5838 if (ret) {
5839 dev_info(&pf->pdev->dev,
5840 "Cannot set RSS lut, err %s aq_err %s\n",
5841 i40e_stat_str(hw, ret),
5842 i40e_aq_str(hw, hw->aq.asq_last_status));
5843 kfree(lut);
5844 return ret;
5845 }
5846 kfree(lut);
5847
5848
5849 if (!vsi->orig_rss_size)
5850 vsi->orig_rss_size = vsi->rss_size;
5851 vsi->current_rss_size = local_rss_size;
5852
5853 return ret;
5854}
5855
5856
5857
5858
5859
5860
5861
5862
5863
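/**
 * i40e_channel_setup_queue_map - Setup a channel queue map
 * @pf: ptr to PF device
 * @ctxt: VSI context structure
 * @ch: ptr to channel structure
 *
 * Setup the queue map for a specific channel.
 **/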
5864static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
5865 struct i40e_vsi_context *ctxt,
5866 struct i40e_channel *ch)
5867{
5868 u16 qcount, qmap, sections = 0;
5869 u8 offset = 0;
5870 int pow;
5871
5872 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
5873 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
5874
5875 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
5876 ch->num_queue_pairs = qcount;
5877
5878
5879 pow = ilog2(qcount);
5880 if (!is_power_of_2(qcount))
5881 pow++;
5882
5883 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5884 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
5885
5886
5887 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
5888
5889 ctxt->info.up_enable_bits = 0x1;
5890 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5891 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
5892 ctxt->info.valid_sections |= cpu_to_le16(sections);
5893}
5894
5895
5896
5897
5898
5899
5900
5901
5902
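/**
 * i40e_add_channel - add a channel by adding a VSI
 * @pf: ptr to PF device
 * @uplink_seid: underlying HW switching element (VEB) ID
 * @ch: ptr to channel structure
 *
 * Add a channel (VSI) using the add_vsi AQ command and the queue map.
 **/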
5903static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
5904 struct i40e_channel *ch)
5905{
5906 struct i40e_hw *hw = &pf->hw;
5907 struct i40e_vsi_context ctxt;
5908 u8 enabled_tc = 0x1;
5909 int ret;
5910
5911 if (ch->type != I40E_VSI_VMDQ2) {
5912 dev_info(&pf->pdev->dev,
5913 "add new vsi failed, ch->type %d\n", ch->type);
5914 return -EINVAL;
5915 }
5916
5917 memset(&ctxt, 0, sizeof(ctxt));
5918 ctxt.pf_num = hw->pf_id;
5919 ctxt.vf_num = 0;
5920 ctxt.uplink_seid = uplink_seid;
5921 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5922 if (ch->type == I40E_VSI_VMDQ2)
5923 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5924
5925 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
5926 ctxt.info.valid_sections |=
5927 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5928 ctxt.info.switch_id =
5929 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5930 }
5931
5932
5933 i40e_channel_setup_queue_map(pf, &ctxt, ch);
5934
5935
5936 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5937 if (ret) {
5938 dev_info(&pf->pdev->dev,
5939 "add new vsi failed, err %s aq_err %s\n",
5940 i40e_stat_str(&pf->hw, ret),
5941 i40e_aq_str(&pf->hw,
5942 pf->hw.aq.asq_last_status));
5943 return -ENOENT;
5944 }
5945
5946
5947
5948
5949 ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
5950 ch->seid = ctxt.seid;
5951 ch->vsi_number = ctxt.vsi_number;
5952 ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);
5953
5954
5955
5956
5957
5958 ch->info.mapping_flags = ctxt.info.mapping_flags;
5959 memcpy(&ch->info.queue_mapping,
5960 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
5961 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
5962 sizeof(ctxt.info.tc_mapping));
5963
5964 return 0;
5965}
5966
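/**
 * i40e_channel_config_bw - Configure BW allocation per TC for a channel VSI
 * @vsi: the parent VSI
 * @ch: the channel whose VSI is being configured
 * @bw_share: BW shared credits per TC
 **/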
5967static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
5968 u8 *bw_share)
5969{
5970 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5971 i40e_status ret;
5972 int i;
5973
5974 memset(&bw_data, 0, sizeof(bw_data));
5975 bw_data.tc_valid_bits = ch->enabled_tc;
5976 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5977 bw_data.tc_bw_credits[i] = bw_share[i];
5978
5979 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
5980 &bw_data, NULL);
5981 if (ret) {
5982 dev_info(&vsi->back->pdev->dev,
5983 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
5984 vsi->back->hw.aq.asq_last_status, ch->seid);
5985 return -EINVAL;
5986 }
5987
5988 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5989 ch->info.qs_handle[i] = bw_data.qs_handles[i];
5990
5991 return 0;
5992}
5993
5994
5995
5996
5997
5998
5999
6000
6001
6002
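/**
 * i40e_channel_config_tx_ring - config TX ring associated with new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Configure the TX and RX rings associated with the channel, since the
 * queues are borrowed from the parent VSI.
 **/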
6003static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
6004 struct i40e_vsi *vsi,
6005 struct i40e_channel *ch)
6006{
6007 i40e_status ret;
6008 int i;
6009 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
6010
6011
6012 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6013 if (ch->enabled_tc & BIT(i))
6014 bw_share[i] = 1;
6015 }
6016
6017
6018 ret = i40e_channel_config_bw(vsi, ch, bw_share);
6019 if (ret) {
6020 dev_info(&vsi->back->pdev->dev,
6021 "Failed configuring TC map %d for channel (seid %u)\n",
6022 ch->enabled_tc, ch->seid);
6023 return ret;
6024 }
6025
6026 for (i = 0; i < ch->num_queue_pairs; i++) {
6027 struct i40e_ring *tx_ring, *rx_ring;
6028 u16 pf_q;
6029
6030 pf_q = ch->base_queue + i;
6031
6032
6033
6034
6035 tx_ring = vsi->tx_rings[pf_q];
6036 tx_ring->ch = ch;
6037
6038
6039 rx_ring = vsi->rx_rings[pf_q];
6040 rx_ring->ch = ch;
6041 }
6042
6043 return 0;
6044}
6045
6046
6047
6048
6049
6050
6051
6052
6053
6054
6055
6056
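/**
 * i40e_setup_hw_channel - setup new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @uplink_seid: underlying HW switching element (VEB) ID
 * @type: type of channel to be created (VMDq2/VF)
 *
 * Setup a new channel (VSI) based on the specified type and uplink, and
 * configure the TX rings accordingly.
 **/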
6057static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
6058 struct i40e_vsi *vsi,
6059 struct i40e_channel *ch,
6060 u16 uplink_seid, u8 type)
6061{
6062 int ret;
6063
6064 ch->initialized = false;
6065 ch->base_queue = vsi->next_base_queue;
6066 ch->type = type;
6067
6068
6069 ret = i40e_add_channel(pf, uplink_seid, ch);
6070 if (ret) {
6071 dev_info(&pf->pdev->dev,
6072 "failed to add_channel using uplink_seid %u\n",
6073 uplink_seid);
6074 return ret;
6075 }
6076
6077
6078 ch->initialized = true;
6079
6080
6081 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
6082 if (ret) {
6083 dev_info(&pf->pdev->dev,
6084 "failed to configure TX rings for channel %u\n",
6085 ch->seid);
6086 return ret;
6087 }
6088
6089
6090 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
6091 dev_dbg(&pf->pdev->dev,
6092 "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
6093 ch->seid, ch->vsi_number, ch->stat_counter_idx,
6094 ch->num_queue_pairs,
6095 vsi->next_base_queue);
6096 return ret;
6097}
6098
6099
6100
6101
6102
6103
6104
6105
6106
6107
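/**
 * i40e_setup_channel - setup new channel using uplink element
 * @pf: ptr to PF device
 * @vsi: pointer to the VSI to set up the channel within
 * @ch: ptr to channel structure
 *
 * Returns true once the channel has been successfully set up.
 **/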
6108static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
6109 struct i40e_channel *ch)
6110{
6111 u8 vsi_type;
6112 u16 seid;
6113 int ret;
6114
6115 if (vsi->type == I40E_VSI_MAIN) {
6116 vsi_type = I40E_VSI_VMDQ2;
6117 } else {
6118 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6119 vsi->type);
6120 return false;
6121 }
6122
6123
6124 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6125
6126
6127 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6128 if (ret) {
6129 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6130 return false;
6131 }
6132
6133 return ch->initialized;
6134}
6135
6136
6137
6138
6139
6140
6141
6142
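/**
 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
 * @vsi: ptr to VSI which has PF backing
 *
 * Sets up the switch mode if it needs to be changed, restricting cloud
 * filters to the non-tunneled mode supported by the hardware.
 **/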
6143static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6144{
6145 u8 mode;
6146 struct i40e_pf *pf = vsi->back;
6147 struct i40e_hw *hw = &pf->hw;
6148 int ret;
6149
6150 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6151 if (ret)
6152 return -EINVAL;
6153
6154 if (hw->dev_caps.switch_mode) {
6155
6156
6157
6158 u32 switch_mode = hw->dev_caps.switch_mode &
6159 I40E_SWITCH_MODE_MASK;
6160 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6161 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6162 return 0;
6163 dev_err(&pf->pdev->dev,
6164 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6165 hw->dev_caps.switch_mode);
6166 return -EINVAL;
6167 }
6168 }
6169
6170
6171 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6172
6173
6174 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6175
6176
6177 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6178
6179
6180 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6181 pf->last_sw_conf_valid_flags,
6182 mode, NULL);
6183 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6184 dev_err(&pf->pdev->dev,
6185 "couldn't set switch config bits, err %s aq_err %s\n",
6186 i40e_stat_str(hw, ret),
6187 i40e_aq_str(hw,
6188 hw->aq.asq_last_status));
6189
6190 return ret;
6191}
6192
6193
6194
6195
6196
6197
6198
6199
6200
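/**
 * i40e_create_queue_channel - function to create channel
 * @vsi: VSI to be configured
 * @ch: ptr to channel (it contains channel specific params)
 *
 * Creates a channel (VSI) using the num_queues specified by the user and
 * reconfigures RSS if needed.
 **/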
6201int i40e_create_queue_channel(struct i40e_vsi *vsi,
6202 struct i40e_channel *ch)
6203{
6204 struct i40e_pf *pf = vsi->back;
6205 bool reconfig_rss;
6206 int err;
6207
6208 if (!ch)
6209 return -EINVAL;
6210
6211 if (!ch->num_queue_pairs) {
6212 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6213 ch->num_queue_pairs);
6214 return -EINVAL;
6215 }
6216
6217
6218 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6219 &reconfig_rss);
6220 if (err) {
6221 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6222 ch->num_queue_pairs);
6223 return -EINVAL;
6224 }
6225
6226
6227
6228
6229 if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
6230 (!i40e_is_any_channel(vsi))) {
6231 if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
6232 dev_dbg(&pf->pdev->dev,
6233 "Failed to create channel. Override queues (%u) not power of 2\n",
6234 vsi->tc_config.tc_info[0].qcount);
6235 return -EINVAL;
6236 }
6237
6238 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6239 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6240
6241 if (vsi->type == I40E_VSI_MAIN) {
6242 if (pf->flags & I40E_FLAG_TC_MQPRIO)
6243 i40e_do_reset(pf, I40E_PF_RESET_FLAG,
6244 true);
6245 else
6246 i40e_do_reset_safe(pf,
6247 I40E_PF_RESET_FLAG);
6248 }
6249 }
6250
6251
6252
6253 }
6254
6255
6256
6257
6258 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6259 dev_dbg(&pf->pdev->dev,
6260 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6261 vsi->cnt_q_avail, ch->num_queue_pairs);
6262 return -EINVAL;
6263 }
6264
6265
6266 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6267 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6268 if (err) {
6269 dev_info(&pf->pdev->dev,
6270 "Error: unable to reconfig rss for num_queues (%u)\n",
6271 ch->num_queue_pairs);
6272 return -EINVAL;
6273 }
6274 }
6275
6276 if (!i40e_setup_channel(pf, vsi, ch)) {
6277 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6278 return -EINVAL;
6279 }
6280
6281 dev_info(&pf->pdev->dev,
6282 "Setup channel (id:%u) utilizing num_queues %d\n",
6283 ch->seid, ch->num_queue_pairs);
6284
6285
6286 if (ch->max_tx_rate) {
6287 u64 credits = ch->max_tx_rate;
6288
6289 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6290 return -EINVAL;
6291
6292 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6293 dev_dbg(&pf->pdev->dev,
6294 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6295 ch->max_tx_rate,
6296 credits,
6297 ch->seid);
6298 }
6299
6300
6301 ch->parent_vsi = vsi;
6302
6303
6304 vsi->cnt_q_avail -= ch->num_queue_pairs;
6305
6306 return 0;
6307}
6308
6309
6310
6311
6312
6313
6314
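/**
 * i40e_configure_queue_channels - Add queue channel for the given TCs
 * @vsi: VSI to be configured
 *
 * Configures the queue channel mapping to the given TCs.
 **/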
6315static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6316{
6317 struct i40e_channel *ch;
6318 u64 max_rate = 0;
6319 int ret = 0, i;
6320
6321
6322 vsi->tc_seid_map[0] = vsi->seid;
6323 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6324 if (vsi->tc_config.enabled_tc & BIT(i)) {
6325 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6326 if (!ch) {
6327 ret = -ENOMEM;
6328 goto err_free;
6329 }
6330
6331 INIT_LIST_HEAD(&ch->list);
6332 ch->num_queue_pairs =
6333 vsi->tc_config.tc_info[i].qcount;
6334 ch->base_queue =
6335 vsi->tc_config.tc_info[i].qoffset;
6336
6337
6338
6339
6340 max_rate = vsi->mqprio_qopt.max_rate[i];
6341 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6342 ch->max_tx_rate = max_rate;
6343
6344 list_add_tail(&ch->list, &vsi->ch_list);
6345
6346 ret = i40e_create_queue_channel(vsi, ch);
6347 if (ret) {
6348 dev_err(&vsi->back->pdev->dev,
6349 "Failed creating queue channel with TC%d: queues %d\n",
6350 i, ch->num_queue_pairs);
6351 goto err_free;
6352 }
6353 vsi->tc_seid_map[i] = ch->seid;
6354 }
6355 }
6356 return ret;
6357
6358err_free:
6359 i40e_remove_queue_channels(vsi);
6360 return ret;
6361}
6362
6363
6364
6365
6366
6367
6368
6369
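/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures the given TC bitmap for the VEB (switching) element.
 **/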
6370int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6371{
6372 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6373 struct i40e_pf *pf = veb->pf;
6374 int ret = 0;
6375 int i;
6376
6377
6378 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6379 return ret;
6380
6381 bw_data.tc_valid_bits = enabled_tc;
6382
6383
6384
6385 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6386 if (enabled_tc & BIT(i))
6387 bw_data.tc_bw_share_credits[i] = 1;
6388 }
6389
6390 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6391 &bw_data, NULL);
6392 if (ret) {
6393 dev_info(&pf->pdev->dev,
6394 "VEB bw config failed, err %s aq_err %s\n",
6395 i40e_stat_str(&pf->hw, ret),
6396 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6397 goto out;
6398 }
6399
6400
6401 ret = i40e_veb_get_bw_info(veb);
6402 if (ret) {
6403 dev_info(&pf->pdev->dev,
6404 "Failed getting veb bw config, err %s aq_err %s\n",
6405 i40e_stat_str(&pf->hw, ret),
6406 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6407 }
6408
6409out:
6410 return ret;
6411}
6412
6413#ifdef CONFIG_I40E_DCB
6414
6415
6416
6417
6418
6419
6420
6421
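/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEBs and VSIs on a given PF; it is assumed that
 * the caller has quiesced all the VSIs before calling this function.
 **/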
6422static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6423{
6424 u8 tc_map = 0;
6425 int ret;
6426 u8 v;
6427
6428
6429 tc_map = i40e_pf_get_tc_map(pf);
6430 if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
6431 return;
6432
6433 for (v = 0; v < I40E_MAX_VEB; v++) {
6434 if (!pf->veb[v])
6435 continue;
6436 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6437 if (ret) {
6438 dev_info(&pf->pdev->dev,
6439 "Failed configuring TC for VEB seid=%d\n",
6440 pf->veb[v]->seid);
6441
6442 }
6443 }
6444
6445
6446 for (v = 0; v < pf->num_alloc_vsi; v++) {
6447 if (!pf->vsi[v])
6448 continue;
6449
6450
6451
6452
6453 if (v == pf->lan_vsi)
6454 tc_map = i40e_pf_get_tc_map(pf);
6455 else
6456 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6457
6458 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6459 if (ret) {
6460 dev_info(&pf->pdev->dev,
6461 "Failed configuring TC for VSI seid=%d\n",
6462 pf->vsi[v]->seid);
6463
6464 } else {
6465
6466 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6467 if (pf->vsi[v]->netdev)
6468 i40e_dcbnl_set_all(pf->vsi[v]);
6469 }
6470 }
6471}
6472
6473
6474
6475
6476
6477
6478
6479
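/**
 * i40e_resume_port_tx - Resume port Tx
 * @pf: PF struct
 *
 * Resume a port's Tx and issue a PF reset in case of failure.
 **/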
6480static int i40e_resume_port_tx(struct i40e_pf *pf)
6481{
6482 struct i40e_hw *hw = &pf->hw;
6483 int ret;
6484
6485 ret = i40e_aq_resume_port_tx(hw, NULL);
6486 if (ret) {
6487 dev_info(&pf->pdev->dev,
6488 "Resume Port Tx failed, err %s aq_err %s\n",
6489 i40e_stat_str(&pf->hw, ret),
6490 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6491
6492 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6493 i40e_service_event_schedule(pf);
6494 }
6495
6496 return ret;
6497}
6498
6499
6500
6501
6502
6503
6504
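/**
 * i40e_suspend_port_tx - Suspend port Tx
 * @pf: PF struct
 *
 * Suspend a port's Tx and issue a PF reset in case of failure.
 **/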
6505static int i40e_suspend_port_tx(struct i40e_pf *pf)
6506{
6507 struct i40e_hw *hw = &pf->hw;
6508 int ret;
6509
6510 ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
6511 if (ret) {
6512 dev_info(&pf->pdev->dev,
6513 "Suspend Port Tx failed, err %s aq_err %s\n",
6514 i40e_stat_str(&pf->hw, ret),
6515 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6516
6517 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6518 i40e_service_event_schedule(pf);
6519 }
6520
6521 return ret;
6522}
6523
6524
6525
6526
6527
6528
6529
6530
6531
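/**
 * i40e_hw_set_dcb_config - Program new DCBX settings into HW
 * @pf: PF being configured
 * @new_cfg: New DCBX configuration
 *
 * Program the DCB settings into HW and reconfigure VEBs/VSIs on the
 * given PF.
 **/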
6532static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
6533 struct i40e_dcbx_config *new_cfg)
6534{
6535 struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config;
6536 int ret;
6537
6538
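 /* Check if a reconfiguration is needed */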
6539 if (!memcmp(new_cfg, old_cfg, sizeof(*new_cfg))) {
6540 dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n");
6541 return 0;
6542 }
6543
6544
6545 i40e_pf_quiesce_all_vsi(pf);
6546
6547
6548 *old_cfg = *new_cfg;
6549 old_cfg->etsrec = old_cfg->etscfg;
6550 ret = i40e_set_dcb_config(&pf->hw);
6551 if (ret) {
6552 dev_info(&pf->pdev->dev,
6553 "Set DCB Config failed, err %s aq_err %s\n",
6554 i40e_stat_str(&pf->hw, ret),
6555 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6556 goto out;
6557 }
6558
6559
6560 i40e_dcb_reconfigure(pf);
6561out:
6562
6563 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
6564
6565 ret = i40e_resume_port_tx(pf);
6566
6567 if (ret)
6568 goto err;
6569 i40e_pf_unquiesce_all_vsi(pf);
6570 }
6571err:
6572 return ret;
6573}
6574
6575
6576
6577
6578
6579
6580
6581
6582
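/**
 * i40e_hw_dcb_config - Program new DCBX settings into HW
 * @pf: PF being configured
 * @new_cfg: New DCBX configuration
 *
 * Program the DCB registers directly and reconfigure VEBs/VSIs on the
 * given PF.
 **/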
6583int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
6584{
6585 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
6586 u8 prio_type[I40E_MAX_TRAFFIC_CLASS] = {0};
6587 u32 mfs_tc[I40E_MAX_TRAFFIC_CLASS];
6588 struct i40e_dcbx_config *old_cfg;
6589 u8 mode[I40E_MAX_TRAFFIC_CLASS];
6590 struct i40e_rx_pb_config pb_cfg;
6591 struct i40e_hw *hw = &pf->hw;
6592 u8 num_ports = hw->num_ports;
6593 bool need_reconfig;
6594 int ret = -EINVAL;
6595 u8 lltc_map = 0;
6596 u8 tc_map = 0;
6597 u8 new_numtc;
6598 u8 i;
6599
6600 dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n");
6601
6602
6603
6604
6605
6606
6607
6608
6609
6610
6611
6612
6613
6614
6615
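 /* Un-pack the new configuration to program the ETS HW via the shared
  * code helpers: numtc and tcmap, LLTC map, arbiter mode, max exponent
  * (credit refills), port count, PFC bit-map, priority table, BW % per
  * TC, TSA table and the MFS TC table.
  */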
6616 new_numtc = i40e_dcb_get_num_tc(new_cfg);
6617
6618 memset(&ets_data, 0, sizeof(ets_data));
6619 for (i = 0; i < new_numtc; i++) {
6620 tc_map |= BIT(i);
6621 switch (new_cfg->etscfg.tsatable[i]) {
6622 case I40E_IEEE_TSA_ETS:
6623 prio_type[i] = I40E_DCB_PRIO_TYPE_ETS;
6624 ets_data.tc_bw_share_credits[i] =
6625 new_cfg->etscfg.tcbwtable[i];
6626 break;
6627 case I40E_IEEE_TSA_STRICT:
6628 prio_type[i] = I40E_DCB_PRIO_TYPE_STRICT;
6629 lltc_map |= BIT(i);
6630 ets_data.tc_bw_share_credits[i] =
6631 I40E_DCB_STRICT_PRIO_CREDITS;
6632 break;
6633 default:
6634
6635 need_reconfig = false;
6636 goto out;
6637 }
6638 }
6639
6640 old_cfg = &hw->local_dcbx_config;
6641
6642 need_reconfig = i40e_dcb_need_reconfig(pf, old_cfg, new_cfg);
6643
6644
6645
6646
6647 if (need_reconfig) {
6648
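 /* Enable DCB tagging only when more than one TC */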
6649 if (new_numtc > 1)
6650 pf->flags |= I40E_FLAG_DCB_ENABLED;
6651 else
6652 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6653
6654 set_bit(__I40E_PORT_SUSPENDED, pf->state);
6655
6656 i40e_pf_quiesce_all_vsi(pf);
6657 ret = i40e_suspend_port_tx(pf);
6658 if (ret)
6659 goto err;
6660 }
6661
6662
6663 ets_data.tc_valid_bits = tc_map;
6664 ets_data.tc_strict_priority_flags = lltc_map;
6665 ret = i40e_aq_config_switch_comp_ets
6666 (hw, pf->mac_seid, &ets_data,
6667 i40e_aqc_opc_modify_switching_comp_ets, NULL);
6668 if (ret) {
6669 dev_info(&pf->pdev->dev,
6670 "Modify Port ETS failed, err %s aq_err %s\n",
6671 i40e_stat_str(&pf->hw, ret),
6672 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6673 goto out;
6674 }
6675
6676
6677 memset(&mode, I40E_DCB_ARB_MODE_ROUND_ROBIN, sizeof(mode));
6678 i40e_dcb_hw_set_num_tc(hw, new_numtc);
6679 i40e_dcb_hw_rx_fifo_config(hw, I40E_DCB_ARB_MODE_ROUND_ROBIN,
6680 I40E_DCB_ARB_MODE_STRICT_PRIORITY,
6681 I40E_DCB_DEFAULT_MAX_EXPONENT,
6682 lltc_map);
6683 i40e_dcb_hw_rx_cmd_monitor_config(hw, new_numtc, num_ports);
6684 i40e_dcb_hw_rx_ets_bw_config(hw, new_cfg->etscfg.tcbwtable, mode,
6685 prio_type);
6686 i40e_dcb_hw_pfc_config(hw, new_cfg->pfc.pfcenable,
6687 new_cfg->etscfg.prioritytable);
6688 i40e_dcb_hw_rx_up2tc_config(hw, new_cfg->etscfg.prioritytable);
6689
6690
6691 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6692 mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu;
6693 mfs_tc[i] += I40E_PACKET_HDR_PAD;
6694 }
6695
6696 i40e_dcb_hw_calculate_pool_sizes(hw, num_ports,
6697 false, new_cfg->pfc.pfcenable,
6698 mfs_tc, &pb_cfg);
6699 i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg);
6700
6701
6702 pf->pb_cfg = pb_cfg;
6703
6704
6705 ret = i40e_aq_dcb_updated(&pf->hw, NULL);
6706 if (ret) {
6707 dev_info(&pf->pdev->dev,
6708 "DCB Updated failed, err %s aq_err %s\n",
6709 i40e_stat_str(&pf->hw, ret),
6710 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6711 goto out;
6712 }
6713
6714
6715 *old_cfg = *new_cfg;
6716
6717
6718 i40e_dcb_reconfigure(pf);
6719out:
6720
6721 if (need_reconfig) {
6722 ret = i40e_resume_port_tx(pf);
6723
6724 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
6725
6726 if (ret)
6727 goto err;
6728
6729
6730 ret = i40e_pf_wait_queues_disabled(pf);
6731 if (ret) {
6732
6733 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6734 i40e_service_event_schedule(pf);
6735 goto err;
6736 } else {
6737 i40e_pf_unquiesce_all_vsi(pf);
6738 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
6739 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
6740 }
6741
6742 if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
6743 ret = i40e_hw_set_dcb_config(pf, new_cfg);
6744 }
6745
6746err:
6747 return ret;
6748}
6749
6750
6751
6752
6753
6754
6755
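/**
 * i40e_dcb_sw_default_config - Set default DCB configuration when DCB in SW
 * @pf: PF being configured
 *
 * Returns 0 on success, negative value on failure.
 **/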
6756int i40e_dcb_sw_default_config(struct i40e_pf *pf)
6757{
6758 struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config;
6759 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
6760 struct i40e_hw *hw = &pf->hw;
6761 int err;
6762
6763 if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) {
6764
6765 memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config));
6766 pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
6767 pf->tmp_cfg.etscfg.maxtcs = 0;
6768 pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
6769 pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
6770 pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING;
6771 pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
6772
6773 pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS;
6774 pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE;
6775 pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO;
6776 pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE;
6777
6778 return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg);
6779 }
6780
6781 memset(&ets_data, 0, sizeof(ets_data));
6782 ets_data.tc_valid_bits = I40E_DEFAULT_TRAFFIC_CLASS;
6783 ets_data.tc_strict_priority_flags = 0;
6784 ets_data.tc_bw_share_credits[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
6785
6786
6787 err = i40e_aq_config_switch_comp_ets
6788 (hw, pf->mac_seid, &ets_data,
6789 i40e_aqc_opc_enable_switching_comp_ets, NULL);
6790 if (err) {
6791 dev_info(&pf->pdev->dev,
6792 "Enable Port ETS failed, err %s aq_err %s\n",
6793 i40e_stat_str(&pf->hw, err),
6794 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6795 err = -ENOENT;
6796 goto out;
6797 }
6798
6799
6800 dcb_cfg->etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
6801 dcb_cfg->etscfg.cbs = 0;
6802 dcb_cfg->etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS;
6803 dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
6804
6805out:
6806 return err;
6807}
6808
6809
6810
6811
6812
6813
6814
6815
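/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure.
 **/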
6816static int i40e_init_pf_dcb(struct i40e_pf *pf)
6817{
6818 struct i40e_hw *hw = &pf->hw;
6819 int err;
6820
6821
6822
6823
6824 if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) {
6825 dev_info(&pf->pdev->dev, "DCB is not supported.\n");
6826 err = I40E_NOT_SUPPORTED;
6827 goto out;
6828 }
6829 if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
6830 dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n");
6831 err = i40e_dcb_sw_default_config(pf);
6832 if (err) {
6833 dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n");
6834 goto out;
6835 }
6836 dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n");
6837 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
6838 DCB_CAP_DCBX_VER_IEEE;
6839
6840 pf->flags |= I40E_FLAG_DCB_CAPABLE;
6841 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6842 goto out;
6843 }
6844 err = i40e_init_dcb(hw, true);
6845 if (!err) {
6846
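 /* Device/Function is not DCBX capable */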
6847 if ((!hw->func_caps.dcb) ||
6848 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
6849 dev_info(&pf->pdev->dev,
6850 "DCBX offload is not supported or is disabled for this PF.\n");
6851 } else {
6852
6853 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
6854 DCB_CAP_DCBX_VER_IEEE;
6855
6856 pf->flags |= I40E_FLAG_DCB_CAPABLE;
6857
6858
6859
6860 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6861 pf->flags |= I40E_FLAG_DCB_ENABLED;
6862 else
6863 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6864 dev_dbg(&pf->pdev->dev,
6865 "DCBX offload is supported for this PF.\n");
6866 }
6867 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
6868 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
6869 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
6870 } else {
6871 dev_info(&pf->pdev->dev,
6872 "Query for DCB configuration failed, err %s aq_err %s\n",
6873 i40e_stat_str(&pf->hw, err),
6874 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6875 }
6876
6877out:
6878 return err;
6879}
6880#endif
6881
6882
6883
6884
6885
6886
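/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 * @isup: true if link is up, false otherwise
 **/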
6887void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6888{
6889 enum i40e_aq_link_speed new_speed;
6890 struct i40e_pf *pf = vsi->back;
6891 char *speed = "Unknown";
6892 char *fc = "Unknown";
6893 char *fec = "";
6894 char *req_fec = "";
6895 char *an = "";
6896
6897 if (isup)
6898 new_speed = pf->hw.phy.link_info.link_speed;
6899 else
6900 new_speed = I40E_LINK_SPEED_UNKNOWN;
6901
6902 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
6903 return;
6904 vsi->current_isup = isup;
6905 vsi->current_speed = new_speed;
6906 if (!isup) {
6907 netdev_info(vsi->netdev, "NIC Link is Down\n");
6908 return;
6909 }
6910
6911
6912
6913
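 /* Warn user if link speed on NPAR enabled partition is not at
  * least 10GB
  */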
6914 if (pf->hw.func_caps.npar_enable &&
6915 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
6916 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
6917 netdev_warn(vsi->netdev,
6918 "The partition detected link speed that is less than 10Gbps\n");
6919
6920 switch (pf->hw.phy.link_info.link_speed) {
6921 case I40E_LINK_SPEED_40GB:
6922 speed = "40 G";
6923 break;
6924 case I40E_LINK_SPEED_20GB:
6925 speed = "20 G";
6926 break;
6927 case I40E_LINK_SPEED_25GB:
6928 speed = "25 G";
6929 break;
6930 case I40E_LINK_SPEED_10GB:
6931 speed = "10 G";
6932 break;
6933 case I40E_LINK_SPEED_5GB:
6934 speed = "5 G";
6935 break;
6936 case I40E_LINK_SPEED_2_5GB:
6937 speed = "2.5 G";
6938 break;
6939 case I40E_LINK_SPEED_1GB:
6940 speed = "1000 M";
6941 break;
6942 case I40E_LINK_SPEED_100MB:
6943 speed = "100 M";
6944 break;
6945 default:
6946 break;
6947 }
6948
6949 switch (pf->hw.fc.current_mode) {
6950 case I40E_FC_FULL:
6951 fc = "RX/TX";
6952 break;
6953 case I40E_FC_TX_PAUSE:
6954 fc = "TX";
6955 break;
6956 case I40E_FC_RX_PAUSE:
6957 fc = "RX";
6958 break;
6959 default:
6960 fc = "None";
6961 break;
6962 }
6963
6964 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
6965 req_fec = "None";
6966 fec = "None";
6967 an = "False";
6968
6969 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
6970 an = "True";
6971
6972 if (pf->hw.phy.link_info.fec_info &
6973 I40E_AQ_CONFIG_FEC_KR_ENA)
6974 fec = "CL74 FC-FEC/BASE-R";
6975 else if (pf->hw.phy.link_info.fec_info &
6976 I40E_AQ_CONFIG_FEC_RS_ENA)
6977 fec = "CL108 RS-FEC";
6978
6979
6980
6981
6982 if (vsi->back->hw.phy.link_info.req_fec_info &
6983 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
6984 if (vsi->back->hw.phy.link_info.req_fec_info &
6985 I40E_AQ_REQUEST_FEC_RS)
6986 req_fec = "CL108 RS-FEC";
6987 else
6988 req_fec = "CL74 FC-FEC/BASE-R";
6989 }
6990 netdev_info(vsi->netdev,
6991 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
6992 speed, req_fec, fec, an, fc);
6993 } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) {
6994 req_fec = "None";
6995 fec = "None";
6996 an = "False";
6997
6998 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
6999 an = "True";
7000
7001 if (pf->hw.phy.link_info.fec_info &
7002 I40E_AQ_CONFIG_FEC_KR_ENA)
7003 fec = "CL74 FC-FEC/BASE-R";
7004
7005 if (pf->hw.phy.link_info.req_fec_info &
7006 I40E_AQ_REQUEST_FEC_KR)
7007 req_fec = "CL74 FC-FEC/BASE-R";
7008
7009 netdev_info(vsi->netdev,
7010 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7011 speed, req_fec, fec, an, fc);
7012 } else {
7013 netdev_info(vsi->netdev,
7014 "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
7015 speed, fc);
7016 }
7017
7018}
7019
7020
7021
7022
7023
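/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 **/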
7024static int i40e_up_complete(struct i40e_vsi *vsi)
7025{
7026 struct i40e_pf *pf = vsi->back;
7027 int err;
7028
7029 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7030 i40e_vsi_configure_msix(vsi);
7031 else
7032 i40e_configure_msi_and_legacy(vsi);
7033
7034
7035 err = i40e_vsi_start_rings(vsi);
7036 if (err)
7037 return err;
7038
7039 clear_bit(__I40E_VSI_DOWN, vsi->state);
7040 i40e_napi_enable_all(vsi);
7041 i40e_vsi_enable_irq(vsi);
7042
7043 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
7044 (vsi->netdev)) {
7045 i40e_print_link_message(vsi, true);
7046 netif_tx_start_all_queues(vsi->netdev);
7047 netif_carrier_on(vsi->netdev);
7048 }
7049
7050
7051 if (vsi->type == I40E_VSI_FDIR) {
7052
7053 pf->fd_add_err = 0;
7054 pf->fd_atr_cnt = 0;
7055 i40e_fdir_filter_restore(vsi);
7056 }
7057
7058
7059
7060
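 /* On the next run of the service_task, notify any clients of the new
  * opened netdev
  */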
7061 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
7062 i40e_service_event_schedule(pf);
7063
7064 return 0;
7065}
7066
7067
7068
7069
7070
7071
7072
7073
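/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration change.
 **/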
7074static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
7075{
7076 struct i40e_pf *pf = vsi->back;
7077
7078 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
7079 usleep_range(1000, 2000);
7080 i40e_down(vsi);
7081
7082 i40e_up(vsi);
7083 clear_bit(__I40E_CONFIG_BUSY, pf->state);
7084}
7085
7086
7087
7088
7089
7090
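/**
 * i40e_force_link_state - Force the link status
 * @pf: board private structure
 * @is_up: whether the link state should be forced up or down
 **/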
7091static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
7092{
7093 struct i40e_aq_get_phy_abilities_resp abilities;
7094 struct i40e_aq_set_phy_config config = {0};
7095 bool non_zero_phy_type = is_up;
7096 struct i40e_hw *hw = &pf->hw;
7097 i40e_status err;
7098 u64 mask;
7099 u8 speed;
7100
7101
7102
7103
7104
7105
7106
7107 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
7108 NULL);
7109 if (err) {
7110 dev_err(&pf->pdev->dev,
7111 "failed to get phy cap., ret = %s last_status = %s\n",
7112 i40e_stat_str(hw, err),
7113 i40e_aq_str(hw, hw->aq.asq_last_status));
7114 return err;
7115 }
7116 speed = abilities.link_speed;
7117
7118
7119 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
7120 NULL);
7121 if (err) {
7122 dev_err(&pf->pdev->dev,
7123 "failed to get phy cap., ret = %s last_status = %s\n",
7124 i40e_stat_str(hw, err),
7125 i40e_aq_str(hw, hw->aq.asq_last_status));
7126 return err;
7127 }
7128
7129
7130
7131
7132
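 /* If the link needs to go up but was not forced down, and its
  * speed values are OK, there is no need for a flap; if
  * non_zero_phy_type was set, the link must still be forced up.
  */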
7133 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
7134 non_zero_phy_type = true;
7135 else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
7136 return I40E_SUCCESS;
7137
7138
7139
7140
7141
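 /* To force link we need to set bits for all supported PHY types,
  * but there are now more than 32, so we need two more bytes to
  * deal with them.
  */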
7142 mask = I40E_PHY_TYPES_BITMASK;
7143 config.phy_type =
7144 non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
7145 config.phy_type_ext =
7146 non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
7147
7148 config.abilities = abilities.abilities;
7149 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
7150 if (is_up)
7151 config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
7152 else
7153 config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
7154 }
7155 if (abilities.link_speed != 0)
7156 config.link_speed = abilities.link_speed;
7157 else
7158 config.link_speed = speed;
7159 config.eee_capability = abilities.eee_capability;
7160 config.eeer = abilities.eeer_val;
7161 config.low_power_ctrl = abilities.d3_lpan;
7162 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
7163 I40E_AQ_PHY_FEC_CONFIG_MASK;
7164 err = i40e_aq_set_phy_config(hw, &config, NULL);
7165
7166 if (err) {
7167 dev_err(&pf->pdev->dev,
7168 "set phy config ret = %s last_status = %s\n",
7169 i40e_stat_str(&pf->hw, err),
7170 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7171 return err;
7172 }
7173
7174
7175 err = i40e_update_link_info(hw);
7176 if (err) {
7177
7178
7179
7180
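 /* Wait a little bit (on 40G cards it sometimes takes a really
  * long time for link to come back from the atomic reset)
  * and try once more
  */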
7181 msleep(1000);
7182 i40e_update_link_info(hw);
7183 }
7184
7185 i40e_aq_set_link_restart_an(hw, is_up, NULL);
7186
7187 return I40E_SUCCESS;
7188}
7189
7190
7191
7192
7193
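/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 **/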
7194int i40e_up(struct i40e_vsi *vsi)
7195{
7196 int err;
7197
7198 if (vsi->type == I40E_VSI_MAIN &&
7199 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
7200 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
7201 i40e_force_link_state(vsi->back, true);
7202
7203 err = i40e_vsi_configure(vsi);
7204 if (!err)
7205 err = i40e_up_complete(vsi);
7206
7207 return err;
7208}
7209
7210
7211
7212
7213
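/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 **/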
7214void i40e_down(struct i40e_vsi *vsi)
7215{
7216 int i;
7217
7218
7219
7220
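 /* It is assumed that the caller of this function
  * sets the vsi->state __I40E_VSI_DOWN bit.
  */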
7221 if (vsi->netdev) {
7222 netif_carrier_off(vsi->netdev);
7223 netif_tx_disable(vsi->netdev);
7224 }
7225 i40e_vsi_disable_irq(vsi);
7226 i40e_vsi_stop_rings(vsi);
7227 if (vsi->type == I40E_VSI_MAIN &&
7228 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
7229 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
7230 i40e_force_link_state(vsi->back, false);
7231 i40e_napi_disable_all(vsi);
7232
7233 for (i = 0; i < vsi->num_queue_pairs; i++) {
7234 i40e_clean_tx_ring(vsi->tx_rings[i]);
7235 if (i40e_enabled_xdp_vsi(vsi)) {
7236
7237
7238
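 /* Make sure that in-progress ndo_xdp_xmit and
  * ndo_xsk_wakeup calls are completed.
  */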
7239 synchronize_rcu();
7240 i40e_clean_tx_ring(vsi->xdp_rings[i]);
7241 }
7242 i40e_clean_rx_ring(vsi->rx_rings[i]);
7243 }
7244
7245}
7246
7247
7248
7249
7250
7251
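/**
 * i40e_validate_mqprio_qopt - validate queue mapping info
 * @vsi: the VSI being configured
 * @mqprio_qopt: queue parameters
 **/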
7252static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
7253 struct tc_mqprio_qopt_offload *mqprio_qopt)
7254{
7255 u64 sum_max_rate = 0;
7256 u64 max_rate = 0;
7257 int i;
7258
7259 if (mqprio_qopt->qopt.offset[0] != 0 ||
7260 mqprio_qopt->qopt.num_tc < 1 ||
7261 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
7262 return -EINVAL;
7263 for (i = 0; ; i++) {
7264 if (!mqprio_qopt->qopt.count[i])
7265 return -EINVAL;
7266 if (mqprio_qopt->min_rate[i]) {
7267 dev_err(&vsi->back->pdev->dev,
7268 "Invalid min tx rate (greater than 0) specified\n");
7269 return -EINVAL;
7270 }
7271 max_rate = mqprio_qopt->max_rate[i];
7272 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
7273 sum_max_rate += max_rate;
7274
7275 if (i >= mqprio_qopt->qopt.num_tc - 1)
7276 break;
7277 if (mqprio_qopt->qopt.offset[i + 1] !=
7278 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7279 return -EINVAL;
7280 }
7281 if (vsi->num_queue_pairs <
7282 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
7283 return -EINVAL;
7284 }
7285 if (sum_max_rate > i40e_get_link_speed(vsi)) {
7286 dev_err(&vsi->back->pdev->dev,
7287 "Invalid max tx rate specified\n");
7288 return -EINVAL;
7289 }
7290 return 0;
7291}
7292
7293
7294
7295
7296
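/**
 * i40e_vsi_set_default_tc_config - set default values for tc configuration
 * @vsi: the VSI being configured
 **/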
7297static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
7298{
7299 u16 qcount;
7300 int i;
7301
7302
7303 vsi->tc_config.numtc = 1;
7304 vsi->tc_config.enabled_tc = 1;
7305 qcount = min_t(int, vsi->alloc_queue_pairs,
7306 i40e_pf_get_max_q_per_tc(vsi->back));
7307 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
7308
7309
7310
7311 vsi->tc_config.tc_info[i].qoffset = 0;
7312 if (i == 0)
7313 vsi->tc_config.tc_info[i].qcount = qcount;
7314 else
7315 vsi->tc_config.tc_info[i].qcount = 1;
7316 vsi->tc_config.tc_info[i].netdev_tc = 0;
7317 }
7318}
7319
7320
7321
7322
7323
7324
7325
7326
7327
7328
7329
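/**
 * i40e_del_macvlan_filter - Delete a MAC filter from a channel
 * @hw: address of hw structure
 * @seid: SEID of the VSI from which the macvlan filter is removed
 * @macaddr: the mac address to be removed
 * @aq_err: admin queue error code, returned to the caller
 *
 * Returns status indicating success or failure.
 **/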
7330static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
7331 const u8 *macaddr, int *aq_err)
7332{
7333 struct i40e_aqc_remove_macvlan_element_data element;
7334 i40e_status status;
7335
7336 memset(&element, 0, sizeof(element));
7337 ether_addr_copy(element.mac_addr, macaddr);
7338 element.vlan_tag = 0;
7339 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7340 status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
7341 *aq_err = hw->aq.asq_last_status;
7342
7343 return status;
7344}
7345
7346
7347
7348
7349
7350
7351
7352
7353
7354
7355
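/**
 * i40e_add_macvlan_filter - Add a MAC filter to a channel
 * @hw: address of hw structure
 * @seid: SEID of the VSI to which the macvlan filter is added
 * @macaddr: the mac address to be added
 * @aq_err: admin queue error code, returned to the caller
 *
 * Returns status indicating success or failure.
 **/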
7356static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
7357 const u8 *macaddr, int *aq_err)
7358{
7359 struct i40e_aqc_add_macvlan_element_data element;
7360 i40e_status status;
7361 u16 cmd_flags = 0;
7362
7363 ether_addr_copy(element.mac_addr, macaddr);
7364 element.vlan_tag = 0;
7365 element.queue_number = 0;
7366 element.match_method = I40E_AQC_MM_ERR_NO_RES;
7367 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
7368 element.flags = cpu_to_le16(cmd_flags);
7369 status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
7370 *aq_err = hw->aq.asq_last_status;
7371
7372 return status;
7373}
7374
7375
7376
7377
7378
7379
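/**
 * i40e_reset_ch_rings - Reset the queue contexts in a channel
 * @vsi: the VSI we want to access
 * @ch: the channel we want to access
 **/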
7380static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
7381{
7382 struct i40e_ring *tx_ring, *rx_ring;
7383 u16 pf_q;
7384 int i;
7385
7386 for (i = 0; i < ch->num_queue_pairs; i++) {
7387 pf_q = ch->base_queue + i;
7388 tx_ring = vsi->tx_rings[pf_q];
7389 tx_ring->ch = NULL;
7390 rx_ring = vsi->rx_rings[pf_q];
7391 rx_ring->ch = NULL;
7392 }
7393}
7394
7395
7396
7397
7398
7399
7400
7401
7402
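/**
 * i40e_free_macvlan_channels - Free off-loaded macvlan channels
 * @vsi: the VSI we want to access
 *
 * Frees the queues of the channel VSIs and deletes the channel VSIs
 * which serve as macvlans.
 **/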
7403static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
7404{
7405 struct i40e_channel *ch, *ch_tmp;
7406 int ret;
7407
7408 if (list_empty(&vsi->macvlan_list))
7409 return;
7410
7411 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7412 struct i40e_vsi *parent_vsi;
7413
7414 if (i40e_is_channel_macvlan(ch)) {
7415 i40e_reset_ch_rings(vsi, ch);
7416 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7417 netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
7418 netdev_set_sb_channel(ch->fwd->netdev, 0);
7419 kfree(ch->fwd);
7420 ch->fwd = NULL;
7421 }
7422
7423 list_del(&ch->list);
7424 parent_vsi = ch->parent_vsi;
7425 if (!parent_vsi || !ch->initialized) {
7426 kfree(ch);
7427 continue;
7428 }
7429
7430 /* remove the VSI */
7431 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
7432 NULL);
7433 if (ret)
7434 dev_err(&vsi->back->pdev->dev,
7435 "unable to remove channel (%d) for parent VSI(%d)\n",
7436 ch->seid, parent_vsi->seid);
7437 kfree(ch);
7438 }
7439 vsi->macvlan_cnt = 0;
7440}
7441
7442
7443
7444
7445
7446
7447
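/**
 * i40e_fwd_ring_up - bring up the rings for an offloaded macvlan
 * @vsi: the VSI we want to access
 * @vdev: the macvlan netdevice
 * @fwd: forwarding state for the macvlan
 *
 * Binds a free channel's queues to @vdev and installs a MAC filter
 * for the macvlan's address. Returns 0 on success.
 **/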
7448static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
7449 struct i40e_fwd_adapter *fwd)
7450{
7451 int ret = 0, num_tc = 1, i, aq_err;
7452 struct i40e_channel *ch = NULL, *ch_tmp, *iter;
7453 struct i40e_pf *pf = vsi->back;
7454 struct i40e_hw *hw = &pf->hw;
7455
7456 if (list_empty(&vsi->macvlan_list))
7457 return -EINVAL;
7458
7459 /* Go through the list and find an available channel */
7460 list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
7461 if (!i40e_is_channel_macvlan(iter)) {
7462 iter->fwd = fwd;
7463 /* record configuration for macvlan interface in vdev */
7464 for (i = 0; i < num_tc; i++)
7465 netdev_bind_sb_channel_queue(vsi->netdev, vdev,
7466 i,
7467 iter->num_queue_pairs,
7468 iter->base_queue);
7469 for (i = 0; i < iter->num_queue_pairs; i++) {
7470 struct i40e_ring *tx_ring, *rx_ring;
7471 u16 pf_q;
7472
7473 pf_q = iter->base_queue + i;
7474
7475 /* Get to TX ring ptr */
7476 tx_ring = vsi->tx_rings[pf_q];
7477 tx_ring->ch = iter;
7478
7479 /* Get the RX ring ptr */
7480 rx_ring = vsi->rx_rings[pf_q];
7481 rx_ring->ch = iter;
7482 }
7483 ch = iter;
break;
7484 }
7485 }
7486
/* Bail out if no free channel was found; using the loop cursor after a
 * full traversal would dereference the list head instead of a channel.
 */
if (!ch)
return -EINVAL;

7487 /* Guarantee all rings are updated before we update the
7488 * MAC address filter.
7489 */
7490 wmb();
7491
7492 /* Add a mac filter */
7493 ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
7494 if (ret) {
7495
7496 macvlan_release_l2fw_offload(vdev);
7497 for (i = 0; i < ch->num_queue_pairs; i++) {
7498 struct i40e_ring *rx_ring;
7499 u16 pf_q;
7500
7501 pf_q = ch->base_queue + i;
7502 rx_ring = vsi->rx_rings[pf_q];
7503 rx_ring->netdev = NULL;
7504 }
7505 dev_info(&pf->pdev->dev,
7506 "Error adding mac filter on macvlan err %s, aq_err %s\n",
7507 i40e_stat_str(hw, ret),
7508 i40e_aq_str(hw, aq_err));
7509 netdev_err(vdev, "L2fwd offload disabled due to L2 filter error\n");
7510 }
7511
7512 return ret;
7513}
7514
7515
7516
7517
7518
7519
7520
7521
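/**
 * i40e_setup_macvlans - create the channels which will be macvlans
 * @vsi: the VSI we want to access
 * @macvlan_cnt: number of macvlans to be set up
 * @qcnt: number of queues per macvlan
 * @vdev: the macvlan netdevice
 **/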
7522static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
7523 struct net_device *vdev)
7524{
7525 struct i40e_pf *pf = vsi->back;
7526 struct i40e_hw *hw = &pf->hw;
7527 struct i40e_vsi_context ctxt;
7528 u16 sections, qmap, num_qps;
7529 struct i40e_channel *ch;
7530 int i, pow, ret = 0;
7531 u8 offset = 0;
7532
7533 if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
7534 return -EINVAL;
7535
7536 num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
7537
7538 /* find the next higher power-of-2 of num queue pairs */
7539 pow = fls(roundup_pow_of_two(num_qps) - 1);
7540
7541 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
7542 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
7543
7544
7545 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
7546 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
7547 memset(&ctxt, 0, sizeof(ctxt));
7548 ctxt.seid = vsi->seid;
7549 ctxt.pf_num = vsi->back->hw.pf_id;
7550 ctxt.vf_num = 0;
7551 ctxt.uplink_seid = vsi->uplink_seid;
7552 ctxt.info = vsi->info;
7553 ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
7554 ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
7555 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
7556 ctxt.info.valid_sections |= cpu_to_le16(sections);
7557
7558
7559 vsi->rss_size = max_t(u16, num_qps, qcnt);
7560 ret = i40e_vsi_config_rss(vsi);
7561 if (ret) {
7562 dev_info(&pf->pdev->dev,
7563 "Failed to reconfig RSS for num_queues (%u)\n",
7564 vsi->rss_size);
7565 return ret;
7566 }
7567 vsi->reconfig_rss = true;
7568 dev_dbg(&vsi->back->pdev->dev,
7569 "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
7570 vsi->next_base_queue = num_qps;
7571 vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
7572
7573 /* Update the VSI after updating the VSI queue-mapping
7574 * information
7575 */
7576 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7577 if (ret) {
7578 dev_info(&pf->pdev->dev,
7579 "Update vsi tc config failed, err %s aq_err %s\n",
7580 i40e_stat_str(hw, ret),
7581 i40e_aq_str(hw, hw->aq.asq_last_status));
7582 return ret;
7583 }
7584
7585 i40e_vsi_update_queue_map(vsi, &ctxt);
7586 vsi->info.valid_sections = 0;
7587
7588
7589 INIT_LIST_HEAD(&vsi->macvlan_list);
7590 for (i = 0; i < macvlan_cnt; i++) {
7591 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
7592 if (!ch) {
7593 ret = -ENOMEM;
7594 goto err_free;
7595 }
7596 INIT_LIST_HEAD(&ch->list);
7597 ch->num_queue_pairs = qcnt;
7598 if (!i40e_setup_channel(pf, vsi, ch)) {
7599 ret = -EINVAL;
7600 kfree(ch);
7601 goto err_free;
7602 }
7603 ch->parent_vsi = vsi;
7604 vsi->cnt_q_avail -= ch->num_queue_pairs;
7605 vsi->macvlan_cnt++;
7606 list_add_tail(&ch->list, &vsi->macvlan_list);
7607 }
7608
7609 return ret;
7610
7611err_free:
7612 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
7613 i40e_free_macvlan_channels(vsi);
7614
7615 return ret;
7616}
7617
7618
7619
7620
7621
7622
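/**
 * i40e_fwd_add - configure a macvlan for L2 forwarding offload
 * @netdev: the net device owning the queues
 * @vdev: the macvlan netdevice to offload
 *
 * ndo_dfwd_add_station handler; returns the forwarding state for the
 * macvlan on success or an ERR_PTR on failure.
 **/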
7623static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
7624{
7625 struct i40e_netdev_priv *np = netdev_priv(netdev);
7626 u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
7627 struct i40e_vsi *vsi = np->vsi;
7628 struct i40e_pf *pf = vsi->back;
7629 struct i40e_fwd_adapter *fwd;
7630 int avail_macvlan, ret;
7631
7632 if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
7633 netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
7634 return ERR_PTR(-EINVAL);
7635 }
7636 if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
7637 netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
7638 return ERR_PTR(-EINVAL);
7639 }
7640 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
7641 netdev_info(netdev, "Not enough vectors available to support macvlans\n");
7642 return ERR_PTR(-EINVAL);
7643 }
7644
7645 /* The macvlan device has to be a single queue device so that
7646 * the tc_to_txq field can be reused to pick the tx queue.
7647 */
7648 if (netif_is_multiqueue(vdev))
7649 return ERR_PTR(-ERANGE);
7650
7651 if (!vsi->macvlan_cnt) {
7652 /* reserve bit 0 for the pf device */
7653 set_bit(0, vsi->fwd_bitmask);
7654
7655 /* Try to reserve as many queues as possible for macvlans. First
7656 * reserve 3/4th of max vectors, then half, then quarter and
7657 * calculate Qs per macvlan as you go
7658 */
7659 vectors = pf->num_lan_msix;
7660 if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
7661 /* allocate 4 Qs per macvlan and 32 Qs to the PF */
7662 q_per_macvlan = 4;
7663 macvlan_cnt = (vectors - 32) / 4;
7664 } else if (vectors <= 64 && vectors > 32) {
7665 /* allocate 2 Qs per macvlan and 16 Qs to the PF */
7666 q_per_macvlan = 2;
7667 macvlan_cnt = (vectors - 16) / 2;
7668 } else if (vectors <= 32 && vectors > 16) {
7669 /* allocate 1 Q per macvlan and 16 Qs to the PF */
7670 q_per_macvlan = 1;
7671 macvlan_cnt = vectors - 16;
7672 } else if (vectors <= 16 && vectors > 8) {
7673 /* allocate 1 Q per macvlan and 8 Qs to the PF */
7674 q_per_macvlan = 1;
7675 macvlan_cnt = vectors - 8;
7676 } else {
7677 /* allocate 1 Q per macvlan and 1 Q to the PF */
7678 q_per_macvlan = 1;
7679 macvlan_cnt = vectors - 1;
7680 }
7681
7682 if (macvlan_cnt == 0)
7683 return ERR_PTR(-EBUSY);
7684
7685
7686 i40e_quiesce_vsi(vsi);
7687
7688
7689 ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
7690 vdev);
7691 if (ret)
7692 return ERR_PTR(ret);
7693
7694
7695 i40e_unquiesce_vsi(vsi);
7696 }
7697 avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
7698 vsi->macvlan_cnt);
7699 if (avail_macvlan >= I40E_MAX_MACVLANS)
7700 return ERR_PTR(-EBUSY);
7701
7702
7703 fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
7704 if (!fwd)
7705 return ERR_PTR(-ENOMEM);
7706
7707 set_bit(avail_macvlan, vsi->fwd_bitmask);
7708 fwd->bit_no = avail_macvlan;
7709 netdev_set_sb_channel(vdev, avail_macvlan);
7710 fwd->netdev = vdev;
7711
7712 if (!netif_running(netdev))
7713 return fwd;
7714
7715
7716 ret = i40e_fwd_ring_up(vsi, vdev, fwd);
7717 if (ret) {
7718
7719 netdev_unbind_sb_channel(netdev, vdev);
7720 netdev_set_sb_channel(vdev, 0);
7721
7722 kfree(fwd);
7723 return ERR_PTR(-EINVAL);
7724 }
7725
7726 return fwd;
7727}
7728
7729
7730
7731
7732
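/**
 * i40e_del_all_macvlans - Delete all the mac filters on the channels
 * @vsi: the VSI we want to access
 **/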
7733static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
7734{
7735 struct i40e_channel *ch, *ch_tmp;
7736 struct i40e_pf *pf = vsi->back;
7737 struct i40e_hw *hw = &pf->hw;
7738 int aq_err, ret = 0;
7739
7740 if (list_empty(&vsi->macvlan_list))
7741 return;
7742
7743 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7744 if (i40e_is_channel_macvlan(ch)) {
7745 ret = i40e_del_macvlan_filter(hw, ch->seid,
7746 i40e_channel_mac(ch),
7747 &aq_err);
7748 if (!ret) {
7749
7750 i40e_reset_ch_rings(vsi, ch);
7751 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7752 netdev_unbind_sb_channel(vsi->netdev,
7753 ch->fwd->netdev);
7754 netdev_set_sb_channel(ch->fwd->netdev, 0);
7755 kfree(ch->fwd);
7756 ch->fwd = NULL;
7757 }
7758 }
7759 }
7760}
7761
7762
7763
7764
7765
7766
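/**
 * i40e_fwd_del - delete a macvlan offload
 * @netdev: the net device owning the queues
 * @vdev: forwarding state (struct i40e_fwd_adapter) of the macvlan
 *
 * ndo_dfwd_del_station handler; removes the MAC filter and releases
 * the channel used by the macvlan.
 **/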
7767static void i40e_fwd_del(struct net_device *netdev, void *vdev)
7768{
7769 struct i40e_netdev_priv *np = netdev_priv(netdev);
7770 struct i40e_fwd_adapter *fwd = vdev;
7771 struct i40e_channel *ch, *ch_tmp;
7772 struct i40e_vsi *vsi = np->vsi;
7773 struct i40e_pf *pf = vsi->back;
7774 struct i40e_hw *hw = &pf->hw;
7775 int aq_err, ret = 0;
7776
7777
7778 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7779 if (i40e_is_channel_macvlan(ch) &&
7780 ether_addr_equal(i40e_channel_mac(ch),
7781 fwd->netdev->dev_addr)) {
7782 ret = i40e_del_macvlan_filter(hw, ch->seid,
7783 i40e_channel_mac(ch),
7784 &aq_err);
7785 if (!ret) {
7786
7787 i40e_reset_ch_rings(vsi, ch);
7788 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7789 netdev_unbind_sb_channel(netdev, fwd->netdev);
7790 netdev_set_sb_channel(fwd->netdev, 0);
7791 kfree(ch->fwd);
7792 ch->fwd = NULL;
7793 } else {
7794 dev_info(&pf->pdev->dev,
7795 "Error deleting mac filter on macvlan err %s, aq_err %s\n",
7796 i40e_stat_str(hw, ret),
7797 i40e_aq_str(hw, aq_err));
7798 }
7799 break;
7800 }
7801 }
7802}
7803
7804
7805
7806
7807
7808
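/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @type_data: mqprio queue configuration from the tc subsystem
 **/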
7809static int i40e_setup_tc(struct net_device *netdev, void *type_data)
7810{
7811 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
7812 struct i40e_netdev_priv *np = netdev_priv(netdev);
7813 struct i40e_vsi *vsi = np->vsi;
7814 struct i40e_pf *pf = vsi->back;
7815 u8 enabled_tc = 0, num_tc, hw;
7816 bool need_reset = false;
7817 int old_queue_pairs;
7818 int ret = -EINVAL;
7819 u16 mode;
7820 int i;
7821
7822 old_queue_pairs = vsi->num_queue_pairs;
7823 num_tc = mqprio_qopt->qopt.num_tc;
7824 hw = mqprio_qopt->qopt.hw;
7825 mode = mqprio_qopt->mode;
7826 if (!hw) {
7827 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7828 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
7829 goto config_tc;
7830 }
7831
7832
7833 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
7834 netdev_info(netdev,
7835 "Configuring TC not supported in MFP mode\n");
7836 return ret;
7837 }
7838 switch (mode) {
7839 case TC_MQPRIO_MODE_DCB:
7840 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7841
7842
7843 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
7844 netdev_info(netdev,
7845 "DCB is not enabled for adapter\n");
7846 return ret;
7847 }
7848
7849
7850 if (num_tc > i40e_pf_get_num_tc(pf)) {
7851 netdev_info(netdev,
7852 "TC count greater than enabled on link for adapter\n");
7853 return ret;
7854 }
7855 break;
7856 case TC_MQPRIO_MODE_CHANNEL:
7857 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
7858 netdev_info(netdev,
7859 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
7860 return ret;
7861 }
7862 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7863 return ret;
7864 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
7865 if (ret)
7866 return ret;
7867 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
7868 sizeof(*mqprio_qopt));
7869 pf->flags |= I40E_FLAG_TC_MQPRIO;
7870 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
7871 break;
7872 default:
7873 return -EINVAL;
7874 }
7875
7876config_tc:
7877
7878 for (i = 0; i < num_tc; i++)
7879 enabled_tc |= BIT(i);
7880
7881
7882 if (enabled_tc == vsi->tc_config.enabled_tc &&
7883 mode != TC_MQPRIO_MODE_CHANNEL)
7884 return 0;
7885
7886
7887 i40e_quiesce_vsi(vsi);
7888
7889 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
7890 i40e_remove_queue_channels(vsi);
7891
7892
7893 ret = i40e_vsi_config_tc(vsi, enabled_tc);
7894 if (ret) {
7895 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
7896 vsi->seid);
7897 need_reset = true;
7898 goto exit;
7899 } else {
7900 dev_info(&vsi->back->pdev->dev,
7901 "Setup channel (id:%u) utilizing num_queues %d\n",
7902 vsi->seid, vsi->tc_config.tc_info[0].qcount);
7903 }
7904
7905 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
7906 if (vsi->mqprio_qopt.max_rate[0]) {
7907 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
7908
7909 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
7910 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
7911 if (!ret) {
7912 u64 credits = max_tx_rate;
7913
7914 do_div(credits, I40E_BW_CREDIT_DIVISOR);
7915 dev_dbg(&vsi->back->pdev->dev,
7916 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
7917 max_tx_rate,
7918 credits,
7919 vsi->seid);
7920 } else {
7921 need_reset = true;
7922 goto exit;
7923 }
7924 }
7925 ret = i40e_configure_queue_channels(vsi);
7926 if (ret) {
7927 vsi->num_queue_pairs = old_queue_pairs;
7928 netdev_info(netdev,
7929 "Failed configuring queue channels\n");
7930 need_reset = true;
7931 goto exit;
7932 }
7933 }
7934
7935exit:
7936
7937 if (need_reset) {
7938 i40e_vsi_set_default_tc_config(vsi);
7939 need_reset = false;
7940 }
7941
7942
7943 i40e_unquiesce_vsi(vsi);
7944 return ret;
7945}
7946
7947
7948
7949
7950
7951
7952
7953
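/**
 * i40e_set_cld_element - sets cloud filter element data
 * @filter: cloud filter rule
 * @cld: ptr to cloud filter element data
 *
 * This is helper function to copy data into cloud filter element
 **/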
7954static inline void
7955i40e_set_cld_element(struct i40e_cloud_filter *filter,
7956 struct i40e_aqc_cloud_filters_element_data *cld)
7957{
7958 u32 ipa;
7959 int i;
7960
7961 memset(cld, 0, sizeof(*cld));
7962 ether_addr_copy(cld->outer_mac, filter->dst_mac);
7963 ether_addr_copy(cld->inner_mac, filter->src_mac);
7964
7965 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
7966 return;
7967
7968 if (filter->n_proto == ETH_P_IPV6) {
7969#define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
7970 for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) {
7971 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
7972
7973 *(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa);
7974 }
7975 } else {
7976 ipa = be32_to_cpu(filter->dst_ipv4);
7977
7978 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
7979 }
7980
7981 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
7982
7983
7984
7985
7986 if (filter->tenant_id)
7987 return;
7988}
7989
7990
7991
7992
7993
7994
7995
7996
7997
7998
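/**
 * i40e_add_del_cloud_filter - Add/del cloud filter
 * @vsi: pointer to VSI
 * @filter: cloud filter rule
 * @add: if true, add, if false, delete
 *
 * Add or delete a cloud filter for a specific flow spec.
 * Returns 0 if the filter was successfully added or deleted.
 **/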
7999int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
8000 struct i40e_cloud_filter *filter, bool add)
8001{
8002 struct i40e_aqc_cloud_filters_element_data cld_filter;
8003 struct i40e_pf *pf = vsi->back;
8004 int ret;
8005 static const u16 flag_table[128] = {
8006 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
8007 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
8008 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
8009 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
8010 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
8011 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
8012 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
8013 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
8014 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
8015 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
8016 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
8017 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
8018 [I40E_CLOUD_FILTER_FLAGS_IIP] =
8019 I40E_AQC_ADD_CLOUD_FILTER_IIP,
8020 };
8021
8022 if (filter->flags >= ARRAY_SIZE(flag_table))
8023 return I40E_ERR_CONFIG;
8024
8025 memset(&cld_filter, 0, sizeof(cld_filter));
8026
8027
8028 i40e_set_cld_element(filter, &cld_filter);
8029
8030 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
8031 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
8032 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
8033
8034 if (filter->n_proto == ETH_P_IPV6)
8035 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8036 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8037 else
8038 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8039 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8040
8041 if (add)
8042 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
8043 &cld_filter, 1);
8044 else
8045 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
8046 &cld_filter, 1);
8047 if (ret)
8048 dev_dbg(&pf->pdev->dev,
8049 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
8050 add ? "add" : "delete", filter->dst_port, ret,
8051 pf->hw.aq.asq_last_status);
8052 else
8053 dev_info(&pf->pdev->dev,
8054 "%s cloud filter for VSI: %d\n",
8055 add ? "Added" : "Deleted", filter->seid);
8056 return ret;
8057}
8058
8059
8060
8061
8062
8063
8064
8065
8066
8067
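/**
 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
 * @vsi: pointer to VSI
 * @filter: cloud filter rule
 * @add: if true, add, if false, delete
 *
 * Add or delete a cloud filter for a specific flow spec using the big
 * buffer admin queue command. Returns 0 on success.
 **/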
8068int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
8069 struct i40e_cloud_filter *filter,
8070 bool add)
8071{
8072 struct i40e_aqc_cloud_filters_element_bb cld_filter;
8073 struct i40e_pf *pf = vsi->back;
8074 int ret;
8075
8076 /* Both (src/dst) valid mac_addr are not supported */
8077 if ((is_valid_ether_addr(filter->dst_mac) &&
8078 is_valid_ether_addr(filter->src_mac)) ||
8079 (is_multicast_ether_addr(filter->dst_mac) &&
8080 is_multicast_ether_addr(filter->src_mac)))
8081 return -EOPNOTSUPP;
8082
8083 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
8084 * ports are not supported via big buffer now.
8085 */
8086 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
8087 return -EOPNOTSUPP;
8088
8089 /* adding filter using src_port/src_ip is not supported at this stage */
8090 if (filter->src_port ||
8091 (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8092 !ipv6_addr_any(&filter->ip.v6.src_ip6))
8093 return -EOPNOTSUPP;
8094
8095 memset(&cld_filter, 0, sizeof(cld_filter));
8096
8097
8098 i40e_set_cld_element(filter, &cld_filter.element);
8099
8100 if (is_valid_ether_addr(filter->dst_mac) ||
8101 is_valid_ether_addr(filter->src_mac) ||
8102 is_multicast_ether_addr(filter->dst_mac) ||
8103 is_multicast_ether_addr(filter->src_mac)) {
8104
8105 if (filter->dst_ipv4)
8106 return -EOPNOTSUPP;
8107
8108
8109
8110
8111
8112 cld_filter.element.flags =
8113 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
8114
8115 if (filter->vlan_id) {
8116 cld_filter.element.flags =
8117 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
8118 }
8119
8120 } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8121 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
8122 cld_filter.element.flags =
8123 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
8124 if (filter->n_proto == ETH_P_IPV6)
8125 cld_filter.element.flags |=
8126 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8127 else
8128 cld_filter.element.flags |=
8129 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8130 } else {
8131 dev_err(&pf->pdev->dev,
8132 "either mac or ip has to be valid for cloud filter\n");
8133 return -EINVAL;
8134 }
8135
8136
8137 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
8138 be16_to_cpu(filter->dst_port);
8139
8140 if (add) {
8141
8142 ret = i40e_validate_and_set_switch_mode(vsi);
8143 if (ret) {
8144 dev_err(&pf->pdev->dev,
8145 "failed to set switch mode, ret %d\n",
8146 ret);
8147 return ret;
8148 }
8149
8150 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
8151 &cld_filter, 1);
8152 } else {
8153 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
8154 &cld_filter, 1);
8155 }
8156
8157 if (ret)
8158 dev_dbg(&pf->pdev->dev,
8159 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
8160 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
8161 else
8162 dev_info(&pf->pdev->dev,
8163 "%s cloud filter for VSI: %d, L4 port: %d\n",
8164 add ? "add" : "delete", filter->seid,
8165 ntohs(filter->dst_port));
8166 return ret;
8167}
8168
8169
8170
8171
8172
8173
8174
8175
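/**
 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
 * @vsi: Pointer to VSI
 * @f: Pointer to struct flow_cls_offload
 * @filter: Pointer to cloud filter structure
 **/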
8176static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
8177 struct flow_cls_offload *f,
8178 struct i40e_cloud_filter *filter)
8179{
8180 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
8181 struct flow_dissector *dissector = rule->match.dissector;
8182 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
8183 struct i40e_pf *pf = vsi->back;
8184 u8 field_flags = 0;
8185
8186 if (dissector->used_keys &
8187 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
8188 BIT(FLOW_DISSECTOR_KEY_BASIC) |
8189 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
8190 BIT(FLOW_DISSECTOR_KEY_VLAN) |
8191 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
8192 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
8193 BIT(FLOW_DISSECTOR_KEY_PORTS) |
8194 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
8195 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
8196 dissector->used_keys);
8197 return -EOPNOTSUPP;
8198 }
8199
8200 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
8201 struct flow_match_enc_keyid match;
8202
8203 flow_rule_match_enc_keyid(rule, &match);
8204 if (match.mask->keyid != 0)
8205 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
8206
8207 filter->tenant_id = be32_to_cpu(match.key->keyid);
8208 }
8209
8210 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
8211 struct flow_match_basic match;
8212
8213 flow_rule_match_basic(rule, &match);
8214 n_proto_key = ntohs(match.key->n_proto);
8215 n_proto_mask = ntohs(match.mask->n_proto);
8216
8217 if (n_proto_key == ETH_P_ALL) {
8218 n_proto_key = 0;
8219 n_proto_mask = 0;
8220 }
8221 filter->n_proto = n_proto_key & n_proto_mask;
8222 filter->ip_proto = match.key->ip_proto;
8223 }
8224
8225 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
8226 struct flow_match_eth_addrs match;
8227
8228 flow_rule_match_eth_addrs(rule, &match);
8229
8230
8231 if (!is_zero_ether_addr(match.mask->dst)) {
8232 if (is_broadcast_ether_addr(match.mask->dst)) {
8233 field_flags |= I40E_CLOUD_FIELD_OMAC;
8234 } else {
8235 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
8236 match.mask->dst);
8237 return I40E_ERR_CONFIG;
8238 }
8239 }
8240
8241 if (!is_zero_ether_addr(match.mask->src)) {
8242 if (is_broadcast_ether_addr(match.mask->src)) {
8243 field_flags |= I40E_CLOUD_FIELD_IMAC;
8244 } else {
8245 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
8246 match.mask->src);
8247 return I40E_ERR_CONFIG;
8248 }
8249 }
8250 ether_addr_copy(filter->dst_mac, match.key->dst);
8251 ether_addr_copy(filter->src_mac, match.key->src);
8252 }
8253
8254 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
8255 struct flow_match_vlan match;
8256
8257 flow_rule_match_vlan(rule, &match);
8258 if (match.mask->vlan_id) {
8259 if (match.mask->vlan_id == VLAN_VID_MASK) {
8260 field_flags |= I40E_CLOUD_FIELD_IVLAN;
8261
8262 } else {
8263 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
8264 match.mask->vlan_id);
8265 return I40E_ERR_CONFIG;
8266 }
8267 }
8268
8269 filter->vlan_id = cpu_to_be16(match.key->vlan_id);
8270 }
8271
8272 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
8273 struct flow_match_control match;
8274
8275 flow_rule_match_control(rule, &match);
8276 addr_type = match.key->addr_type;
8277 }
8278
8279 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
8280 struct flow_match_ipv4_addrs match;
8281
8282 flow_rule_match_ipv4_addrs(rule, &match);
8283 if (match.mask->dst) {
8284 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
8285 field_flags |= I40E_CLOUD_FIELD_IIP;
8286 } else {
8287 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
8288 &match.mask->dst);
8289 return I40E_ERR_CONFIG;
8290 }
8291 }
8292
8293 if (match.mask->src) {
8294 if (match.mask->src == cpu_to_be32(0xffffffff)) {
8295 field_flags |= I40E_CLOUD_FIELD_IIP;
8296 } else {
8297 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
8298 &match.mask->src);
8299 return I40E_ERR_CONFIG;
8300 }
8301 }
8302
8303 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
8304 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
8305 return I40E_ERR_CONFIG;
8306 }
8307 filter->dst_ipv4 = match.key->dst;
8308 filter->src_ipv4 = match.key->src;
8309 }
8310
8311 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
8312 struct flow_match_ipv6_addrs match;
8313
8314 flow_rule_match_ipv6_addrs(rule, &match);
8315
8316
8317
8318
8319 if (ipv6_addr_loopback(&match.key->dst) ||
8320 ipv6_addr_loopback(&match.key->src)) {
8321 dev_err(&pf->pdev->dev,
8322 "Bad ipv6, addr is LOOPBACK\n");
8323 return I40E_ERR_CONFIG;
8324 }
8325 if (!ipv6_addr_any(&match.mask->dst) ||
8326 !ipv6_addr_any(&match.mask->src))
8327 field_flags |= I40E_CLOUD_FIELD_IIP;
8328
8329 memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
8330 sizeof(filter->src_ipv6));
8331 memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
8332 sizeof(filter->dst_ipv6));
8333 }
8334
8335 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
8336 struct flow_match_ports match;
8337
8338 flow_rule_match_ports(rule, &match);
8339 if (match.mask->src) {
8340 if (match.mask->src == cpu_to_be16(0xffff)) {
8341 field_flags |= I40E_CLOUD_FIELD_IIP;
8342 } else {
8343 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
8344 be16_to_cpu(match.mask->src));
8345 return I40E_ERR_CONFIG;
8346 }
8347 }
8348
8349 if (match.mask->dst) {
8350 if (match.mask->dst == cpu_to_be16(0xffff)) {
8351 field_flags |= I40E_CLOUD_FIELD_IIP;
8352 } else {
8353 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
8354 be16_to_cpu(match.mask->dst));
8355 return I40E_ERR_CONFIG;
8356 }
8357 }
8358
8359 filter->dst_port = match.key->dst;
8360 filter->src_port = match.key->src;
8361
8362 switch (filter->ip_proto) {
8363 case IPPROTO_TCP:
8364 case IPPROTO_UDP:
8365 break;
8366 default:
8367 dev_err(&pf->pdev->dev,
8368 "Only UDP and TCP transport are supported\n");
8369 return -EINVAL;
8370 }
8371 }
8372 filter->flags = field_flags;
8373 return 0;
8374}
8375
8376
8377
8378
8379
8380
8381
8382
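/**
 * i40e_handle_tclass - Forward the flow to a traffic class on the device
 * @vsi: Pointer to VSI
 * @tc: traffic class index on the device
 * @filter: Pointer to cloud filter structure
 **/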
8383static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
8384 struct i40e_cloud_filter *filter)
8385{
8386 struct i40e_channel *ch, *ch_tmp;
8387
8388
8389 if (tc == 0) {
8390 filter->seid = vsi->seid;
8391 return 0;
8392 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
8393 if (!filter->dst_port) {
8394 dev_err(&vsi->back->pdev->dev,
8395 "Specify destination port to direct to traffic class that is not default\n");
8396 return -EINVAL;
8397 }
8398 if (list_empty(&vsi->ch_list))
8399 return -EINVAL;
8400 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
8401 list) {
8402 if (ch->seid == vsi->tc_seid_map[tc])
8403 filter->seid = ch->seid;
8404 }
8405 return 0;
8406 }
8407 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
8408 return -EINVAL;
8409}
8410
8411
8412
8413
8414
8415
8416
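/**
 * i40e_configure_clsflower - Configure tc flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct flow_cls_offload
 **/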
8417static int i40e_configure_clsflower(struct i40e_vsi *vsi,
8418 struct flow_cls_offload *cls_flower)
8419{
8420 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
8421 struct i40e_cloud_filter *filter = NULL;
8422 struct i40e_pf *pf = vsi->back;
8423 int err = 0;
8424
8425 if (tc < 0) {
8426 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
8427 return -EOPNOTSUPP;
8428 }
8429
8430 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
8431 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
8432 return -EBUSY;
8433
8434 if (pf->fdir_pf_active_filters ||
8435 (!hlist_empty(&pf->fdir_filter_list))) {
8436 dev_err(&vsi->back->pdev->dev,
8437 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
8438 return -EINVAL;
8439 }
8440
8441 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
8442 dev_err(&vsi->back->pdev->dev,
8443 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
8444 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8445 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8446 }
8447
8448 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
8449 if (!filter)
8450 return -ENOMEM;
8451
8452 filter->cookie = cls_flower->cookie;
8453
8454 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
8455 if (err < 0)
8456 goto err;
8457
8458 err = i40e_handle_tclass(vsi, tc, filter);
8459 if (err < 0)
8460 goto err;
8461
8462
8463 if (filter->dst_port)
8464 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
8465 else
8466 err = i40e_add_del_cloud_filter(vsi, filter, true);
8467
8468 if (err) {
8469 dev_err(&pf->pdev->dev,
8470 "Failed to add cloud filter, err %s\n",
8471 i40e_stat_str(&pf->hw, err));
8472 goto err;
8473 }
8474
8475
8476 INIT_HLIST_NODE(&filter->cloud_node);
8477
8478 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
8479
8480 pf->num_cloud_filters++;
8481
8482 return err;
8483err:
8484 kfree(filter);
8485 return err;
8486}
8487
8488
8489
8490
8491
8492
8493
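/**
 * i40e_find_cloud_filter - Find the cloud filter in the list
 * @vsi: Pointer to VSI
 * @cookie: filter specific cookie
 **/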
8494static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
8495 unsigned long *cookie)
8496{
8497 struct i40e_cloud_filter *filter = NULL;
8498 struct hlist_node *node2;
8499
8500 hlist_for_each_entry_safe(filter, node2,
8501 &vsi->back->cloud_filter_list, cloud_node)
8502 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
8503 return filter;
8504 return NULL;
8505}
8506
8507
8508
8509
8510
8511
8512
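/**
 * i40e_delete_clsflower - Remove tc flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct flow_cls_offload
 **/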
8513static int i40e_delete_clsflower(struct i40e_vsi *vsi,
8514 struct flow_cls_offload *cls_flower)
8515{
8516 struct i40e_cloud_filter *filter = NULL;
8517 struct i40e_pf *pf = vsi->back;
8518 int err = 0;
8519
8520 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
8521
8522 if (!filter)
8523 return -EINVAL;
8524
8525 hash_del(&filter->cloud_node);
8526
8527 if (filter->dst_port)
8528 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
8529 else
8530 err = i40e_add_del_cloud_filter(vsi, filter, false);
8531
8532 kfree(filter);
8533 if (err) {
8534 dev_err(&pf->pdev->dev,
8535 "Failed to delete cloud filter, err %s\n",
8536 i40e_stat_str(&pf->hw, err));
8537 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
8538 }
8539
8540 pf->num_cloud_filters--;
8541 if (!pf->num_cloud_filters)
8542 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8543 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8544 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8545 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8546 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8547 }
8548 return 0;
8549}
8550
8551
8552
8553
8554
8555
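/**
 * i40e_setup_tc_cls_flower - flower classifier offloads
 * @np: net device private structure
 * @cls_flower: offload data
 **/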
8556static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
8557 struct flow_cls_offload *cls_flower)
8558{
8559 struct i40e_vsi *vsi = np->vsi;
8560
8561 switch (cls_flower->command) {
8562 case FLOW_CLS_REPLACE:
8563 return i40e_configure_clsflower(vsi, cls_flower);
8564 case FLOW_CLS_DESTROY:
8565 return i40e_delete_clsflower(vsi, cls_flower);
8566 case FLOW_CLS_STATS:
8567 return -EOPNOTSUPP;
8568 default:
8569 return -EOPNOTSUPP;
8570 }
8571}
8572
8573static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
8574 void *cb_priv)
8575{
8576 struct i40e_netdev_priv *np = cb_priv;
8577
8578 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
8579 return -EOPNOTSUPP;
8580
8581 switch (type) {
8582 case TC_SETUP_CLSFLOWER:
8583 return i40e_setup_tc_cls_flower(np, type_data);
8584
8585 default:
8586 return -EOPNOTSUPP;
8587 }
8588}
8589
8590static LIST_HEAD(i40e_block_cb_list);
8591
8592static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8593 void *type_data)
8594{
8595 struct i40e_netdev_priv *np = netdev_priv(netdev);
8596
8597 switch (type) {
8598 case TC_SETUP_QDISC_MQPRIO:
8599 return i40e_setup_tc(netdev, type_data);
8600 case TC_SETUP_BLOCK:
8601 return flow_block_cb_setup_simple(type_data,
8602 &i40e_block_cb_list,
8603 i40e_setup_tc_block_cb,
8604 np, np, true);
8605 default:
8606 return -EOPNOTSUPP;
8607 }
8608}
8609
8610
8611
8612
8613
8614
8615
8616
8617
8618
8619
8620
8621
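/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IP on).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/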
8622int i40e_open(struct net_device *netdev)
8623{
8624 struct i40e_netdev_priv *np = netdev_priv(netdev);
8625 struct i40e_vsi *vsi = np->vsi;
8626 struct i40e_pf *pf = vsi->back;
8627 int err;
8628
8629
8630 if (test_bit(__I40E_TESTING, pf->state) ||
8631 test_bit(__I40E_BAD_EEPROM, pf->state))
8632 return -EBUSY;
8633
8634 netif_carrier_off(netdev);
8635
8636 if (i40e_force_link_state(pf, true))
8637 return -EAGAIN;
8638
8639 err = i40e_vsi_open(vsi);
8640 if (err)
8641 return err;
8642
8643
8644 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
8645 TCP_FLAG_FIN) >> 16);
8646 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
8647 TCP_FLAG_FIN |
8648 TCP_FLAG_CWR) >> 16);
8649 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
8650 udp_tunnel_get_rx_info(netdev);
8651
8652 return 0;
8653}
8654
8655
8656
8657
8658
8659
8660
8661
8662
8663
8664
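/**
 * i40e_vsi_open - Called when a network interface is made active
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI.
 *
 * Returns 0 on success, negative value on failure
 *
 * Note: expects to be called while under rtnl_lock()
 **/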
8665int i40e_vsi_open(struct i40e_vsi *vsi)
8666{
8667 struct i40e_pf *pf = vsi->back;
8668 char int_name[I40E_INT_NAME_STR_LEN];
8669 int err;
8670
8671
8672 err = i40e_vsi_setup_tx_resources(vsi);
8673 if (err)
8674 goto err_setup_tx;
8675 err = i40e_vsi_setup_rx_resources(vsi);
8676 if (err)
8677 goto err_setup_rx;
8678
8679 err = i40e_vsi_configure(vsi);
8680 if (err)
8681 goto err_setup_rx;
8682
8683 if (vsi->netdev) {
8684 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
8685 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
8686 err = i40e_vsi_request_irq(vsi, int_name);
8687 if (err)
8688 goto err_setup_rx;
8689
8690
8691 err = netif_set_real_num_tx_queues(vsi->netdev,
8692 vsi->num_queue_pairs);
8693 if (err)
8694 goto err_set_queues;
8695
8696 err = netif_set_real_num_rx_queues(vsi->netdev,
8697 vsi->num_queue_pairs);
8698 if (err)
8699 goto err_set_queues;
8700
8701 } else if (vsi->type == I40E_VSI_FDIR) {
8702 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
8703 dev_driver_string(&pf->pdev->dev),
8704 dev_name(&pf->pdev->dev));
8705 err = i40e_vsi_request_irq(vsi, int_name);
8706
8707 } else {
8708 err = -EINVAL;
8709 goto err_setup_rx;
8710 }
8711
8712 err = i40e_up_complete(vsi);
8713 if (err)
8714 goto err_up_complete;
8715
8716 return 0;
8717
8718err_up_complete:
8719 i40e_down(vsi);
8720err_set_queues:
8721 i40e_vsi_free_irq(vsi);
8722err_setup_rx:
8723 i40e_vsi_free_rx_resources(vsi);
8724err_setup_tx:
8725 i40e_vsi_free_tx_resources(vsi);
8726 if (vsi == pf->vsi[pf->lan_vsi])
8727 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
8728
8729 return err;
8730}
8731
8732
8733
8734
8735
8736
8737
8738
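/**
 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
 * @pf: Pointer to PF
 *
 * This function destroys the hlist where all the Flow Director
 * filters were saved.
 **/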
8739static void i40e_fdir_filter_exit(struct i40e_pf *pf)
8740{
8741 struct i40e_fdir_filter *filter;
8742 struct i40e_flex_pit *pit_entry, *tmp;
8743 struct hlist_node *node2;
8744
8745 hlist_for_each_entry_safe(filter, node2,
8746 &pf->fdir_filter_list, fdir_node) {
8747 hlist_del(&filter->fdir_node);
8748 kfree(filter);
8749 }
8750
8751 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
8752 list_del(&pit_entry->list);
8753 kfree(pit_entry);
8754 }
8755 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
8756
8757 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
8758 list_del(&pit_entry->list);
8759 kfree(pit_entry);
8760 }
8761 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
8762
8763 pf->fdir_pf_active_filters = 0;
8764 i40e_reset_fdir_filter_cnt(pf);
8765
8766
8767 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8768 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8769 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8770
8771
8772 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
8773 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
8774 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8775
8776
8777 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
8778 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8779 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8780
8781
8782 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
8783 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
8784 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8785
8786
8787 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
8788 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8789 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8790
8791
8792 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
8793 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
8794 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8795
8796
8797 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
8798 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8799
8800 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
8801 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8802
8803
8804 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
8805 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8806
8807 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
8808 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8809}
8810
8811
8812
8813
8814
8815
8816
8817
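/**
 * i40e_cloud_filter_exit - Cleans up the cloud filters
 * @pf: Pointer to PF
 *
 * This function destroys the hlist which keeps all the cloud filters.
 **/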
8818static void i40e_cloud_filter_exit(struct i40e_pf *pf)
8819{
8820 struct i40e_cloud_filter *cfilter;
8821 struct hlist_node *node;
8822
8823 hlist_for_each_entry_safe(cfilter, node,
8824 &pf->cloud_filter_list, cloud_node) {
8825 hlist_del(&cfilter->cloud_node);
8826 kfree(cfilter);
8827 }
8828 pf->num_cloud_filters = 0;
8829
8830 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8831 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8832 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8833 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8834 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8835 }
8836}
8837
8838
8839
8840
8841
8842
8843
8844
8845
8846
8847
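/**
 * i40e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * this netdev interface is disabled.
 *
 * Returns 0, this is not allowed to fail
 **/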
8848int i40e_close(struct net_device *netdev)
8849{
8850 struct i40e_netdev_priv *np = netdev_priv(netdev);
8851 struct i40e_vsi *vsi = np->vsi;
8852
8853 i40e_vsi_close(vsi);
8854
8855 return 0;
8856}
8857
8858
8859
8860
8861
8862
8863
8864
8865
8866
8867
8868
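/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 **/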
8869void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
8870{
8871 u32 val;
8872
8873 /* do the biggest reset we know how to handle */
8874 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
8875
8876 /* Request a Global Reset
8877 *
8878 * This will start the chip's countdown to the actual full
8879 * chip reset event, and a warning interrupt to be sent
8880 * to all PFs, including the requestor.  Our handler
8881 * for the warning interrupt will deal with the shutdown
8882 * and recovery of the switch setup.
8883 */
8884 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
8885 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8886 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
8887 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
8888
8889 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
8890
8891 /* Request a Core Reset
8892 *
8893 * Same as Global Reset, except does *not* include the MAC/PHY
8894 */
8895 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
8896 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8897 val |= I40E_GLGEN_RTRIG_CORER_MASK;
8898 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
8899 i40e_flush(&pf->hw);
8900
8901 } else if (reset_flags & I40E_PF_RESET_FLAG) {
8902
8903 /* Request a PF Reset
8904 *
8905 * Resets only the PF-specific registers
8906 *
8907 * This goes directly to the tear-down and rebuild of
8908 * the switch, since we need to do all the recovery as
8909 * for the Core Reset.
8910 */
8911 dev_dbg(&pf->pdev->dev, "PFR requested\n");
8912 i40e_handle_reset_warning(pf, lock_acquired);
8913
8914 } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
8915 /* Request a PF Reset and rebuild
8916 *
8917 * Resets the PF and reinitializes the PF's VSIs.
8918 */
8919 i40e_prep_for_reset(pf);
8920 i40e_reset_and_rebuild(pf, true, lock_acquired);
8921 dev_info(&pf->pdev->dev,
8922 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
8923 "FW LLDP is disabled\n" :
8924 "FW LLDP is enabled\n");
8925
8926 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
8927 int v;
8928
8929
8930 dev_info(&pf->pdev->dev,
8931 "VSI reinit requested\n");
8932 for (v = 0; v < pf->num_alloc_vsi; v++) {
8933 struct i40e_vsi *vsi = pf->vsi[v];
8934
8935 if (vsi != NULL &&
8936 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
8937 vsi->state))
8938 i40e_vsi_reinit_locked(pf->vsi[v]);
8939 }
8940 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
8941 int v;
8942
8943
8944 dev_info(&pf->pdev->dev, "VSI down requested\n");
8945 for (v = 0; v < pf->num_alloc_vsi; v++) {
8946 struct i40e_vsi *vsi = pf->vsi[v];
8947
8948 if (vsi != NULL &&
8949 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
8950 vsi->state)) {
8951 set_bit(__I40E_VSI_DOWN, vsi->state);
8952 i40e_down(vsi);
8953 }
8954 }
8955 } else {
8956 dev_info(&pf->pdev->dev,
8957 "bad reset request 0x%08x\n", reset_flags);
8958 }
8959}
8960
8961#ifdef CONFIG_I40E_DCB
8962
8963
8964
8965
8966
8967
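/**
 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
 * @pf: board private structure
 * @old_cfg: current DCB config
 * @new_cfg: new DCB config
 **/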
8968bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
8969 struct i40e_dcbx_config *old_cfg,
8970 struct i40e_dcbx_config *new_cfg)
8971{
8972 bool need_reconfig = false;
8973
8974
8975 if (memcmp(&new_cfg->etscfg,
8976 &old_cfg->etscfg,
8977 sizeof(new_cfg->etscfg))) {
8978
8979 if (memcmp(&new_cfg->etscfg.prioritytable,
8980 &old_cfg->etscfg.prioritytable,
8981 sizeof(new_cfg->etscfg.prioritytable))) {
8982 need_reconfig = true;
8983 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
8984 }
8985
8986 if (memcmp(&new_cfg->etscfg.tcbwtable,
8987 &old_cfg->etscfg.tcbwtable,
8988 sizeof(new_cfg->etscfg.tcbwtable)))
8989 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
8990
8991 if (memcmp(&new_cfg->etscfg.tsatable,
8992 &old_cfg->etscfg.tsatable,
8993 sizeof(new_cfg->etscfg.tsatable)))
8994 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
8995 }
8996
8997
8998 if (memcmp(&new_cfg->pfc,
8999 &old_cfg->pfc,
9000 sizeof(new_cfg->pfc))) {
9001 need_reconfig = true;
9002 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
9003 }
9004
9005
9006 if (memcmp(&new_cfg->app,
9007 &old_cfg->app,
9008 sizeof(new_cfg->app))) {
9009 need_reconfig = true;
9010 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
9011 }
9012
9013 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
9014 return need_reconfig;
9015}
9016
9017
9018
9019
9020
9021
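/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/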
9022static int i40e_handle_lldp_event(struct i40e_pf *pf,
9023 struct i40e_arq_event_info *e)
9024{
9025 struct i40e_aqc_lldp_get_mib *mib =
9026 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
9027 struct i40e_hw *hw = &pf->hw;
9028 struct i40e_dcbx_config tmp_dcbx_cfg;
9029 bool need_reconfig = false;
9030 int ret = 0;
9031 u8 type;
9032
9033 /* X710-T*L 2.5G and 5G speeds don't support DCB */
9034 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9035 (hw->phy.link_info.link_speed &
9036 ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
9037 !(pf->flags & I40E_FLAG_DCB_CAPABLE))
9038 /* let firmware decide if the DCB should be disabled */
9039 pf->flags |= I40E_FLAG_DCB_CAPABLE;
9040
9041
9042 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
9043 return ret;
9044
9045
9046 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
9047 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
9048 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
9049 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
9050 return ret;
9051
9052
9053 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
9054 dev_dbg(&pf->pdev->dev,
9055 "LLDP event mib type %s\n", type ? "remote" : "local");
9056 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
9057
9058 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
9059 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
9060 &hw->remote_dcbx_config);
9061 goto exit;
9062 }
9063
9064
9065 tmp_dcbx_cfg = hw->local_dcbx_config;
9066
9067
9068 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
9069
9070 ret = i40e_get_dcb_config(&pf->hw);
9071 if (ret) {
9072
9073 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9074 (hw->phy.link_info.link_speed &
9075 (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
9076 dev_warn(&pf->pdev->dev,
9077 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
9078 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9079 } else {
9080 dev_info(&pf->pdev->dev,
9081 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
9082 i40e_stat_str(&pf->hw, ret),
9083 i40e_aq_str(&pf->hw,
9084 pf->hw.aq.asq_last_status));
9085 }
9086 goto exit;
9087 }
9088
9089
9090 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
9091 sizeof(tmp_dcbx_cfg))) {
9092 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
9093 goto exit;
9094 }
9095
9096 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
9097 &hw->local_dcbx_config);
9098
9099 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
9100
9101 if (!need_reconfig)
9102 goto exit;
9103
9104
9105 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
9106 pf->flags |= I40E_FLAG_DCB_ENABLED;
9107 else
9108 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
9109
9110 set_bit(__I40E_PORT_SUSPENDED, pf->state);
9111
9112 i40e_pf_quiesce_all_vsi(pf);
9113
9114
9115 i40e_dcb_reconfigure(pf);
9116
9117 ret = i40e_resume_port_tx(pf);
9118
9119 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
9120
9121 if (ret)
9122 goto exit;
9123
9124
9125 ret = i40e_pf_wait_queues_disabled(pf);
9126 if (ret) {
9127
9128 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9129 i40e_service_event_schedule(pf);
9130 } else {
9131 i40e_pf_unquiesce_all_vsi(pf);
9132 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
9133 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
9134 }
9135
9136exit:
9137 return ret;
9138}
9139#endif
9140
9141
9142
9143
9144
9145
9146
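/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 **/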
9147void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
9148{
9149 rtnl_lock();
9150 i40e_do_reset(pf, reset_flags, true);
9151 rtnl_unlock();
9152}
9153
9154
9155
9156
9157
9158
9159
9160
9161
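/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VF queues
 **/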
9162static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
9163 struct i40e_arq_event_info *e)
9164{
9165 struct i40e_aqc_lan_overflow *data =
9166 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
9167 u32 queue = le32_to_cpu(data->prtdcb_rupto);
9168 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
9169 struct i40e_hw *hw = &pf->hw;
9170 struct i40e_vf *vf;
9171 u16 vf_id;
9172
9173 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
9174 queue, qtx_ctl);
9175
9176 /* Queue belongs to VF, find the VF and issue VF reset */
9177 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
9178 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
9179 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
9180 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
9181 vf_id -= hw->func_caps.vf_base_id;
9182 vf = &pf->vf[vf_id];
9183 i40e_vc_notify_vf_reset(vf);
9184 /* Allow VF to process pending reset notification */
9185 msleep(20);
9186 i40e_reset_vf(vf, false);
9187 }
9188}
9189
9190
9191
9192
9193
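/**
 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
 * @pf: board private structure
 **/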
9194u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
9195{
9196 u32 val, fcnt_prog;
9197
9198 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9199 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
9200 return fcnt_prog;
9201}
9202
9203
9204
9205
9206
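/**
 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
 * @pf: board private structure
 **/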
9207u32 i40e_get_current_fd_count(struct i40e_pf *pf)
9208{
9209 u32 val, fcnt_prog;
9210
9211 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9212 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
9213 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
9214 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
9215 return fcnt_prog;
9216}
9217
9218
9219
9220
9221
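/**
 * i40e_get_global_fd_count - Get total FD filters programmed on device
 * @pf: board private structure
 **/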
9222u32 i40e_get_global_fd_count(struct i40e_pf *pf)
9223{
9224 u32 val, fcnt_prog;
9225
9226 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
9227 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
9228 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
9229 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
9230 return fcnt_prog;
9231}
9232
9233
9234
9235
9236
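/**
 * i40e_reenable_fdir_sb - Restore FDir SB capability
 * @pf: board private structure
 **/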
9237static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
9238{
9239 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
9240 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
9241 (I40E_DEBUG_FD & pf->hw.debug_mask))
9242 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
9243}
9244
9245
9246
9247
9248
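/**
 * i40e_reenable_fdir_atr - Restore FDir ATR capability
 * @pf: board private structure
 **/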
9249static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
9250{
9251 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
9252
9253
9254
9255
9256
9257 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9258 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9259 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9260
9261 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
9262 (I40E_DEBUG_FD & pf->hw.debug_mask))
9263 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
9264 }
9265}
9266
9267
9268
9269
9270
9271
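/**
 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
 * @pf: board private structure
 * @filter: FDir filter to delete
 **/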
9272static void i40e_delete_invalid_filter(struct i40e_pf *pf,
9273 struct i40e_fdir_filter *filter)
9274{
9275
9276 pf->fdir_pf_active_filters--;
9277 pf->fd_inv = 0;
9278
9279 switch (filter->flow_type) {
9280 case TCP_V4_FLOW:
9281 pf->fd_tcp4_filter_cnt--;
9282 break;
9283 case UDP_V4_FLOW:
9284 pf->fd_udp4_filter_cnt--;
9285 break;
9286 case SCTP_V4_FLOW:
9287 pf->fd_sctp4_filter_cnt--;
9288 break;
9289 case TCP_V6_FLOW:
9290 pf->fd_tcp6_filter_cnt--;
9291 break;
9292 case UDP_V6_FLOW:
9293 pf->fd_udp6_filter_cnt--;
9294 break;
9295 case SCTP_V6_FLOW:
9296 pf->fd_sctp6_filter_cnt--;
9297 break;
9298 case IP_USER_FLOW:
9299 switch (filter->ipl4_proto) {
9300 case IPPROTO_TCP:
9301 pf->fd_tcp4_filter_cnt--;
9302 break;
9303 case IPPROTO_UDP:
9304 pf->fd_udp4_filter_cnt--;
9305 break;
9306 case IPPROTO_SCTP:
9307 pf->fd_sctp4_filter_cnt--;
9308 break;
9309 case IPPROTO_IP:
9310 pf->fd_ip4_filter_cnt--;
9311 break;
9312 }
9313 break;
9314 case IPV6_USER_FLOW:
9315 switch (filter->ipl4_proto) {
9316 case IPPROTO_TCP:
9317 pf->fd_tcp6_filter_cnt--;
9318 break;
9319 case IPPROTO_UDP:
9320 pf->fd_udp6_filter_cnt--;
9321 break;
9322 case IPPROTO_SCTP:
9323 pf->fd_sctp6_filter_cnt--;
9324 break;
9325 case IPPROTO_IP:
9326 pf->fd_ip6_filter_cnt--;
9327 break;
9328 }
9329 break;
9330 }
9331
9332
9333 hlist_del(&filter->fdir_node);
9334 kfree(filter);
9335}
9336
9337
9338
9339
9340
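/**
 * i40e_fdir_check_and_reenable - Re-enable FD ATR or SB if conditions allow
 * @pf: board private structure
 **/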
9341void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
9342{
9343 struct i40e_fdir_filter *filter;
9344 u32 fcnt_prog, fcnt_avail;
9345 struct hlist_node *node;
9346
9347 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9348 return;
9349
9350
9351 fcnt_prog = i40e_get_global_fd_count(pf);
9352 fcnt_avail = pf->fdir_pf_filter_count;
9353 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
9354 (pf->fd_add_err == 0) ||
9355 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
9356 i40e_reenable_fdir_sb(pf);
9357
9358
9359
9360
9361
9362 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
9363 pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0)
9364 i40e_reenable_fdir_atr(pf);
9365
9366
9367 if (pf->fd_inv > 0) {
9368 hlist_for_each_entry_safe(filter, node,
9369 &pf->fdir_filter_list, fdir_node)
9370 if (filter->fd_id == pf->fd_inv)
9371 i40e_delete_invalid_filter(pf, filter);
9372 }
9373}
9374
9375#define I40E_MIN_FD_FLUSH_INTERVAL 10
9376#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
9377
9378
9379
9380
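/**
 * i40e_fdir_flush_and_replay - Flush the FD table and replay SB filters
 * @pf: board private structure
 **/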
9381static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
9382{
9383 unsigned long min_flush_time;
9384 int flush_wait_retry = 50;
9385 bool disable_atr = false;
9386 int fd_room;
9387 int reg;
9388
9389 if (!time_after(jiffies, pf->fd_flush_timestamp +
9390 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
9391 return;
9392
9393
9394
9395
9396 min_flush_time = pf->fd_flush_timestamp +
9397 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
9398 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
9399
9400 if (!(time_after(jiffies, min_flush_time)) &&
9401 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
9402 if (I40E_DEBUG_FD & pf->hw.debug_mask)
9403 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
9404 disable_atr = true;
9405 }
9406
9407 pf->fd_flush_timestamp = jiffies;
9408 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9409
9410 wr32(&pf->hw, I40E_PFQF_CTL_1,
9411 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
9412 i40e_flush(&pf->hw);
9413 pf->fd_flush_cnt++;
9414 pf->fd_add_err = 0;
9415 do {
9416
9417 usleep_range(5000, 6000);
9418 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
9419 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
9420 break;
9421 } while (flush_wait_retry--);
9422 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
9423 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
9424 } else {
9425
9426 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
9427 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
9428 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9429 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
9430 if (I40E_DEBUG_FD & pf->hw.debug_mask)
9431 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
9432 }
9433}
9434
9435
9436
9437
9438
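/**
 * i40e_get_current_atr_cnt - Get the count of FD ATR filters programmed
 * @pf: board private structure
 **/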
9439u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
9440{
9441 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
9442}
9443
9444
9445
9446
9447
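/**
 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
 * @pf: board private structure
 **/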
9448static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
9449{
9450
9451
9452 if (test_bit(__I40E_DOWN, pf->state))
9453 return;
9454
9455 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9456 i40e_fdir_flush_and_replay(pf);
9457
9458 i40e_fdir_check_and_reenable(pf);
9459
9460}
9461
9462
9463
9464
9465
9466
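/**
 * i40e_vsi_link_event - notify a VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 **/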
9467static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
9468{
9469 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
9470 return;
9471
9472 switch (vsi->type) {
9473 case I40E_VSI_MAIN:
9474 if (!vsi->netdev || !vsi->netdev_registered)
9475 break;
9476
9477 if (link_up) {
9478 netif_carrier_on(vsi->netdev);
9479 netif_tx_wake_all_queues(vsi->netdev);
9480 } else {
9481 netif_carrier_off(vsi->netdev);
9482 netif_tx_stop_all_queues(vsi->netdev);
9483 }
9484 break;
9485
9486 case I40E_VSI_SRIOV:
9487 case I40E_VSI_VMDQ2:
9488 case I40E_VSI_CTRL:
9489 case I40E_VSI_IWARP:
9490 case I40E_VSI_MIRROR:
9491 default:
9492
9493 break;
9494 }
9495}
9496
9497
9498
9499
9500
9501
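/**
 * i40e_veb_link_event - notify elements on the veb of a link event
 * @veb: veb to be notified
 * @link_up: link up or down
 **/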
9502static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
9503{
9504 struct i40e_pf *pf;
9505 int i;
9506
9507 if (!veb || !veb->pf)
9508 return;
9509 pf = veb->pf;
9510
9511
9512 for (i = 0; i < I40E_MAX_VEB; i++)
9513 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
9514 i40e_veb_link_event(pf->veb[i], link_up);
9515
9516
9517 for (i = 0; i < pf->num_alloc_vsi; i++)
9518 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
9519 i40e_vsi_link_event(pf->vsi[i], link_up);
9520}
9521
9522
9523
9524
9525
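/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 **/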
9526static void i40e_link_event(struct i40e_pf *pf)
9527{
9528 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9529 u8 new_link_speed, old_link_speed;
9530 i40e_status status;
9531 bool new_link, old_link;
9532#ifdef CONFIG_I40E_DCB
9533 int err;
9534#endif
9535
9536 /* set this to force the get_link_status call to refresh state */
9537 pf->hw.phy.get_link_info = true;
9538 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
9539 status = i40e_get_link_status(&pf->hw, &new_link);
9540
9541
9542 if (status == I40E_SUCCESS) {
9543 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9544 } else {
9545
9546
9547
9548 set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9549 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
9550 status);
9551 return;
9552 }
9553
9554 old_link_speed = pf->hw.phy.link_info_old.link_speed;
9555 new_link_speed = pf->hw.phy.link_info.link_speed;
9556
9557 if (new_link == old_link &&
9558 new_link_speed == old_link_speed &&
9559 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
9560 new_link == netif_carrier_ok(vsi->netdev)))
9561 return;
9562
9563 i40e_print_link_message(vsi, new_link);
9564
9565
9566
9567
9568 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
9569 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
9570 else
9571 i40e_vsi_link_event(vsi, new_link);
9572
9573 if (pf->vf)
9574 i40e_vc_notify_link_state(pf);
9575
9576 if (pf->flags & I40E_FLAG_PTP)
9577 i40e_ptp_set_increment(pf);
9578#ifdef CONFIG_I40E_DCB
9579 if (new_link == old_link)
9580 return;
9581
9582 if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
9583 return;
9584
9585
9586
9587
9588 if (!new_link) {
9589 dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n");
9590 memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg));
9591 err = i40e_dcb_sw_default_config(pf);
9592 if (err) {
9593 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
9594 I40E_FLAG_DCB_ENABLED);
9595 } else {
9596 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
9597 DCB_CAP_DCBX_VER_IEEE;
9598 pf->flags |= I40E_FLAG_DCB_CAPABLE;
9599 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
9600 }
9601 }
9602#endif
9603}
9604
9605
9606
9607
9608
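/**
 * i40e_watchdog_subtask - periodic checks not using event driven response
 * @pf: board private structure
 **/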
9609static void i40e_watchdog_subtask(struct i40e_pf *pf)
9610{
9611 int i;
9612
9613
9614 if (test_bit(__I40E_DOWN, pf->state) ||
9615 test_bit(__I40E_CONFIG_BUSY, pf->state))
9616 return;
9617
9618
9619 if (time_before(jiffies, (pf->service_timer_previous +
9620 pf->service_timer_period)))
9621 return;
9622 pf->service_timer_previous = jiffies;
9623
9624 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
9625 test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
9626 i40e_link_event(pf);
9627
9628 /* Update the stats for active netdevs so the network stack
9629 * can look at updated numbers whenever the interface is
9630 * brought up or down.
9631 for (i = 0; i < pf->num_alloc_vsi; i++)
9632 if (pf->vsi[i] && pf->vsi[i]->netdev)
9633 i40e_update_stats(pf->vsi[i]);
9634
9635 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
9636
9637 for (i = 0; i < I40E_MAX_VEB; i++)
9638 if (pf->veb[i])
9639 i40e_update_veb_stats(pf->veb[i]);
9640 }
9641
9642 i40e_ptp_rx_hang(pf);
9643 i40e_ptp_tx_hang(pf);
9644}
9645
9646
9647
9648
9649
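/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 **/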
9650static void i40e_reset_subtask(struct i40e_pf *pf)
9651{
9652 u32 reset_flags = 0;
9653
9654 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
9655 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
9656 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
9657 }
9658 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
9659 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
9660 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9661 }
9662 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
9663 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
9664 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
9665 }
9666 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
9667 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
9668 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
9669 }
9670 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
9671 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
9672 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
9673 }
9674
9675
9676
9677
9678 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
9679 i40e_prep_for_reset(pf);
9680 i40e_reset(pf);
9681 i40e_rebuild(pf, false, false);
9682 }
9683
9684
9685 if (reset_flags &&
9686 !test_bit(__I40E_DOWN, pf->state) &&
9687 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
9688 i40e_do_reset(pf, reset_flags, false);
9689 }
9690}
9691
9692
9693
9694
9695
9696
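/**
 * i40e_handle_link_event - Handle link event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/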
9697static void i40e_handle_link_event(struct i40e_pf *pf,
9698 struct i40e_arq_event_info *e)
9699{
9700 struct i40e_aqc_get_link_status *status =
9701 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
9702
9703
9704
9705
9706
9707
9708
	i40e_link_event(pf);

	/* Check if module meets thermal requirements */
	if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
		dev_err(&pf->pdev->dev,
			"Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
		dev_err(&pf->pdev->dev,
			"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
	} else {
		/* check for unqualified module, if link is down, suppress
		 * the message if link was forced to be down.
		 */
		if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
		    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
		    (!(status->link_info & I40E_AQ_LINK_UP)) &&
		    (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
			dev_err(&pf->pdev->dev,
				"Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
			dev_err(&pf->pdev->dev,
				"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		}
	}
}
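
/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 **/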
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 oldval;
	u32 val;

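	/* Do not run clean AQ when a previous reset has failed */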
	if (test_bit(__I40E_RESET_FAILED, pf->state))
		return;

	/* check for error indications */
	val = rd32(&pf->hw, pf->hw.aq.arq.len);
	oldval = val;
	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
		pf->arq_overflows++;
	}
	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.arq.len, val);

	val = rd32(&pf->hw, pf->hw.aq.asq.len);
	oldval = val;
	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.asq.len, val);

	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;
		else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			rtnl_lock();
			i40e_handle_link_event(pf, &event);
			rtnl_unlock();
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_len);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
			rtnl_lock();
			i40e_handle_lldp_event(pf, &event);
			rtnl_unlock();
#endif
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_peer:
			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
			break;
		case i40e_aqc_opc_nvm_erase:
		case i40e_aqc_opc_nvm_update:
		case i40e_aqc_opc_oem_post_update:
			i40e_debug(&pf->hw, I40E_DEBUG_NVM,
				   "ARQ NVM operation 0x%04x completed\n",
				   opcode);
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ: Unknown event 0x%04x ignored\n",
				 opcode);
			break;
		}
	} while (i++ < pf->adminq_work_limit);

	if (i < pf->adminq_work_limit)
		clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);

	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);
	i40e_flush(hw);

	kfree(event.msg_buf);
}
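
/**
 * i40e_verify_eeprom - make sure eeprom is good to use
 * @pf: board private structure
 **/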
static void i40e_verify_eeprom(struct i40e_pf *pf)
{
	int err;

	err = i40e_diag_eeprom_test(&pf->hw);
	if (err) {
		/* retry in case of garbage read */
		err = i40e_diag_eeprom_test(&pf->hw);
		if (err) {
			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
				 err);
			set_bit(__I40E_BAD_EEPROM, pf->state);
		}
	}

	if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
		clear_bit(__I40E_BAD_EEPROM, pf->state);
	}
}
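
/**
 * i40e_enable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * enable switch loop back or die - no point in a return value
 **/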
static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "update vsi switch failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}
}
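
/**
 * i40e_disable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * disable switch loop back or die - no point in a return value
 **/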
static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "update vsi switch failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}
}
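
/**
 * i40e_config_bridge_mode - Configure the HW bridge mode
 * @veb: pointer to the bridge instance
 *
 * Configure the loop back mode for the LAN VSI that is downlinked to the
 * specified HW bridge.
 **/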
static void i40e_config_bridge_mode(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;

	if (pf->hw.debug_mask & I40E_DEBUG_LAN)
		dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
			 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		i40e_disable_pf_switch_lb(pf);
	else
		i40e_enable_pf_switch_lb(pf);
}
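
/**
 * i40e_reconstitute_veb - rebuild the VEB and VSIs connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * through our own index numbers because the seid's from the HW could
 * change across the reset.
 **/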
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* find the VSI that owns this VEB */
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of veb_idx %d owner VSI failed: %d\n",
			 veb->idx, ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		veb->bridge_mode = BRIDGE_MODE_VEB;
	else
		veb->bridge_mode = BRIDGE_MODE_VEPA;
	i40e_config_bridge_mode(veb);

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];

			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}
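
/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 * @list_type: AQ capability to be queried
 **/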
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
						    &data_size, list_type,
						    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENODEV;
		}
	} while (err);

	if (pf->hw.debug_mask & I40E_DEBUG_USER) {
		if (list_type == i40e_aqc_opc_list_func_capabilities) {
			dev_info(&pf->pdev->dev,
				 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
				 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
				 pf->hw.func_caps.num_msix_vectors,
				 pf->hw.func_caps.num_msix_vectors_vf,
				 pf->hw.func_caps.fd_filters_guaranteed,
				 pf->hw.func_caps.fd_filters_best_effort,
				 pf->hw.func_caps.num_tx_qp,
				 pf->hw.func_caps.num_vsis);
		} else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
			dev_info(&pf->pdev->dev,
				 "switch_mode=0x%04x, function_valid=0x%08x\n",
				 pf->hw.dev_caps.switch_mode,
				 pf->hw.dev_caps.valid_functions);
			dev_info(&pf->pdev->dev,
				 "SR-IOV=%d, num_vfs for all function=%u\n",
				 pf->hw.dev_caps.sr_iov_1_1,
				 pf->hw.dev_caps.num_vfs);
			dev_info(&pf->pdev->dev,
				 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
				 pf->hw.dev_caps.num_vsis,
				 pf->hw.dev_caps.num_rx_qp,
				 pf->hw.dev_caps.num_tx_qp);
		}
	}
	if (list_type == i40e_aqc_opc_list_func_capabilities) {
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
		       + pf->hw.func_caps.num_vfs)
		if (pf->hw.revision_id == 0 &&
		    pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
			dev_info(&pf->pdev->dev,
				 "got num_vsis %d, setting num_vsis to %d\n",
				 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
			pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
		}
	}
	return 0;
}

static int i40e_vsi_clear(struct i40e_vsi *vsi);

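/**
 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
 * @pf: board private structure
 **/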
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;

	/* quick workaround for an NVM issue that leaves a critical register
	 * uninitialized
	 */
	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
		static const u32 hkey[] = {
			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
			0x95b3a76d};
		int i;

		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
	}

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	/* find existing VSI and see if it needs configuring */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);

	/* create a new VSI if none exists */
	if (!vsi) {
		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
				     pf->vsi[pf->lan_vsi]->seid, 0);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			return;
		}
	}

	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
}
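
/**
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 **/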
static void i40e_fdir_teardown(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;

	i40e_fdir_filter_exit(pf);
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (vsi)
		i40e_vsi_release(vsi);
}
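
/**
 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
 * @vsi: PF main vsi
 * @seid: seid of main or channel VSIs
 *
 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
 * existed before reset
 **/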
static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
{
	struct i40e_cloud_filter *cfilter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;
	i40e_status ret;

	/* Add cloud filters back if they exist */
	hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
				  cloud_node) {
		if (cfilter->seid != seid)
			continue;

		if (cfilter->dst_port)
			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
								true);
		else
			ret = i40e_add_del_cloud_filter(vsi, cfilter, true);

		if (ret) {
			dev_dbg(&pf->pdev->dev,
				"Failed to rebuild cloud filter, err %s aq_err %s\n",
				i40e_stat_str(&pf->hw, ret),
				i40e_aq_str(&pf->hw,
					    pf->hw.aq.asq_last_status));
			return ret;
		}
	}
	return 0;
}
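
/**
 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
 * @vsi: PF main vsi
 **/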
static int i40e_rebuild_channels(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;
	i40e_status ret;

	if (list_empty(&vsi->ch_list))
		return 0;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (!ch->initialized)
			break;
		/* Proceed with creation of channel (VMDq2) VSI */
		ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "failed to rebuild channels using uplink_seid %u\n",
				 vsi->uplink_seid);
			return ret;
		}
		/* Reconfigure TX queues using QTX_CTL register */
		ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "failed to configure TX rings for channel %u\n",
				 ch->seid);
			return ret;
		}
		/* update 'next_base_queue' */
		vsi->next_base_queue = vsi->next_base_queue +
							ch->num_queue_pairs;
		if (ch->max_tx_rate) {
			u64 credits = ch->max_tx_rate;

			if (i40e_set_bw_limit(vsi, ch->seid,
					      ch->max_tx_rate))
				return -EINVAL;

			do_div(credits, I40E_BW_CREDIT_DIVISOR);
			dev_dbg(&vsi->back->pdev->dev,
				"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
				ch->max_tx_rate,
				credits,
				ch->seid);
		}
		ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
		if (ret) {
			dev_dbg(&vsi->back->pdev->dev,
				"Failed to rebuild cloud filters for channel VSI %u\n",
				ch->seid);
			return ret;
		}
	}
	return 0;
}
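
/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for PF Reset.
 **/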
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return;
	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	i40e_pf_quiesce_all_vsi(pf);

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}

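	/* Save the current PTP time so that we can restore the time after the
	 * reset completes.
	 */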
	i40e_ptp_save_hw_time(pf);
}
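
/**
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
 **/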
static void i40e_send_version(struct i40e_pf *pf)
{
	struct i40e_driver_version dv;

	dv.major_version = 0xff;
	dv.minor_version = 0xff;
	dv.build_version = 0xff;
	dv.subbuild_version = 0;
	strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}
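
/**
 * i40e_get_oem_version - get OEM specific version information
 * @hw: pointer to the hardware structure
 **/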
static void i40e_get_oem_version(struct i40e_hw *hw)
{
	u16 block_offset = 0xffff;
	u16 block_length = 0;
	u16 capabilities = 0;
	u16 gen_snap = 0;
	u16 release = 0;

#define I40E_SR_NVM_OEM_VERSION_PTR		0x1B
#define I40E_NVM_OEM_LENGTH_OFFSET		0x00
#define I40E_NVM_OEM_CAPABILITIES_OFFSET	0x01
#define I40E_NVM_OEM_GEN_OFFSET			0x02
#define I40E_NVM_OEM_RELEASE_OFFSET		0x03
#define I40E_NVM_OEM_CAPABILITIES_MASK		0x000F
#define I40E_NVM_OEM_LENGTH			3

	/* Check if pointer to OEM version block is valid. */
	i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
	if (block_offset == 0xffff)
		return;

	/* Check if OEM version block has correct length. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
			   &block_length);
	if (block_length < I40E_NVM_OEM_LENGTH)
		return;

	/* Check if OEM version format is as expected. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
			   &capabilities);
	if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
		return;

	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
			   &gen_snap);
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
			   &release);
	hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
	hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
}
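
/**
 * i40e_reset - reset the PF and count the attempt
 * @pf: board private structure
 **/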
static int i40e_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;

	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, pf->state);
		clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	} else {
		pf->pfr_count++;
	}
	return ret;
}
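
/**
 * i40e_rebuild - rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/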
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
	int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 val;
	int v;

	if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
	    i40e_check_recovery_mode(pf)) {
		i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
	}

	if (test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
	    !old_recovery_mode_bit)
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto clear_recovery;
	}
	i40e_get_oem_version(&pf->hw);

	if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
	    ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
	     hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
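		/* The following delay is necessary for 4.33 firmware and older
		 * to recover after EMP reset. 200 ms should suffice but we
		 * put here 300 ms to be sure that FW is ready to operate
		 * after reset.
		 */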
		mdelay(300);
	}

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
		i40e_verify_eeprom(pf);

	/* if we are going out of or into recovery mode we have to act
	 * accordingly with regard to resources initialization
	 * and deinitialization
	 */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
	    old_recovery_mode_bit) {
		if (i40e_get_capabilities(pf,
					  i40e_aqc_opc_list_func_capabilities))
			goto end_unlock;

		if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
			/* we're staying in recovery mode so we'll reinitialize
			 * misc vector here
			 */
			if (i40e_setup_misc_vector_for_recovery_mode(pf))
				goto end_unlock;
		} else {
			if (!lock_acquired)
				rtnl_lock();
			/* we're going out of recovery mode so we'll free
			 * the IRQ allocated specifically for recovery mode
			 * and restore the interrupt scheme
			 */
			free_irq(pf->pdev->irq, pf);
			i40e_clear_interrupt_scheme(pf);
			if (i40e_restore_interrupt_scheme(pf))
				goto end_unlock;
		}

		/* tell the firmware that we're starting */
		i40e_send_version(pf);

		/* bail out in case recovery mode was detected, as we are
		 * repurposing this function for a different flow
		 */
		goto end_unlock;
	}

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (ret)
		goto end_core_reset;

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	/* Enable FW to write a default DCB config on link-up
	 * unless I40E_FLAG_TC_MQPRIO was enabled or DCB
	 * is not supported with new link speed
	 */
	if (pf->flags & I40E_FLAG_TC_MQPRIO) {
		i40e_aq_set_dcb_parameters(hw, false, NULL);
	} else {
		if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
		    (hw->phy.link_info.link_speed &
		     (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
			i40e_aq_set_dcb_parameters(hw, false, NULL);
			dev_warn(&pf->pdev->dev,
				 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		} else {
			i40e_aq_set_dcb_parameters(hw, true, NULL);
			ret = i40e_init_pf_dcb(pf);
			if (ret) {
				dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n",
					 ret);
				pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
				/* Continue without DCB enabled */
			}
		}
	}

#endif /* CONFIG_I40E_DCB */
	if (!lock_acquired)
		rtnl_lock();
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_unlock;

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (vsi->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);
				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					vsi->uplink_seid = pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (vsi->uplink_seid == pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(vsi);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_unlock;
		}
	}

	if (vsi->mqprio_qopt.max_rate[0]) {
		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
		u64 credits = 0;

		do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
		ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
		if (ret)
			goto end_unlock;

		credits = max_tx_rate;
		do_div(credits, I40E_BW_CREDIT_DIVISOR);
		dev_dbg(&vsi->back->pdev->dev,
			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
			max_tx_rate,
			credits,
			vsi->seid);
	}

	ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
	if (ret)
		goto end_unlock;

	/* PF Main VSI is rebuilt by now, go ahead and rebuild channel VSIs
	 * for this main VSI if they exist
	 */
	ret = i40e_rebuild_channels(vsi);
	if (ret)
		goto end_unlock;

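	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */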
#define I40E_REG_MSS          0x000E64DC
#define I40E_REG_MSS_MIN_MASK 0x3FF0000
#define I40E_64BYTE_MSS       0x400000
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (ret)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

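	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * VF/VM sessions (when TC is shared i.e. 0).
	 */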
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	/* Release the RTNL lock before we start resetting VFs */
	if (!lock_acquired)
		rtnl_unlock();

	/* Restore promiscuous settings */
	ret = i40e_set_promiscuous(pf, pf->cur_promisc);
	if (ret)
		dev_warn(&pf->pdev->dev,
			 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
			 pf->cur_promisc ? "on" : "off",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	i40e_reset_all_vfs(pf, true);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* We've already released the lock, so don't do it again */
	goto end_core_reset;

end_unlock:
	if (!lock_acquired)
		rtnl_unlock();
end_core_reset:
	clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
}
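
/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/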
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired)
{
	int ret;

	/* Perform the PF reset first, and only rebuild if it succeeded */
	ret = i40e_reset(pf);
	if (!ret)
		i40e_rebuild(pf, reinit, lock_acquired);
}
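
/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/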
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
{
	i40e_prep_for_reset(pf);
	i40e_reset_and_rebuild(pf, false, lock_acquired);
}
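
/**
 * i40e_handle_mdd_event
 * @pf: pointer to the PF structure
 *
 * Called from the MDD irq handler to identify possibly malicious vfs
 **/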
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
				I40E_GL_MDET_TX_VF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
				 event, queue, func);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
		}
	}

	/* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}
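
/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/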
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

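	/* don't bother with service tasks if a reset is in progress */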
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
	    test_bit(__I40E_SUSPENDED, pf->state))
		return;

	if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
		return;

	if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
		i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
		i40e_sync_filters_subtask(pf);
		i40e_reset_subtask(pf);
		i40e_handle_mdd_event(pf);
		i40e_vc_process_vflr_event(pf);
		i40e_watchdog_subtask(pf);
		i40e_fdir_reinit_subtask(pf);
		if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
			/* Client subtask will reopen next time through. */
			i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
							   true);
		} else {
			i40e_client_subtask(pf);
			if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
					       pf->state))
				i40e_notify_client_of_l2_param_changes(
							pf->vsi[pf->lan_vsi]);
		}
		i40e_sync_filters_subtask(pf);
	} else {
		i40e_reset_subtask(pf);
	}

	i40e_clean_adminq_subtask(pf);

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		i40e_service_event_schedule(pf);
}
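
/**
 * i40e_service_timer - timer callback
 * @t: timer list pointer
 **/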
static void i40e_service_timer(struct timer_list *t)
{
	struct i40e_pf *pf = from_timer(pf, t, service_timer);

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}
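
/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/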
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		vsi->alloc_queue_pairs = 1;
		vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
					 I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
					 I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fdsb_msix;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

	default:
		WARN_ON(1);
		return -ENODATA;
	}

	if (is_kdump_kernel()) {
		vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS;
		vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS;
	}

	return 0;
}
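
/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: VSI pointer
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/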
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
	struct i40e_ring **next_rings;
	int size;
	int ret = 0;

	/* allocate memory for both Tx, XDP Tx and Rx ring pointers */
	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
	       (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
	if (i40e_enabled_xdp_vsi(vsi)) {
		vsi->xdp_rings = next_rings;
		next_rings += vsi->alloc_queue_pairs;
	}
	vsi->rx_rings = next_rings;

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
		if (!vsi->q_vectors) {
			ret = -ENOMEM;
			goto err_vectors;
		}
	}
	return ret;

err_vectors:
	kfree(vsi->tx_rings);
	return ret;
}
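
/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/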
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_VSI_DOWN, vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->int_rate_limit = 0;
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	hash_init(vsi->mac_filter_hash);
	vsi->irqs_ready = false;

	if (type == I40E_VSI_MAIN) {
		vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
		if (!vsi->af_xdp_zc_qps)
			goto err_rings;
	}

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	/* Initialize VSI lock */
	spin_lock_init(&vsi->mac_filter_hash_lock);
	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	bitmap_free(vsi->af_xdp_zc_qps);
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
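
/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 **/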
static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
	/* free the ring and vector containers */
	if (free_qvectors) {
		kfree(vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	kfree(vsi->tx_rings);
	vsi->tx_rings = NULL;
	vsi->rx_rings = NULL;
	vsi->xdp_rings = NULL;
}
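
/**
 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
 * and lookup table
 * @vsi: Pointer to VSI structure
 **/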
static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
{
	if (!vsi)
		return;

	kfree(vsi->rss_hkey_user);
	vsi->rss_hkey_user = NULL;

	kfree(vsi->rss_lut_user);
	vsi->rss_lut_user = NULL;
}
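
/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 **/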
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
			vsi->idx, vsi->idx, vsi->type);
		goto unlock_vsi;
	}

	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi->type);
		goto unlock_vsi;
	}

	/* updates the PF for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	bitmap_free(vsi->af_xdp_zc_qps);
	i40e_vsi_free_arrays(vsi, true);
	i40e_clear_rss_config_user(vsi);

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}
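
/**
 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being cleaned
 **/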
static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings && vsi->tx_rings[0]) {
		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
			kfree_rcu(vsi->tx_rings[i], rcu);
			WRITE_ONCE(vsi->tx_rings[i], NULL);
			WRITE_ONCE(vsi->rx_rings[i], NULL);
			if (vsi->xdp_rings)
				WRITE_ONCE(vsi->xdp_rings[i], NULL);
		}
	}
}
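
/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 **/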
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
	int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	/* Set basic values in the rings to be used later during open() */
	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
		/* allocate space for both Tx and Rx in one shot */
		ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->queue_index = i;
		ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_tx_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		ring->itr_setting = pf->tx_itr_default;
		WRITE_ONCE(vsi->tx_rings[i], ring++);

		if (!i40e_enabled_xdp_vsi(vsi))
			goto setup_rx;

		ring->queue_index = vsi->alloc_queue_pairs + i;
		ring->reg_idx = vsi->base_queue + ring->queue_index;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = NULL;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_tx_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		set_ring_xdp(ring);
		ring->itr_setting = pf->tx_itr_default;
		WRITE_ONCE(vsi->xdp_rings[i], ring++);

setup_rx:
		ring->queue_index = i;
		ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_rx_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		ring->itr_setting = pf->rx_itr_default;
		WRITE_ONCE(vsi->rx_rings[i], ring);
	}

	return 0;

err_out:
	i40e_vsi_clear_rings(vsi);
	return -ENOMEM;
}
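
/**
 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
 * @pf: board private structure
 * @vectors: the number of MSI-X vectors to request
 *
 * Returns the number of vectors reserved, or zero on failure
 **/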
static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
{
	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					I40E_MIN_MSIX, vectors);
	if (vectors < 0) {
		dev_info(&pf->pdev->dev,
			 "MSI-X vector reservation failed: %d\n", vectors);
		vectors = 0;
	}

	return vectors;
}
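
/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns the number of vectors reserved or negative on failure
 **/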
static int i40e_init_msix(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int cpus, extra_vectors;
	int vectors_left;
	int v_budget, i;
	int v_actual;
	int iwarp_requested = 0;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

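	/* The number of vectors we'll request will be comprised of:
	 *   - Add 1 for "other" cause for Admin Queue events, etc.
	 *   - The number of LAN queue pairs
	 *   - The number of VMDq pairs
	 *   - The number of iWARP vectors if enabled
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again.  If that still fails, we punt.
	 */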
	vectors_left = hw->func_caps.num_msix_vectors;
	v_budget = 0;

	/* reserve one vector for miscellaneous handler */
	if (vectors_left) {
		v_budget++;
		vectors_left--;
	}

	/* reserve vectors for the main PF traffic queues. Initially we
	 * only reserve at most 50% of the available vectors, in the case that
	 * the number of online CPUs is large. This ensures that we can enable
	 * extra features as well. Once we've enabled the other features, we
	 * will use any remaining vectors to reach as close as we can to the
	 * number of online CPUs.
	 */
	cpus = num_online_cpus();
	pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
	vectors_left -= pf->num_lan_msix;

	/* reserve one vector for sideband flow director */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (vectors_left) {
			pf->num_fdsb_msix = 1;
			v_budget++;
			vectors_left--;
		} else {
			pf->num_fdsb_msix = 0;
		}
	}

	/* can we reserve enough for iWARP? */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		iwarp_requested = pf->num_iwarp_msix;

		if (!vectors_left)
			pf->num_iwarp_msix = 0;
		else if (vectors_left < pf->num_iwarp_msix)
			pf->num_iwarp_msix = 1;
		v_budget += pf->num_iwarp_msix;
		vectors_left -= pf->num_iwarp_msix;
	}

	/* any vectors left over go for VMDq support */
	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
		if (!vectors_left) {
			pf->num_vmdq_msix = 0;
			pf->num_vmdq_qps = 0;
		} else {
			int vmdq_vecs_wanted =
				pf->num_vmdq_vsis * pf->num_vmdq_qps;
			int vmdq_vecs =
				min_t(int, vectors_left, vmdq_vecs_wanted);

			/* if we're short on vectors for what's desired, we
			 * limit the queues per vmdq.  If this is still more
			 * than are available, the user will need to change
			 * the number of queues/vectors used by the PF later
			 * with the ethtool channels command
			 */
			if (vectors_left < vmdq_vecs_wanted) {
				pf->num_vmdq_qps = 1;
				vmdq_vecs_wanted = pf->num_vmdq_vsis;
				vmdq_vecs = min_t(int,
						  vectors_left,
						  vmdq_vecs_wanted);
			}
			pf->num_vmdq_msix = pf->num_vmdq_qps;

			v_budget += vmdq_vecs;
			vectors_left -= vmdq_vecs;
		}
	}

	/* On systems with a large number of SMP cores, we previously limited
	 * the number of vectors for num_lan_msix to be at most 50% of the
	 * available vectors, to allow for other features. Now, we add back
	 * the remaining vectors. However, we ensure that the total
	 * num_lan_msix will not exceed num_online_cpus(). To do this, we
	 * calculate the number of vectors we can add without going over the
	 * cap of CPUs. For systems with a small number of CPUs this will be
	 * zero.
	 */
	extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
	pf->num_lan_msix += extra_vectors;
	vectors_left -= extra_vectors;

	WARN(vectors_left < 0,
	     "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");

	v_budget += pf->num_lan_msix;
	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	v_actual = i40e_reserve_msix_vectors(pf, v_budget);

	if (v_actual < I40E_MIN_MSIX) {
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		pci_disable_msix(pf->pdev);
		return -ENODEV;

	} else if (v_actual == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (v_actual != v_budget) {
		/* If we have limited resources, we will start with no vectors
		 * for the special features and then allocate vectors to some
		 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
		int vec;

		dev_info(&pf->pdev->dev,
			 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
			 v_actual, v_budget);
		/* reserve the misc vector */
		vec = v_actual - 1;

		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		pf->num_vmdq_vsis = 1;
		pf->num_vmdq_qps = 1;

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_lan_msix = 1;
			break;
		case 3:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_lan_msix = 1;
				pf->num_iwarp_msix = 1;
			} else {
				pf->num_lan_msix = 2;
			}
			break;
		default:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_iwarp_msix = min_t(int, (vec / 3),
							   iwarp_requested);
				pf->num_vmdq_vsis = min_t(int, (vec / 3),
							  I40E_DEFAULT_NUM_VMDQ_VSI);
			} else {
				pf->num_vmdq_vsis = min_t(int, (vec / 2),
							  I40E_DEFAULT_NUM_VMDQ_VSI);
			}
			if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
				pf->num_fdsb_msix = 1;
				vec--;
			}
			pf->num_lan_msix = min_t(int,
			       (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
							      pf->num_lan_msix);
			pf->num_lan_qps = pf->num_lan_msix;
			break;
		}
	}

	if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
	    (pf->num_fdsb_msix == 0)) {
		dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	}
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    (pf->num_vmdq_msix == 0)) {
		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
	}

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (pf->num_iwarp_msix == 0)) {
		dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
	}
	i40e_debug(&pf->hw, I40E_DEBUG_INIT,
		   "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
		   pf->num_lan_msix,
		   pf->num_vmdq_msix * pf->num_vmdq_vsis,
		   pf->num_fdsb_msix,
		   pf->num_iwarp_msix);

	return v_actual;
}
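
/**
 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the vsi struct
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/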
static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
{
	struct i40e_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);

	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi,
			       i40e_napi_poll, NAPI_POLL_WEIGHT);

	/* tie q_vector and vsi together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}
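
/**
 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/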
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err, v_idx, num_q_vectors;

	/* if not MSIX, give the one vector only to the LAN VSI */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_q_vectors = vsi->num_q_vectors;
	else if (vsi == pf->vsi[pf->lan_vsi])
		num_q_vectors = 1;
	else
		return -EINVAL;

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		err = i40e_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		i40e_free_q_vector(vsi, v_idx);

	return err;
}
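
/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 **/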
static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int vectors = 0;
	ssize_t size;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		vectors = i40e_init_msix(pf);
		if (vectors < 0) {
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
				       I40E_FLAG_IWARP_ENABLED |
				       I40E_FLAG_RSS_ENABLED |
				       I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_DCB_ENABLED |
				       I40E_FLAG_SRIOV_ENABLED |
				       I40E_FLAG_FD_SB_ENABLED |
				       I40E_FLAG_FD_ATR_ENABLED |
				       I40E_FLAG_VMDQ_ENABLED);
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		vectors = pci_enable_msi(pf->pdev);
		if (vectors < 0) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
				 vectors);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
		vectors = 1;  /* one MSI or Legacy vector */
	}

	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");

	/* set up vector assignment tracking */
	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile)
		return -ENOMEM;

	pf->irq_pile->num_entries = vectors;
	pf->irq_pile->search_hint = 0;

	/* track first vector for misc interrupts, ignore return */
	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);

	return 0;
}
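
/**
 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
 * @pf: private board data structure
 *
 * Restore the interrupt scheme that was cleared when we suspended the
 * device. This should be called during resume to re-allocate the q_vectors
 * and reacquire IRQs.
 **/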
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
{
	int err, i;

	/* We cleared the MSI and MSI-X flags when disabling the old interrupt
	 * scheme. We need to re-enable them here in order to attempt to
	 * re-acquire the MSI or MSI-X vectors
	 */
	pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);

	err = i40e_init_interrupt_scheme(pf);
	if (err)
		return err;

	/* Now that we've re-acquired IRQs, we need to remap the vectors and
	 * rings together again.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
			if (err)
				goto err_unwind;
			i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
		}
	}

	err = i40e_setup_misc_vector(pf);
	if (err)
		goto err_unwind;

	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
		i40e_client_update_msix_info(pf);

	return 0;

err_unwind:
	while (i--) {
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	}

	return err;
}
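
/**
 * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
 * non queue events in recovery mode
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
 * the non-queue interrupts, e.g. AdminQ and errors, in recovery mode.
 **/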
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
{
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);

		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSI-X misc vector request failed, error %d\n",
				 err);
			return err;
		}
	} else {
		u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;

		err = request_irq(pf->pdev->irq, i40e_intr, flags,
				  pf->int_name, pf);

		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSI/legacy misc vector request failed, error %d\n",
				 err);
			return err;
		}
		i40e_enable_misc_int_causes(pf);
		i40e_irq_dynamic_enable_icr0(pf);
	}

	return 0;
}
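
/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors.  This is not used
 * when in MSI or Legacy interrupt mode.
 **/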
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the IRQ once, the first time through. */
	if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);

	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}
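
/**
 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
 * @vsi: Pointer to vsi structure
 * @seed: Buffer to store the hash keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Return 0 on success, negative on failure
 **/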
static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			   u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		ret = i40e_aq_get_rss_key(hw, vsi->id,
			(struct i40e_aqc_get_set_rss_key_data *)seed);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS key, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	return ret;
}
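
/**
 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
 * @vsi: Pointer to vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/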
static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
			       const u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	/* Fill out hash function seed */
	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		if (vsi->type == I40E_VSI_MAIN) {
			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
		}
	}

	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (vsi->type == I40E_VSI_MAIN) {
			if (lut_size != I40E_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
		}
	}
	i40e_flush(hw);

	return 0;
}
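
/**
 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 **/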
static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
			    u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 i;

	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
	}
	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (lut_size != I40E_HLUT_ARRAY_SIZE)
			return -EINVAL;
		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
	}

	return 0;
}
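
/**
 * i40e_config_rss - Configure RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/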
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
}
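
/**
 * i40e_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 **/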
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
		return i40e_get_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
}
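
/**
 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
 * @pf: Pointer to board private structure
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 **/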
void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
		       u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}
12022
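/**
 * i40e_pf_config_rss - Prepare for RSS if used
 * @pf: board private structure
 **/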
12023static int i40e_pf_config_rss(struct i40e_pf *pf)
12024{
12025 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
12026 u8 seed[I40E_HKEY_ARRAY_SIZE];
12027 u8 *lut;
12028 struct i40e_hw *hw = &pf->hw;
12029 u32 reg_val;
12030 u64 hena;
12031 int ret;
12032
12033
12034 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
12035 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
12036 hena |= i40e_pf_get_default_rss_hena(pf);
12037
12038 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
12039 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
12040
12041
12042 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
12043 reg_val = (pf->rss_table_size == 512) ?
12044 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
12045 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
12046 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
12047
12048
12049 if (!vsi->rss_size) {
12050 u16 qcount;
12055
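		/* If the firmware does something weird during VSI init, we
		 * could end up with zero TCs. Check for that to avoid
		 * divide-by-zero. It probably won't pass traffic, but it also
		 * won't be completely broken.
		 */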
12056 qcount = vsi->num_queue_pairs /
12057 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
12058 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
12059 }
12060 if (!vsi->rss_size)
12061 return -EINVAL;
12062
12063 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
12064 if (!lut)
12065 return -ENOMEM;
12066
12067
12068 if (vsi->rss_lut_user)
12069 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
12070 else
12071 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
12072
12073
12074
12075
12076 if (vsi->rss_hkey_user)
12077 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
12078 else
12079 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
12080 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
12081 kfree(lut);
12082
12083 return ret;
12084}
12094
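/**
 * i40e_reconfig_rss_queues - change the number of RSS queues and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for RSS
 *
 * Returns 0 if RSS is not enabled; otherwise returns the final RSS queue
 * count, which may differ from the requested count.
 **/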
12095int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
12096{
12097 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
12098 int new_rss_size;
12099
12100 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
12101 return 0;
12102
12103 queue_count = min_t(int, queue_count, num_online_cpus());
12104 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
12105
12106 if (queue_count != vsi->num_queue_pairs) {
12107 u16 qcount;
12108
12109 vsi->req_queue_pairs = queue_count;
12110 i40e_prep_for_reset(pf);
12111
12112 pf->alloc_rss_size = new_rss_size;
12113
12114 i40e_reset_and_rebuild(pf, true, true);
12118
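		/* Discard the user configured hash keys and lut, if less
		 * queues are enabled.
		 */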
12119 if (queue_count < vsi->rss_size) {
12120 i40e_clear_rss_config_user(vsi);
12121 dev_dbg(&pf->pdev->dev,
12122 "discard user configured hash keys and lut\n");
12123 }
12124
12125
12126 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
12127 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
12128
12129 i40e_pf_config_rss(pf);
12130 }
12131 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
12132 vsi->req_queue_pairs, pf->rss_size_max);
12133 return pf->alloc_rss_size;
12134}
12139
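/**
 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
 * @pf: board private structure
 **/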
12140i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
12141{
12142 i40e_status status;
12143 bool min_valid, max_valid;
12144 u32 max_bw, min_bw;
12145
12146 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
12147 &min_valid, &max_valid);
12148
12149 if (!status) {
12150 if (min_valid)
12151 pf->min_bw = min_bw;
12152 if (max_valid)
12153 pf->max_bw = max_bw;
12154 }
12155
12156 return status;
12157}
12162
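/**
 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
 * @pf: board private structure
 **/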
12163i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
12164{
12165 struct i40e_aqc_configure_partition_bw_data bw_data;
12166 i40e_status status;
12167
12168 memset(&bw_data, 0, sizeof(bw_data));
12169
12170
12171 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
12172 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
12173 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
12174
12175
12176 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
12177
12178 return status;
12179}
12184
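/**
 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
 * @pf: board private structure
 *
 * Writes the committed BW bit into the NVM control word so the current
 * partition min/max BW values persist across reboots.
 **/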
12185i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
12186{
12187
12188 enum i40e_admin_queue_err last_aq_status;
12189 i40e_status ret;
12190 u16 nvm_word;
12191
12192 if (pf->hw.partition_id != 1) {
12193 dev_info(&pf->pdev->dev,
12194 "Commit BW only works on partition 1! This is partition %d",
12195 pf->hw.partition_id);
12196 ret = I40E_NOT_SUPPORTED;
12197 goto bw_commit_out;
12198 }
12199
12200
12201 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
12202 last_aq_status = pf->hw.aq.asq_last_status;
12203 if (ret) {
12204 dev_info(&pf->pdev->dev,
12205 "Cannot acquire NVM for read access, err %s aq_err %s\n",
12206 i40e_stat_str(&pf->hw, ret),
12207 i40e_aq_str(&pf->hw, last_aq_status));
12208 goto bw_commit_out;
12209 }
12210
12211
12212 ret = i40e_aq_read_nvm(&pf->hw,
12213 I40E_SR_NVM_CONTROL_WORD,
12214 0x10, sizeof(nvm_word), &nvm_word,
12215 false, NULL);
12216
12217
12218
12219 last_aq_status = pf->hw.aq.asq_last_status;
12220 i40e_release_nvm(&pf->hw);
12221 if (ret) {
12222 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
12223 i40e_stat_str(&pf->hw, ret),
12224 i40e_aq_str(&pf->hw, last_aq_status));
12225 goto bw_commit_out;
12226 }
12227
12228
12229 msleep(50);
12230
12231
12232 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
12233 last_aq_status = pf->hw.aq.asq_last_status;
12234 if (ret) {
12235 dev_info(&pf->pdev->dev,
12236 "Cannot acquire NVM for write access, err %s aq_err %s\n",
12237 i40e_stat_str(&pf->hw, ret),
12238 i40e_aq_str(&pf->hw, last_aq_status));
12239 goto bw_commit_out;
12240 }
12241
12242
12243
12244
12245 ret = i40e_aq_update_nvm(&pf->hw,
12246 I40E_SR_NVM_CONTROL_WORD,
12247 0x10, sizeof(nvm_word),
12248 &nvm_word, true, 0, NULL);
12249
12250
12251
12252 last_aq_status = pf->hw.aq.asq_last_status;
12253 i40e_release_nvm(&pf->hw);
12254 if (ret)
12255 dev_info(&pf->pdev->dev,
12256 "BW settings NOT SAVED, err %s aq_err %s\n",
12257 i40e_stat_str(&pf->hw, ret),
12258 i40e_aq_str(&pf->hw, last_aq_status));
12259bw_commit_out:
12260
12261 return ret;
12262}
12268
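/**
 * i40e_is_total_port_shutdown_enabled - read NVM and return true if the
 * total-port-shutdown feature is enabled for this PF's port
 * @pf: board private structure
 **/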
12269static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
12270{
12271#define I40E_TOTAL_PORT_SHUTDOWN_ENABLED BIT(4)
12272#define I40E_FEATURES_ENABLE_PTR 0x2A
12273#define I40E_CURRENT_SETTING_PTR 0x2B
12274#define I40E_LINK_BEHAVIOR_WORD_OFFSET 0x2D
12275#define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
12276#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
12277#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
12278 i40e_status read_status = I40E_SUCCESS;
12279 u16 sr_emp_sr_settings_ptr = 0;
12280 u16 features_enable = 0;
12281 u16 link_behavior = 0;
12282 bool ret = false;
12283
12284 read_status = i40e_read_nvm_word(&pf->hw,
12285 I40E_SR_EMP_SR_SETTINGS_PTR,
12286 &sr_emp_sr_settings_ptr);
12287 if (read_status)
12288 goto err_nvm;
12289 read_status = i40e_read_nvm_word(&pf->hw,
12290 sr_emp_sr_settings_ptr +
12291 I40E_FEATURES_ENABLE_PTR,
12292 &features_enable);
12293 if (read_status)
12294 goto err_nvm;
12295 if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
12296 read_status = i40e_read_nvm_module_data(&pf->hw,
12297 I40E_SR_EMP_SR_SETTINGS_PTR,
12298 I40E_CURRENT_SETTING_PTR,
12299 I40E_LINK_BEHAVIOR_WORD_OFFSET,
12300 I40E_LINK_BEHAVIOR_WORD_LENGTH,
12301 &link_behavior);
12302 if (read_status)
12303 goto err_nvm;
12304 link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
12305 ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
12306 }
12307 return ret;
12308
12309err_nvm:
12310 dev_warn(&pf->pdev->dev,
12311 "total-port-shutdown feature is off due to read nvm error: %s\n",
12312 i40e_stat_str(&pf->hw, read_status));
12313 return ret;
12314}
12323
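/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/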
12324static int i40e_sw_init(struct i40e_pf *pf)
12325{
12326 int err = 0;
12327 int size;
12328 u16 pow;
12329
12330
12331 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
12332 I40E_FLAG_MSI_ENABLED |
12333 I40E_FLAG_MSIX_ENABLED;
12334
12335
12336 pf->rx_itr_default = I40E_ITR_RX_DEF;
12337 pf->tx_itr_default = I40E_ITR_TX_DEF;
12338
12339
12340
12341
12342 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
12343 pf->alloc_rss_size = 1;
12344 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
12345 pf->rss_size_max = min_t(int, pf->rss_size_max,
12346 pf->hw.func_caps.num_tx_qp);
12347
12348
12349 pow = roundup_pow_of_two(num_online_cpus());
12350 pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
12351
12352 if (pf->hw.func_caps.rss) {
12353 pf->flags |= I40E_FLAG_RSS_ENABLED;
12354 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
12355 num_online_cpus());
12356 }
12357
12358
12359 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
12360 pf->flags |= I40E_FLAG_MFP_ENABLED;
12361 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
12362 if (i40e_get_partition_bw_setting(pf)) {
12363 dev_warn(&pf->pdev->dev,
12364 "Could not get partition bw settings\n");
12365 } else {
12366 dev_info(&pf->pdev->dev,
12367 "Partition BW Min = %8.8x, Max = %8.8x\n",
12368 pf->min_bw, pf->max_bw);
12369
12370
12371 i40e_set_partition_bw_setting(pf);
12372 }
12373 }
12374
12375 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
12376 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
12377 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
12378 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
12379 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
12380 pf->hw.num_partitions > 1)
12381 dev_info(&pf->pdev->dev,
12382 "Flow Director Sideband mode Disabled in MFP mode\n");
12383 else
12384 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12385 pf->fdir_pf_filter_count =
12386 pf->hw.func_caps.fd_filters_guaranteed;
12387 pf->hw.fdir_shared_filter_count =
12388 pf->hw.func_caps.fd_filters_best_effort;
12389 }
12390
12391 if (pf->hw.mac.type == I40E_MAC_X722) {
12392 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
12393 I40E_HW_128_QP_RSS_CAPABLE |
12394 I40E_HW_ATR_EVICT_CAPABLE |
12395 I40E_HW_WB_ON_ITR_CAPABLE |
12396 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
12397 I40E_HW_NO_PCI_LINK_CHECK |
12398 I40E_HW_USE_SET_LLDP_MIB |
12399 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
12400 I40E_HW_PTP_L4_CAPABLE |
12401 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
12402 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
12403
12404#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
12405 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
12406 I40E_FDEVICT_PCTYPE_DEFAULT) {
12407 dev_warn(&pf->pdev->dev,
12408 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
12409 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
12410 }
12411 } else if ((pf->hw.aq.api_maj_ver > 1) ||
12412 ((pf->hw.aq.api_maj_ver == 1) &&
12413 (pf->hw.aq.api_min_ver > 4))) {
12414
12415 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
12416 }
12417
12418
12419 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
12420 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
12421
12422 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12423 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
12424 (pf->hw.aq.fw_maj_ver < 4))) {
12425 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
12426
12427 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
12428 }
12429
12430
12431 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12432 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
12433 (pf->hw.aq.fw_maj_ver < 4)))
12434 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
12435
12436
12437 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12438 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
12439 (pf->hw.aq.fw_maj_ver >= 5)))
12440 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
12441
12442
12443 if (pf->hw.mac.type == I40E_MAC_XL710 &&
12444 pf->hw.aq.fw_maj_ver >= 6)
12445 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
12446
12447 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
12448 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
12449 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
12450 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
12451 }
12452
12453 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
12454 pf->flags |= I40E_FLAG_IWARP_ENABLED;
12455
12456 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
12457 }
12458
12459
12460
12461
12462
12463 if (pf->hw.mac.type == I40E_MAC_XL710 &&
12464 pf->hw.func_caps.npar_enable &&
12465 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
12466 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
12467
12468#ifdef CONFIG_PCI_IOV
12469 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
12470 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
12471 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
12472 pf->num_req_vfs = min_t(int,
12473 pf->hw.func_caps.num_vfs,
12474 I40E_MAX_VF_COUNT);
12475 }
12476#endif
12477 pf->eeprom_version = 0xDEAD;
12478 pf->lan_veb = I40E_NO_VEB;
12479 pf->lan_vsi = I40E_NO_VSI;
12480
12481
12482 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
12483
12484
12485 size = sizeof(struct i40e_lump_tracking)
12486 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
12487 pf->qp_pile = kzalloc(size, GFP_KERNEL);
12488 if (!pf->qp_pile) {
12489 err = -ENOMEM;
12490 goto sw_init_done;
12491 }
12492 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
12493 pf->qp_pile->search_hint = 0;
12494
12495 pf->tx_timeout_recovery_level = 1;
12496
12497 if (pf->hw.mac.type != I40E_MAC_X722 &&
12498 i40e_is_total_port_shutdown_enabled(pf)) {
12499
12500
12501
12502 pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
12503 I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
12504 dev_info(&pf->pdev->dev,
12505 "total-port-shutdown was enabled, link-down-on-close is forced on\n");
12506 }
12507 mutex_init(&pf->switch_mutex);
12508
12509sw_init_done:
12510 return err;
12511}
12519
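/**
 * i40e_set_ntuple - set the ntuple feature flag and take action
 * @pf: board private structure to initialize
 * @features: the feature set that the stack is suggesting
 *
 * Returns a bool to indicate if a reset needs to happen
 **/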
12520bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
12521{
12522 bool need_reset = false;
12526
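	/* Check if Flow Director n-tuple support was enabled or disabled.  If
	 * the state changed, we need to reset.
	 */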
12527 if (features & NETIF_F_NTUPLE) {
12528
12529 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
12530 need_reset = true;
12531
12532
12533
12534 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
12535 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12536 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
12537 }
12538 } else {
12539
12540 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
12541 need_reset = true;
12542 i40e_fdir_filter_exit(pf);
12543 }
12544 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
12545 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
12546 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
12547
12548
12549 pf->fd_add_err = 0;
12550 pf->fd_atr_cnt = 0;
12551
12552 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
12553 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
12554 (I40E_DEBUG_FD & pf->hw.debug_mask))
12555 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
12556 }
12557 return need_reset;
12558}
12563
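/**
 * i40e_clear_rss_lut - clear the rx hash lookup table
 * @vsi: the VSI being configured
 **/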
12564static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
12565{
12566 struct i40e_pf *pf = vsi->back;
12567 struct i40e_hw *hw = &pf->hw;
12568 u16 vf_id = vsi->vf_id;
12569 u8 i;
12570
12571 if (vsi->type == I40E_VSI_MAIN) {
12572 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12573 wr32(hw, I40E_PFQF_HLUT(i), 0);
12574 } else if (vsi->type == I40E_VSI_SRIOV) {
12575 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12576 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
12577 } else {
12578 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12579 }
12580}
12587
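/**
 * i40e_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 * Note: expects to be called while under rtnl_lock()
 **/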
12588static int i40e_set_features(struct net_device *netdev,
12589 netdev_features_t features)
12590{
12591 struct i40e_netdev_priv *np = netdev_priv(netdev);
12592 struct i40e_vsi *vsi = np->vsi;
12593 struct i40e_pf *pf = vsi->back;
12594 bool need_reset;
12595
12596 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
12597 i40e_pf_config_rss(pf);
12598 else if (!(features & NETIF_F_RXHASH) &&
12599 netdev->features & NETIF_F_RXHASH)
12600 i40e_clear_rss_lut(vsi);
12601
12602 if (features & NETIF_F_HW_VLAN_CTAG_RX)
12603 i40e_vlan_stripping_enable(vsi);
12604 else
12605 i40e_vlan_stripping_disable(vsi);
12606
12607 if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
12608 dev_err(&pf->pdev->dev,
12609 "Offloaded tc filters active, can't turn hw_tc_offload off");
12610 return -EINVAL;
12611 }
12612
12613 if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
12614 i40e_del_all_macvlans(vsi);
12615
12616 need_reset = i40e_set_ntuple(pf, features);
12617
12618 if (need_reset)
12619 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12620
12621 return 0;
12622}
12623
12624static int i40e_udp_tunnel_set_port(struct net_device *netdev,
12625 unsigned int table, unsigned int idx,
12626 struct udp_tunnel_info *ti)
12627{
12628 struct i40e_netdev_priv *np = netdev_priv(netdev);
12629 struct i40e_hw *hw = &np->vsi->back->hw;
12630 u8 type, filter_index;
12631 i40e_status ret;
12632
12633 type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
12634 I40E_AQC_TUNNEL_TYPE_NGE;
12635
12636 ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
12637 NULL);
12638 if (ret) {
12639 netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
12640 i40e_stat_str(hw, ret),
12641 i40e_aq_str(hw, hw->aq.asq_last_status));
12642 return -EIO;
12643 }
12644
12645 udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
12646 return 0;
12647}
12648
12649static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
12650 unsigned int table, unsigned int idx,
12651 struct udp_tunnel_info *ti)
12652{
12653 struct i40e_netdev_priv *np = netdev_priv(netdev);
12654 struct i40e_hw *hw = &np->vsi->back->hw;
12655 i40e_status ret;
12656
12657 ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
12658 if (ret) {
12659 netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
12660 i40e_stat_str(hw, ret),
12661 i40e_aq_str(hw, hw->aq.asq_last_status));
12662 return -EIO;
12663 }
12664
12665 return 0;
12666}
12667
12668static int i40e_get_phys_port_id(struct net_device *netdev,
12669 struct netdev_phys_item_id *ppid)
12670{
12671 struct i40e_netdev_priv *np = netdev_priv(netdev);
12672 struct i40e_pf *pf = np->vsi->back;
12673 struct i40e_hw *hw = &pf->hw;
12674
12675 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
12676 return -EOPNOTSUPP;
12677
12678 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
12679 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
12680
12681 return 0;
12682}
12693
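/**
 * i40e_ndo_fdb_add - add an entry to the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @vid: VLAN ID
 * @flags: instructions from stack about fdb operation
 * @extack: netlink extended ack, unused currently
 **/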
12694static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
12695 struct net_device *dev,
12696 const unsigned char *addr, u16 vid,
12697 u16 flags,
12698 struct netlink_ext_ack *extack)
12699{
12700 struct i40e_netdev_priv *np = netdev_priv(dev);
12701 struct i40e_pf *pf = np->vsi->back;
12702 int err = 0;
12703
12704 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
12705 return -EOPNOTSUPP;
12706
12707 if (vid) {
12708 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
12709 return -EINVAL;
12710 }
12711
12712
12713
12714
12715 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
12716 netdev_info(dev, "FDB only supports static addresses\n");
12717 return -EINVAL;
12718 }
12719
12720 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
12721 err = dev_uc_add_excl(dev, addr);
12722 else if (is_multicast_ether_addr(addr))
12723 err = dev_mc_add_excl(dev, addr);
12724 else
12725 err = -EINVAL;
12726
12727
12728 if (err == -EEXIST && !(flags & NLM_F_EXCL))
12729 err = 0;
12730
12731 return err;
12732}
12749
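/**
 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge flags
 * @extack: netlink extended ack
 *
 * Inserts a new hardware bridge if not already created and
 * enables the bridging mode requested (VEB or VEPA). If the
 * hardware bridge has already been inserted and the request
 * is to change the mode then that requires a PF reset to
 * allow rebuild of the components with required hardware
 * bridge mode enabled.
 *
 * Returns 0 on success, negative on failure
 **/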
12750static int i40e_ndo_bridge_setlink(struct net_device *dev,
12751 struct nlmsghdr *nlh,
12752 u16 flags,
12753 struct netlink_ext_ack *extack)
12754{
12755 struct i40e_netdev_priv *np = netdev_priv(dev);
12756 struct i40e_vsi *vsi = np->vsi;
12757 struct i40e_pf *pf = vsi->back;
12758 struct i40e_veb *veb = NULL;
12759 struct nlattr *attr, *br_spec;
12760 int i, rem;
12761
12762
12763 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12764 return -EOPNOTSUPP;
12765
12766
12767 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12768 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12769 veb = pf->veb[i];
12770 }
12771
12772 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12773
12774 nla_for_each_nested(attr, br_spec, rem) {
12775 __u16 mode;
12776
12777 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12778 continue;
12779
12780 mode = nla_get_u16(attr);
12781 if ((mode != BRIDGE_MODE_VEPA) &&
12782 (mode != BRIDGE_MODE_VEB))
12783 return -EINVAL;
12784
12785
12786 if (!veb) {
12787 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
12788 vsi->tc_config.enabled_tc);
12789 if (veb) {
12790 veb->bridge_mode = mode;
12791 i40e_config_bridge_mode(veb);
12792 } else {
12793
12794 return -ENOENT;
12795 }
12796 break;
12797 } else if (mode != veb->bridge_mode) {
12798
12799 veb->bridge_mode = mode;
12800
12801 if (mode == BRIDGE_MODE_VEB)
12802 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
12803 else
12804 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
12805 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12806 break;
12807 }
12808 }
12809
12810 return 0;
12811}
12824
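/**
 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process ID
 * @seq: RTNL message seq #
 * @dev: the netdev being configured
 * @filter_mask: unused
 * @nlflags: netlink flags passed in
 *
 * Return the mode in which the hardware bridge is operating in
 * VEB or VEPA.
 **/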
12825static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12826 struct net_device *dev,
12827 u32 __always_unused filter_mask,
12828 int nlflags)
12829{
12830 struct i40e_netdev_priv *np = netdev_priv(dev);
12831 struct i40e_vsi *vsi = np->vsi;
12832 struct i40e_pf *pf = vsi->back;
12833 struct i40e_veb *veb = NULL;
12834 int i;
12835
12836
12837 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12838 return -EOPNOTSUPP;
12839
12840
12841 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12842 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12843 veb = pf->veb[i];
12844 }
12845
12846 if (!veb)
12847 return 0;
12848
12849 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
12850 0, 0, nlflags, filter_mask, NULL);
12851}
12858
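/**
 * i40e_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 **/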
12859static netdev_features_t i40e_features_check(struct sk_buff *skb,
12860 struct net_device *dev,
12861 netdev_features_t features)
12862{
12863 size_t len;
12864
12865
12866
12867
12868
12869 if (skb->ip_summed != CHECKSUM_PARTIAL)
12870 return features;
12871
12872
12873
12874
12875 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
12876 features &= ~NETIF_F_GSO_MASK;
12877
12878
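	/* MACLEN can support at most 63 words */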
12879 len = skb_network_header(skb) - skb->data;
12880 if (len & ~(63 * 2))
12881 goto out_err;
12882
12883
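	/* IPLEN and EIPLEN can support at most 127 dwords */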
12884 len = skb_transport_header(skb) - skb_network_header(skb);
12885 if (len & ~(127 * 4))
12886 goto out_err;
12887
12888 if (skb->encapsulation) {
12889
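		/* L4TUNLEN can support 127 words */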
12890 len = skb_inner_network_header(skb) - skb_transport_header(skb);
12891 if (len & ~(127 * 2))
12892 goto out_err;
12893
12894
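		/* IPLEN can support at most 127 dwords */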
12895 len = skb_inner_transport_header(skb) -
12896 skb_inner_network_header(skb);
12897 if (len & ~(127 * 4))
12898 goto out_err;
12899 }
12900
12901
12902
12903
12904
12905
12906 return features;
12907out_err:
12908 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
12909}
12916
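/**
 * i40e_xdp_setup - add/remove an XDP program
 * @vsi: VSI to attach the XDP program to
 * @prog: XDP program
 * @extack: netlink extended ack
 **/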
12917static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
12918 struct netlink_ext_ack *extack)
12919{
12920 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
12921 struct i40e_pf *pf = vsi->back;
12922 struct bpf_prog *old_prog;
12923 bool need_reset;
12924 int i;
12925
12926
12927 if (frame_size > vsi->rx_buf_len) {
12928 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
12929 return -EINVAL;
12930 }
12931
12932
12933 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
12934
12935 if (need_reset)
12936 i40e_prep_for_reset(pf);
12937
12938 old_prog = xchg(&vsi->xdp_prog, prog);
12939
12940 if (need_reset) {
12941 if (!prog)
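			/* Wait until ndo_xsk_wakeup completes. */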
12943 synchronize_rcu();
12944 i40e_reset_and_rebuild(pf, true, true);
12945 }
12946
12947 for (i = 0; i < vsi->num_queue_pairs; i++)
12948 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
12949
12950 if (old_prog)
12951 bpf_prog_put(old_prog);
12952
12953
12954
12955
12956 if (need_reset && prog)
12957 for (i = 0; i < vsi->num_queue_pairs; i++)
12958 if (vsi->xdp_rings[i]->xsk_pool)
12959 (void)i40e_xsk_wakeup(vsi->netdev, i,
12960 XDP_WAKEUP_RX);
12961
12962 return 0;
12963}
12970
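/**
 * i40e_enter_busy_conf - Enters busy config state
 * @vsi: vsi
 *
 * Returns 0 on success, <0 for failure.
 **/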
12971static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
12972{
12973 struct i40e_pf *pf = vsi->back;
12974 int timeout = 50;
12975
12976 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
12977 timeout--;
12978 if (!timeout)
12979 return -EBUSY;
12980 usleep_range(1000, 2000);
12981 }
12982
12983 return 0;
12984}
12989
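/**
 * i40e_exit_busy_conf - Exits busy config state
 * @vsi: vsi
 **/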
12990static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
12991{
12992 struct i40e_pf *pf = vsi->back;
12993
12994 clear_bit(__I40E_CONFIG_BUSY, pf->state);
12995}
13001
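/**
 * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 **/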
13002static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
13003{
13004 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
13005 sizeof(vsi->rx_rings[queue_pair]->rx_stats));
13006 memset(&vsi->tx_rings[queue_pair]->stats, 0,
13007 sizeof(vsi->tx_rings[queue_pair]->stats));
13008 if (i40e_enabled_xdp_vsi(vsi)) {
13009 memset(&vsi->xdp_rings[queue_pair]->stats, 0,
13010 sizeof(vsi->xdp_rings[queue_pair]->stats));
13011 }
13012}
13018
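/**
 * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 **/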
13019static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
13020{
13021 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
13022 if (i40e_enabled_xdp_vsi(vsi)) {
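		/* Make sure that in-progress ndo_xdp_xmit calls are
		 * completed.
		 */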
13026 synchronize_rcu();
13027 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
13028 }
13029 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
13030}
13037
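/**
 * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 * @enable: true for enable, false for disable
 **/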
13038static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
13039 bool enable)
13040{
13041 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13042 struct i40e_q_vector *q_vector = rxr->q_vector;
13043
13044 if (!vsi->netdev)
13045 return;
13046
13047
13048 if (q_vector->rx.ring || q_vector->tx.ring) {
13049 if (enable)
13050 napi_enable(&q_vector->napi);
13051 else
13052 napi_disable(&q_vector->napi);
13053 }
13054}
13063
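/**
 * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 * @enable: true for enable, false for disable
 *
 * Returns 0 on success, <0 on failure.
 **/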
13064static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
13065 bool enable)
13066{
13067 struct i40e_pf *pf = vsi->back;
13068 int pf_q, ret = 0;
13069
13070 pf_q = vsi->base_queue + queue_pair;
	ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
				     false /*is xdp*/, enable);
13073 if (ret) {
13074 dev_info(&pf->pdev->dev,
13075 "VSI seid %d Tx ring %d %sable timeout\n",
13076 vsi->seid, pf_q, (enable ? "en" : "dis"));
13077 return ret;
13078 }
13079
13080 i40e_control_rx_q(pf, pf_q, enable);
13081 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
13082 if (ret) {
13083 dev_info(&pf->pdev->dev,
13084 "VSI seid %d Rx ring %d %sable timeout\n",
13085 vsi->seid, pf_q, (enable ? "en" : "dis"));
13086 return ret;
13087 }
13088
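	/* Due to HW errata, on Rx disable only, the register can indicate done
	 * before it really is. Needs 50ms to be sure
	 */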
13092 if (!enable)
13093 mdelay(50);
13094
13095 if (!i40e_enabled_xdp_vsi(vsi))
13096 return ret;
13097
	ret = i40e_control_wait_tx_q(vsi->seid, pf,
				     pf_q + vsi->alloc_queue_pairs,
				     true /*is xdp*/, enable);
13101 if (ret) {
13102 dev_info(&pf->pdev->dev,
13103 "VSI seid %d XDP Tx ring %d %sable timeout\n",
13104 vsi->seid, pf_q, (enable ? "en" : "dis"));
13105 }
13106
13107 return ret;
13108}
13114
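/**
 * i40e_queue_pair_enable_irq - Enables the interrupt vector of a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 **/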
13115static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
13116{
13117 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13118 struct i40e_pf *pf = vsi->back;
13119 struct i40e_hw *hw = &pf->hw;
13120
13121
13122 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
13123 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
13124 else
13125 i40e_irq_dynamic_enable_icr0(pf);
13126
13127 i40e_flush(hw);
13128}
13134
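/**
 * i40e_queue_pair_disable_irq - Disables the interrupt vector of a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 **/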
13135static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
13136{
13137 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13138 struct i40e_pf *pf = vsi->back;
13139 struct i40e_hw *hw = &pf->hw;
13146
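	/* For simplicity, instead of removing the qp interrupt causes
	 * from the interrupt linked list, we simply disable the interrupt, and
	 * leave the list intact.
	 *
	 * All rings in a qp belong to the same qvector.
	 */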
13147 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
13148 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
13149
13150 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
13151 i40e_flush(hw);
13152 synchronize_irq(pf->msix_entries[intpf].vector);
13153 } else {
13154
13155 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
13156 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
13157 i40e_flush(hw);
13158 synchronize_irq(pf->pdev->irq);
13159 }
13160}
13168
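/**
 * i40e_queue_pair_disable - Disables a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 *
 * Returns 0 on success, <0 on failure.
 **/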
13169int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
13170{
13171 int err;
13172
13173 err = i40e_enter_busy_conf(vsi);
13174 if (err)
13175 return err;
13176
13177 i40e_queue_pair_disable_irq(vsi, queue_pair);
	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
	i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
13180 i40e_queue_pair_clean_rings(vsi, queue_pair);
13181 i40e_queue_pair_reset_stats(vsi, queue_pair);
13182
13183 return err;
13184}
13192
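/**
 * i40e_queue_pair_enable - Enables a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 *
 * Returns 0 on success, <0 on failure.
 **/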
13193int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
13194{
13195 int err;
13196
13197 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
13198 if (err)
13199 return err;
13200
13201 if (i40e_enabled_xdp_vsi(vsi)) {
13202 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
13203 if (err)
13204 return err;
13205 }
13206
13207 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
13208 if (err)
13209 return err;
13210
	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
	i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
13213 i40e_queue_pair_enable_irq(vsi, queue_pair);
13214
13215 i40e_exit_busy_conf(vsi);
13216
13217 return err;
13218}
13224
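/**
 * i40e_xdp - implements ndo_bpf for i40e
 * @dev: netdevice
 * @xdp: XDP command
 **/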
13225static int i40e_xdp(struct net_device *dev,
13226 struct netdev_bpf *xdp)
13227{
13228 struct i40e_netdev_priv *np = netdev_priv(dev);
13229 struct i40e_vsi *vsi = np->vsi;
13230
13231 if (vsi->type != I40E_VSI_MAIN)
13232 return -EINVAL;
13233
13234 switch (xdp->command) {
13235 case XDP_SETUP_PROG:
13236 return i40e_xdp_setup(vsi, xdp->prog, xdp->extack);
13237 case XDP_SETUP_XSK_POOL:
13238 return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
13239 xdp->xsk.queue_id);
13240 default:
13241 return -EINVAL;
13242 }
13243}
13244
13245static const struct net_device_ops i40e_netdev_ops = {
13246 .ndo_open = i40e_open,
13247 .ndo_stop = i40e_close,
13248 .ndo_start_xmit = i40e_lan_xmit_frame,
13249 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
13250 .ndo_set_rx_mode = i40e_set_rx_mode,
13251 .ndo_validate_addr = eth_validate_addr,
13252 .ndo_set_mac_address = i40e_set_mac,
13253 .ndo_change_mtu = i40e_change_mtu,
13254 .ndo_do_ioctl = i40e_ioctl,
13255 .ndo_tx_timeout = i40e_tx_timeout,
13256 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
13257 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
13258#ifdef CONFIG_NET_POLL_CONTROLLER
13259 .ndo_poll_controller = i40e_netpoll,
13260#endif
13261 .ndo_setup_tc = __i40e_setup_tc,
13262 .ndo_set_features = i40e_set_features,
13263 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
13264 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
13265 .ndo_get_vf_stats = i40e_get_vf_stats,
13266 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
13267 .ndo_get_vf_config = i40e_ndo_get_vf_config,
13268 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
13269 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
13270 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
13271 .ndo_get_phys_port_id = i40e_get_phys_port_id,
13272 .ndo_fdb_add = i40e_ndo_fdb_add,
13273 .ndo_features_check = i40e_features_check,
13274 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
13275 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
13276 .ndo_bpf = i40e_xdp,
13277 .ndo_xdp_xmit = i40e_xdp_xmit,
13278 .ndo_xsk_wakeup = i40e_xsk_wakeup,
13279 .ndo_dfwd_add_station = i40e_fwd_add,
13280 .ndo_dfwd_del_station = i40e_fwd_del,
13281};
13288
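/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative value on failure
 **/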
13289static int i40e_config_netdev(struct i40e_vsi *vsi)
13290{
13291 struct i40e_pf *pf = vsi->back;
13292 struct i40e_hw *hw = &pf->hw;
13293 struct i40e_netdev_priv *np;
13294 struct net_device *netdev;
13295 u8 broadcast[ETH_ALEN];
13296 u8 mac_addr[ETH_ALEN];
13297 int etherdev_size;
13298 netdev_features_t hw_enc_features;
13299 netdev_features_t hw_features;
13300
13301 etherdev_size = sizeof(struct i40e_netdev_priv);
13302 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
13303 if (!netdev)
13304 return -ENOMEM;
13305
13306 vsi->netdev = netdev;
13307 np = netdev_priv(netdev);
13308 np->vsi = vsi;
13309
13310 hw_enc_features = NETIF_F_SG |
13311 NETIF_F_IP_CSUM |
13312 NETIF_F_IPV6_CSUM |
13313 NETIF_F_HIGHDMA |
13314 NETIF_F_SOFT_FEATURES |
13315 NETIF_F_TSO |
13316 NETIF_F_TSO_ECN |
13317 NETIF_F_TSO6 |
13318 NETIF_F_GSO_GRE |
13319 NETIF_F_GSO_GRE_CSUM |
13320 NETIF_F_GSO_PARTIAL |
13321 NETIF_F_GSO_IPXIP4 |
13322 NETIF_F_GSO_IPXIP6 |
13323 NETIF_F_GSO_UDP_TUNNEL |
13324 NETIF_F_GSO_UDP_TUNNEL_CSUM |
13325 NETIF_F_GSO_UDP_L4 |
13326 NETIF_F_SCTP_CRC |
13327 NETIF_F_RXHASH |
13328 NETIF_F_RXCSUM |
13329 0;
13330
13331 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
13332 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
13333
13334 netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
13335
13336 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
13337
13338 netdev->hw_enc_features |= hw_enc_features;
13339
13340
13341 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
13342
13343
13344 netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
13345
13346 hw_features = hw_enc_features |
13347 NETIF_F_HW_VLAN_CTAG_TX |
13348 NETIF_F_HW_VLAN_CTAG_RX;
13349
13350 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
13351 hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
13352
13353 netdev->hw_features |= hw_features;
13354
13355 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
13356 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
13357
13358 if (vsi->type == I40E_VSI_MAIN) {
13359 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
13360 ether_addr_copy(mac_addr, hw->mac.perm_addr);
13370
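		/* The following steps are necessary for two reasons. First,
		 * some older NVM configurations load a default MAC-VLAN
		 * filter that will accept any tagged packet, and we want to
		 * replace this with a normal filter. Additionally, it is
		 * possible that our MAC address was provided by the platform
		 * using Open Firmware or similar.
		 *
		 * Thus, we need to remove the default filter and install one
		 * specific to the MAC address.
		 */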
13371 i40e_rm_default_mac_filter(vsi, mac_addr);
13372 spin_lock_bh(&vsi->mac_filter_hash_lock);
13373 i40e_add_mac_filter(vsi, mac_addr);
13374 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13375 } else {
13376
13377
13378
13379
13380
13381 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
13382 IFNAMSIZ - 4,
13383 pf->vsi[pf->lan_vsi]->netdev->name);
13384 eth_random_addr(mac_addr);
13385
13386 spin_lock_bh(&vsi->mac_filter_hash_lock);
13387 i40e_add_mac_filter(vsi, mac_addr);
13388 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13389 }
13403
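	/* Add the broadcast filter so that we initially will receive
	 * broadcast packets. Note that when a new VLAN is first added the
	 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
	 * specific filters as part of transitioning into "vlan" mode.
	 * When more VLANs are added, the driver will copy each existing MAC
	 * filter and add it for the new VLAN.
	 *
	 * Broadcast filters are handled specially by
	 * i40e_sync_filters_subtask, as the driver must set the broadcast
	 * promiscuous flag instead of adding this directly as a MAC/VLAN
	 * filter. The subtask will update the correct broadcast promiscuous
	 * bits as VLANs become active or inactive.
	 */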
13404 eth_broadcast_addr(broadcast);
13405 spin_lock_bh(&vsi->mac_filter_hash_lock);
13406 i40e_add_mac_filter(vsi, broadcast);
13407 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13408
13409 ether_addr_copy(netdev->dev_addr, mac_addr);
13410 ether_addr_copy(netdev->perm_addr, mac_addr);
13411
13412
13413 netdev->neigh_priv_len = sizeof(u32) * 4;
13414
13415 netdev->priv_flags |= IFF_UNICAST_FLT;
13416 netdev->priv_flags |= IFF_SUPP_NOFCS;
13417
13418 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
13419
13420 netdev->netdev_ops = &i40e_netdev_ops;
13421 netdev->watchdog_timeo = 5 * HZ;
13422 i40e_set_ethtool_ops(netdev);
13423
13424
13425 netdev->min_mtu = ETH_MIN_MTU;
13426 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
13427
13428 return 0;
13429}
13436
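/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 **/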
13437static void i40e_vsi_delete(struct i40e_vsi *vsi)
13438{
13439
13440 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
13441 return;
13442
13443 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
13444}
13451
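/**
 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
 * @vsi: the VSI being queried
 *
 * Returns 1 if HW bridge mode is VEB and returns 0 in case of VEPA mode
 **/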
13452int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
13453{
13454 struct i40e_veb *veb;
13455 struct i40e_pf *pf = vsi->back;
13456
13457
13458 if (vsi->veb_idx >= I40E_MAX_VEB)
13459 return 1;
13460
13461 veb = pf->veb[vsi->veb_idx];
13462 if (!veb) {
13463 dev_info(&pf->pdev->dev,
13464 "There is no veb associated with the bridge\n");
13465 return -ENOENT;
13466 }
13467
13468
13469 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
13470 return 0;
13471 } else {
13472
13473 return 1;
13474 }
13475
13476
13477 return 0;
13478}
13486
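/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.
 **/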
13487static int i40e_add_vsi(struct i40e_vsi *vsi)
13488{
13489 int ret = -ENODEV;
13490 struct i40e_pf *pf = vsi->back;
13491 struct i40e_hw *hw = &pf->hw;
13492 struct i40e_vsi_context ctxt;
13493 struct i40e_mac_filter *f;
13494 struct hlist_node *h;
13495 int bkt;
13496
13497 u8 enabled_tc = 0x1;
13498 int f_count = 0;
13499
13500 memset(&ctxt, 0, sizeof(ctxt));
13501 switch (vsi->type) {
13502 case I40E_VSI_MAIN:
13503
13504
13505
13506
13507
13508 ctxt.seid = pf->main_vsi_seid;
13509 ctxt.pf_num = pf->hw.pf_id;
13510 ctxt.vf_num = 0;
13511 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
13512 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13513 if (ret) {
13514 dev_info(&pf->pdev->dev,
13515 "couldn't get PF vsi config, err %s aq_err %s\n",
13516 i40e_stat_str(&pf->hw, ret),
13517 i40e_aq_str(&pf->hw,
13518 pf->hw.aq.asq_last_status));
13519 return -ENOENT;
13520 }
13521 vsi->info = ctxt.info;
13522 vsi->info.valid_sections = 0;
13523
13524 vsi->seid = ctxt.seid;
13525 vsi->id = ctxt.vsi_number;
13526
13527 enabled_tc = i40e_pf_get_tc_map(pf);
13528
13529
13530
13531
13532
13533 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
13534 memset(&ctxt, 0, sizeof(ctxt));
13535 ctxt.seid = pf->main_vsi_seid;
13536 ctxt.pf_num = pf->hw.pf_id;
13537 ctxt.vf_num = 0;
13538 ctxt.info.valid_sections |=
13539 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13540 ctxt.info.switch_id =
13541 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
13542 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13543 if (ret) {
13544 dev_info(&pf->pdev->dev,
13545 "update vsi failed, err %s aq_err %s\n",
13546 i40e_stat_str(&pf->hw, ret),
13547 i40e_aq_str(&pf->hw,
13548 pf->hw.aq.asq_last_status));
13549 ret = -ENOENT;
13550 goto err;
13551 }
13552 }
13553
13554
13555 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
13556 !(pf->hw.func_caps.iscsi)) {
13557 memset(&ctxt, 0, sizeof(ctxt));
13558 ctxt.seid = pf->main_vsi_seid;
13559 ctxt.pf_num = pf->hw.pf_id;
13560 ctxt.vf_num = 0;
13561 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
13562 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13563 if (ret) {
13564 dev_info(&pf->pdev->dev,
13565 "update vsi failed, err %s aq_err %s\n",
13566 i40e_stat_str(&pf->hw, ret),
13567 i40e_aq_str(&pf->hw,
13568 pf->hw.aq.asq_last_status));
13569 ret = -ENOENT;
13570 goto err;
13571 }
13572
13573 i40e_vsi_update_queue_map(vsi, &ctxt);
13574 vsi->info.valid_sections = 0;
13575 } else {
13576
13577
13578
13579
13580
13581
13582 ret = i40e_vsi_config_tc(vsi, enabled_tc);
13583 if (ret) {
13584
13585
13586
13587 dev_info(&pf->pdev->dev,
13588 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
13589 enabled_tc,
13590 i40e_stat_str(&pf->hw, ret),
13591 i40e_aq_str(&pf->hw,
13592 pf->hw.aq.asq_last_status));
13593 }
13594 }
13595 break;
13596
13597 case I40E_VSI_FDIR:
13598 ctxt.pf_num = hw->pf_id;
13599 ctxt.vf_num = 0;
13600 ctxt.uplink_seid = vsi->uplink_seid;
13601 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13602 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13603 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
13604 (i40e_is_vsi_uplink_mode_veb(vsi))) {
13605 ctxt.info.valid_sections |=
13606 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13607 ctxt.info.switch_id =
13608 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13609 }
13610 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13611 break;
13612
13613 case I40E_VSI_VMDQ2:
13614 ctxt.pf_num = hw->pf_id;
13615 ctxt.vf_num = 0;
13616 ctxt.uplink_seid = vsi->uplink_seid;
13617 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13618 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
13619
13620
13621
13622
13623 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13624 ctxt.info.valid_sections |=
13625 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13626 ctxt.info.switch_id =
13627 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13628 }
13629
13630
13631 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13632 break;
13633
13634 case I40E_VSI_SRIOV:
13635 ctxt.pf_num = hw->pf_id;
13636 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
13637 ctxt.uplink_seid = vsi->uplink_seid;
13638 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13639 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
13640
13641
13642
13643
13644 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13645 ctxt.info.valid_sections |=
13646 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13647 ctxt.info.switch_id =
13648 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13649 }
13650
13651 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
13652 ctxt.info.valid_sections |=
13653 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
13654 ctxt.info.queueing_opt_flags |=
13655 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
13656 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
13657 }
13658
13659 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
13660 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
13661 if (pf->vf[vsi->vf_id].spoofchk) {
13662 ctxt.info.valid_sections |=
13663 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
13664 ctxt.info.sec_flags |=
13665 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
13666 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
13667 }
13668
13669 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13670 break;
13671
13672 case I40E_VSI_IWARP:
13673
13674 break;
13675
13676 default:
13677 return -ENODEV;
13678 }
13679
13680 if (vsi->type != I40E_VSI_MAIN) {
13681 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
13682 if (ret) {
13683 dev_info(&vsi->back->pdev->dev,
13684 "add vsi failed, err %s aq_err %s\n",
13685 i40e_stat_str(&pf->hw, ret),
13686 i40e_aq_str(&pf->hw,
13687 pf->hw.aq.asq_last_status));
13688 ret = -ENOENT;
13689 goto err;
13690 }
13691 vsi->info = ctxt.info;
13692 vsi->info.valid_sections = 0;
13693 vsi->seid = ctxt.seid;
13694 vsi->id = ctxt.vsi_number;
13695 }
13696
13697 vsi->active_filters = 0;
13698 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
13699 spin_lock_bh(&vsi->mac_filter_hash_lock);
13700
13701 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
13702 f->state = I40E_FILTER_NEW;
13703 f_count++;
13704 }
13705 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13706
13707 if (f_count) {
13708 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
13709 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
13710 }
13711
13712
13713 ret = i40e_vsi_get_bw_info(vsi);
13714 if (ret) {
13715 dev_info(&pf->pdev->dev,
13716 "couldn't get vsi bw info, err %s aq_err %s\n",
13717 i40e_stat_str(&pf->hw, ret),
13718 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13719
13720 ret = 0;
13721 }
13722
13723err:
13724 return ret;
13725}
13732
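/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 **/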
13733int i40e_vsi_release(struct i40e_vsi *vsi)
13734{
13735 struct i40e_mac_filter *f;
13736 struct hlist_node *h;
13737 struct i40e_veb *veb = NULL;
13738 struct i40e_pf *pf;
13739 u16 uplink_seid;
13740 int i, n, bkt;
13741
13742 pf = vsi->back;
13743
13744
13745 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
13746 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
13747 vsi->seid, vsi->uplink_seid);
13748 return -ENODEV;
13749 }
13750 if (vsi == pf->vsi[pf->lan_vsi] &&
13751 !test_bit(__I40E_DOWN, pf->state)) {
13752 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
13753 return -ENODEV;
13754 }
13755
13756 uplink_seid = vsi->uplink_seid;
13757 if (vsi->type != I40E_VSI_SRIOV) {
13758 if (vsi->netdev_registered) {
13759 vsi->netdev_registered = false;
13760 if (vsi->netdev) {
13761
13762 unregister_netdev(vsi->netdev);
13763 }
13764 } else {
13765 i40e_vsi_close(vsi);
13766 }
13767 i40e_vsi_disable_irq(vsi);
13768 }
13769
13770 spin_lock_bh(&vsi->mac_filter_hash_lock);
13771
13772
13773 if (vsi->netdev) {
13774 __dev_uc_unsync(vsi->netdev, NULL);
13775 __dev_mc_unsync(vsi->netdev, NULL);
13776 }
13777
13778
13779 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
13780 __i40e_del_filter(vsi, f);
13781
13782 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13783
13784 i40e_sync_vsi_filters(vsi);
13785
13786 i40e_vsi_delete(vsi);
13787 i40e_vsi_free_q_vectors(vsi);
13788 if (vsi->netdev) {
13789 free_netdev(vsi->netdev);
13790 vsi->netdev = NULL;
13791 }
13792 i40e_vsi_clear_rings(vsi);
13793 i40e_vsi_clear(vsi);
13794
13795
13796
13797
13798
13799
13800
13801
13802
13803 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
13804 if (pf->vsi[i] &&
13805 pf->vsi[i]->uplink_seid == uplink_seid &&
13806 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13807 n++;
13808 }
13809 }
13810 for (i = 0; i < I40E_MAX_VEB; i++) {
13811 if (!pf->veb[i])
13812 continue;
13813 if (pf->veb[i]->uplink_seid == uplink_seid)
13814 n++;
13815 if (pf->veb[i]->seid == uplink_seid)
13816 veb = pf->veb[i];
13817 }
13818 if (n == 0 && veb && veb->uplink_seid != 0)
13819 i40e_veb_release(veb);
13820
13821 return 0;
13822}
13833
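/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after i40e_vsi_mem_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 **/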
13834static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
13835{
13836 int ret = -ENOENT;
13837 struct i40e_pf *pf = vsi->back;
13838
13839 if (vsi->q_vectors[0]) {
13840 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
13841 vsi->seid);
13842 return -EEXIST;
13843 }
13844
13845 if (vsi->base_vector) {
13846 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
13847 vsi->seid, vsi->base_vector);
13848 return -EEXIST;
13849 }
13850
13851 ret = i40e_vsi_alloc_q_vectors(vsi);
13852 if (ret) {
13853 dev_info(&pf->pdev->dev,
13854 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
13855 vsi->num_q_vectors, vsi->seid, ret);
13856 vsi->num_q_vectors = 0;
13857 goto vector_setup_out;
13858 }
13859
13860
13861
13862
13863 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
13864 return ret;
13865 if (vsi->num_q_vectors)
13866 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
13867 vsi->num_q_vectors, vsi->idx);
13868 if (vsi->base_vector < 0) {
13869 dev_info(&pf->pdev->dev,
13870 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
13871 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
13872 i40e_vsi_free_q_vectors(vsi);
13873 ret = -ENOENT;
13874 goto vector_setup_out;
13875 }
13876
13877vector_setup_out:
13878 return ret;
13879}
13889
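/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi.
 *
 * This re-allocates a vsi's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/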
13890static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
13891{
13892 u16 alloc_queue_pairs;
13893 struct i40e_pf *pf;
13894 u8 enabled_tc;
13895 int ret;
13896
13897 if (!vsi)
13898 return NULL;
13899
13900 pf = vsi->back;
13901
13902 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
13903 i40e_vsi_clear_rings(vsi);
13904
13905 i40e_vsi_free_arrays(vsi, false);
13906 i40e_set_num_rings_in_vsi(vsi);
13907 ret = i40e_vsi_alloc_arrays(vsi, false);
13908 if (ret)
13909 goto err_vsi;
13910
13911 alloc_queue_pairs = vsi->alloc_queue_pairs *
13912 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
13913
13914 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
13915 if (ret < 0) {
13916 dev_info(&pf->pdev->dev,
13917 "failed to get tracking for %d queues for VSI %d err %d\n",
13918 alloc_queue_pairs, vsi->seid, ret);
13919 goto err_vsi;
13920 }
13921 vsi->base_queue = ret;
13922
13923
13924
13925
13926 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
13927 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
13928 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
13929 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
13930 if (vsi->type == I40E_VSI_MAIN)
13931 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
13932
13933
13934 ret = i40e_alloc_rings(vsi);
13935 if (ret)
13936 goto err_rings;
13937
13938
13939 i40e_vsi_map_rings_to_vectors(vsi);
13940 return vsi;
13941
13942err_rings:
13943 i40e_vsi_free_q_vectors(vsi);
13944 if (vsi->netdev_registered) {
13945 vsi->netdev_registered = false;
13946 unregister_netdev(vsi->netdev);
13947 free_netdev(vsi->netdev);
13948 vsi->netdev = NULL;
13949 }
13950 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
13951err_vsi:
13952 i40e_vsi_clear(vsi);
13953 return NULL;
13954}
13968
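/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/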
13969struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
13970 u16 uplink_seid, u32 param1)
13971{
13972 struct i40e_vsi *vsi = NULL;
13973 struct i40e_veb *veb = NULL;
13974 u16 alloc_queue_pairs;
13975 int ret, i;
13976 int v_idx;
13990
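	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */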
13991 for (i = 0; i < I40E_MAX_VEB; i++) {
13992 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
13993 veb = pf->veb[i];
13994 break;
13995 }
13996 }
13997
13998 if (!veb && uplink_seid != pf->mac_seid) {
13999
14000 for (i = 0; i < pf->num_alloc_vsi; i++) {
14001 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
14002 vsi = pf->vsi[i];
14003 break;
14004 }
14005 }
14006 if (!vsi) {
14007 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
14008 uplink_seid);
14009 return NULL;
14010 }
14011
14012 if (vsi->uplink_seid == pf->mac_seid)
14013 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
14014 vsi->tc_config.enabled_tc);
14015 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
14016 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
14017 vsi->tc_config.enabled_tc);
14018 if (veb) {
14019 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
14020 dev_info(&vsi->back->pdev->dev,
14021 "New VSI creation error, uplink seid of LAN VSI expected.\n");
14022 return NULL;
14023 }
14024
14025
14026
14027
14028 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
14029 veb->bridge_mode = BRIDGE_MODE_VEPA;
14030 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
14031 }
14032 i40e_config_bridge_mode(veb);
14033 }
14034 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
14035 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
14036 veb = pf->veb[i];
14037 }
14038 if (!veb) {
14039 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
14040 return NULL;
14041 }
14042
14043 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14044 uplink_seid = veb->seid;
14045 }
14046
14047
14048 v_idx = i40e_vsi_mem_alloc(pf, type);
14049 if (v_idx < 0)
14050 goto err_alloc;
14051 vsi = pf->vsi[v_idx];
14052 if (!vsi)
14053 goto err_alloc;
14054 vsi->type = type;
14055 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
14056
14057 if (type == I40E_VSI_MAIN)
14058 pf->lan_vsi = v_idx;
14059 else if (type == I40E_VSI_SRIOV)
14060 vsi->vf_id = param1;
14061
14062 alloc_queue_pairs = vsi->alloc_queue_pairs *
14063 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14064
14065 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14066 if (ret < 0) {
14067 dev_info(&pf->pdev->dev,
14068 "failed to get tracking for %d queues for VSI %d err=%d\n",
14069 alloc_queue_pairs, vsi->seid, ret);
14070 goto err_vsi;
14071 }
14072 vsi->base_queue = ret;
14073
14074
14075 vsi->uplink_seid = uplink_seid;
14076 ret = i40e_add_vsi(vsi);
14077 if (ret)
14078 goto err_vsi;
14079
14080 switch (vsi->type) {
14081
14082 case I40E_VSI_MAIN:
14083 case I40E_VSI_VMDQ2:
14084 ret = i40e_config_netdev(vsi);
14085 if (ret)
14086 goto err_netdev;
14087 ret = register_netdev(vsi->netdev);
14088 if (ret)
14089 goto err_netdev;
14090 vsi->netdev_registered = true;
14091 netif_carrier_off(vsi->netdev);
14092#ifdef CONFIG_I40E_DCB
14093
14094 i40e_dcbnl_setup(vsi);
14095#endif
14096 fallthrough;
14097 case I40E_VSI_FDIR:
14098
14099 ret = i40e_vsi_setup_vectors(vsi);
14100 if (ret)
14101 goto err_msix;
14102
14103 ret = i40e_alloc_rings(vsi);
14104 if (ret)
14105 goto err_rings;
14106
14107
14108 i40e_vsi_map_rings_to_vectors(vsi);
14109
14110 i40e_vsi_reset_stats(vsi);
14111 break;
14112 default:
14113
14114 break;
14115 }
14116
14117 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
14118 (vsi->type == I40E_VSI_VMDQ2)) {
14119 ret = i40e_vsi_config_rss(vsi);
14120 }
14121 return vsi;
14122
14123err_rings:
14124 i40e_vsi_free_q_vectors(vsi);
14125err_msix:
14126 if (vsi->netdev_registered) {
14127 vsi->netdev_registered = false;
14128 unregister_netdev(vsi->netdev);
14129 free_netdev(vsi->netdev);
14130 vsi->netdev = NULL;
14131 }
14132err_netdev:
14133 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14134err_vsi:
14135 i40e_vsi_clear(vsi);
14136err_alloc:
14137 return NULL;
14138}
14145
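/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB
 **/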
14146static int i40e_veb_get_bw_info(struct i40e_veb *veb)
14147{
14148 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
14149 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
14150 struct i40e_pf *pf = veb->pf;
14151 struct i40e_hw *hw = &pf->hw;
14152 u32 tc_bw_max;
14153 int ret = 0;
14154 int i;
14155
14156 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
14157 &bw_data, NULL);
14158 if (ret) {
14159 dev_info(&pf->pdev->dev,
14160 "query veb bw config failed, err %s aq_err %s\n",
14161 i40e_stat_str(&pf->hw, ret),
14162 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14163 goto out;
14164 }
14165
14166 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
14167 &ets_data, NULL);
14168 if (ret) {
14169 dev_info(&pf->pdev->dev,
14170 "query veb bw ets config failed, err %s aq_err %s\n",
14171 i40e_stat_str(&pf->hw, ret),
14172 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14173 goto out;
14174 }
14175
14176 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
14177 veb->bw_max_quanta = ets_data.tc_bw_max;
14178 veb->is_abs_credits = bw_data.absolute_credits_enable;
14179 veb->enabled_tc = ets_data.tc_valid_bits;
14180 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
14181 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
14182 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
14183 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
14184 veb->bw_tc_limit_credits[i] =
14185 le16_to_cpu(bw_data.tc_bw_limits[i]);
14186 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
14187 }
14188
14189out:
14190 return ret;
14191}
14199
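/**
 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On error: returns error code (negative)
 * On success: returns the index of the VEB in the pf->veb array (positive)
 **/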
14200static int i40e_veb_mem_alloc(struct i40e_pf *pf)
14201{
14202 int ret = -ENOENT;
14203 struct i40e_veb *veb;
14204 int i;
14205
14206
14207 mutex_lock(&pf->switch_mutex);
14208
14209
14210
14211
14212
14213
14214
14215 i = 0;
14216 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
14217 i++;
14218 if (i >= I40E_MAX_VEB) {
14219 ret = -ENOMEM;
14220 goto err_alloc_veb;
14221 }
14222
14223 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
14224 if (!veb) {
14225 ret = -ENOMEM;
14226 goto err_alloc_veb;
14227 }
14228 veb->pf = pf;
14229 veb->idx = i;
14230 veb->enabled_tc = 1;
14231
14232 pf->veb[i] = veb;
14233 ret = i;
14234err_alloc_veb:
14235 mutex_unlock(&pf->switch_mutex);
14236 return ret;
14237}
14245
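/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/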
14246static void i40e_switch_branch_release(struct i40e_veb *branch)
14247{
14248 struct i40e_pf *pf = branch->pf;
14249 u16 branch_seid = branch->seid;
14250 u16 veb_idx = branch->idx;
14251 int i;
14252
14253
14254 for (i = 0; i < I40E_MAX_VEB; i++) {
14255 if (!pf->veb[i])
14256 continue;
14257 if (pf->veb[i]->uplink_seid == branch->seid)
14258 i40e_switch_branch_release(pf->veb[i]);
14259 }
14260
14261
14262
14263
14264
14265
14266 for (i = 0; i < pf->num_alloc_vsi; i++) {
14267 if (!pf->vsi[i])
14268 continue;
14269 if (pf->vsi[i]->uplink_seid == branch_seid &&
14270 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
14271 i40e_vsi_release(pf->vsi[i]);
14272 }
14273 }
14274
14275
14276
14277
14278
14279
14280 if (pf->veb[veb_idx])
14281 i40e_veb_release(pf->veb[veb_idx]);
14282}
14287
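/**
 * i40e_veb_clear - remove veb struct
 * @veb: the veb to remove
 **/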
14288static void i40e_veb_clear(struct i40e_veb *veb)
14289{
14290 if (!veb)
14291 return;
14292
14293 if (veb->pf) {
14294 struct i40e_pf *pf = veb->pf;
14295
14296 mutex_lock(&pf->switch_mutex);
14297 if (pf->veb[veb->idx] == veb)
14298 pf->veb[veb->idx] = NULL;
14299 mutex_unlock(&pf->switch_mutex);
14300 }
14301
14302 kfree(veb);
14303}
14308
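/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 **/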
void i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_pf *pf;
	int i, n = 0;

	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
		}
	}
	if (n != 1) {
		dev_info(&pf->pdev->dev,
			 "can't remove VEB %d with %d VSIs left\n",
			 veb->seid, n);
		return;
	}

	/* move the remaining VSI to uplink veb */
	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
	if (veb->uplink_seid) {
		vsi->uplink_seid = veb->uplink_seid;
		if (veb->uplink_seid == pf->mac_seid)
			vsi->veb_idx = I40E_NO_VEB;
		else
			vsi->veb_idx = veb->veb_idx;
	} else {
		/* floating VEB */
		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
	}

	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
	i40e_veb_clear(veb);
}
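
/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 **/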
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = veb->pf;
	bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
	int ret;

	/* get a VEB from the hardware */
	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, false,
			      &veb->seid, enable_stats, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't add VEB, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}
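
/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch.
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB.  It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/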
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both seid's are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}

	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}
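
/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/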
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb >= I40E_MAX_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb >= I40E_MAX_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}
		if (pf->lan_veb >= I40E_MAX_VEB)
			break;

		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}
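
/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/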
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u16 next_seid = 0;
	int ret = 0;
	u8 *aq_buf;
	int i;

	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!aq_buf)
		return -ENOMEM;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	do {
		u16 num_reported, num_total;

		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "get switch config failed err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			kfree(aq_buf);
			return -ENOENT;
		}

		num_reported = le16_to_cpu(sw_config->header.num_reported);
		num_total = le16_to_cpu(sw_config->header.num_total);

		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];

			i40e_setup_pf_switch_element(pf, ele, num_reported,
						     printconfig);
		}
	} while (next_seid != 0);

	kfree(aq_buf);
	return ret;
}
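
/**
 * i40e_setup_pf_switch - Setup the HW switch on this PF instance
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 *
 * Returns 0 on success, negative value on failure
 **/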
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	u16 flags = 0;
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when user requests promisc. The default is limited
	 * promisc.
	 */
	if ((pf->hw.pf_id == 0) &&
	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		pf->last_sw_conf_flags = flags;
	}

	if (pf->hw.pf_id == 0) {
		u16 valid_flags;

		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
						NULL);
		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
			dev_info(&pf->pdev->dev,
				 "couldn't set switch config bits, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			/* not a fatal problem, just keep going */
		}
		pf->last_sw_conf_valid_flags = valid_flags;
	}

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_cloud_filter_exit(pf);
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_pf_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	i40e_ptp_init(pf);

	/* repopulate tunnel port filters */
	udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);

	return ret;
}
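
/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/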
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;
	int q_max;

	pf->num_lan_qps = 0;

	/* Find the max queues to be put into basic use.  We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big RSS set.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->alloc_rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
			       I40E_FLAG_IWARP_ENABLED |
			       I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_CAPABLE |
			       I40E_FLAG_DCB_ENABLED |
			       I40E_FLAG_SRIOV_ENABLED |
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
		pf->alloc_rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
			       I40E_FLAG_IWARP_ENABLED |
			       I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_ENABLED |
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_DCB_ENABLED);
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}

		/* limit lan qps to the smaller of qps, cpus or msix */
		q_max = max_t(int, pf->rss_size_max, num_online_cpus());
		q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
		q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
		pf->num_lan_qps = q_max;

		queues_left -= pf->num_lan_qps;
	}

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* save 1 queue for FD */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
	dev_dbg(&pf->pdev->dev,
		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
		pf->hw.func_caps.num_tx_qp,
		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
		queues_left);
}
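
/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. It also enables Flow director, ethertype and macvlan type
 * filter settings for the PF.
 *
 * Returns 0 on success, negative on failure
 **/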
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
	struct i40e_filter_control_settings *settings = &pf->filter_settings;

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled */
	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;
}

#define INFO_STRING_LEN 255
#define REMAIN(__x) (INFO_STRING_LEN - (__x))
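/**
 * i40e_print_features - Print a one-line summary of enabled features
 * @pf: board private structure
 **/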
static void i40e_print_features(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	char *buf;
	int i;

	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
	if (!buf)
		return;

	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
#ifdef CONFIG_PCI_IOV
	i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
	i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
		       pf->hw.func_caps.num_vsis,
		       pf->vsi[pf->lan_vsi]->num_queue_pairs);
	if (pf->flags & I40E_FLAG_RSS_ENABLED)
		i += scnprintf(&buf[i], REMAIN(i), " RSS");
	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
		i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
		i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
	}
	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
		i += scnprintf(&buf[i], REMAIN(i), " DCB");
	i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
	i += scnprintf(&buf[i], REMAIN(i), " Geneve");
	if (pf->flags & I40E_FLAG_PTP)
		i += scnprintf(&buf[i], REMAIN(i), " PTP");
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		i += scnprintf(&buf[i], REMAIN(i), " VEB");
	else
		i += scnprintf(&buf[i], REMAIN(i), " VEPA");

	dev_info(&pf->pdev->dev, "%s\n", buf);
	kfree(buf);
	WARN_ON(i > INFO_STRING_LEN);
}
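
/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address for the device. First we'll try
 * eth_platform_get_mac_address, which will check Open Firmware, or arch
 * specific fallback. Otherwise, we'll default to the stored value in
 * firmware.
 **/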
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
	if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
		i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
}
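
/**
 * i40e_set_fec_in_flags - helper function for setting FEC options in flags
 * @fec_cfg: FEC option to set in flags
 * @flags: ptr to flags in which we set FEC option
 **/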
void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
{
	if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
		*flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
		*flags |= I40E_FLAG_RS_FEC;
		*flags &= ~I40E_FLAG_BASE_R_FEC;
	}
	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
		*flags |= I40E_FLAG_BASE_R_FEC;
		*flags &= ~I40E_FLAG_RS_FEC;
	}
	if (fec_cfg == 0)
		*flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
}
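
/**
 * i40e_check_recovery_mode - check if we are running transition firmware
 * @pf: board private structure
 *
 * Check registers indicating the firmware runs in recovery mode. Sets the
 * appropriate driver state. Returns true if the recovery mode was detected,
 * false otherwise
 **/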
static bool i40e_check_recovery_mode(struct i40e_pf *pf)
{
	u32 val = rd32(&pf->hw, I40E_GL_FWSTS);

	if (val & I40E_GL_FWSTS_FWS1B_MASK) {
		dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
		dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
		set_bit(__I40E_RECOVERY_MODE, pf->state);

		return true;
	}
	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
		dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");

	return false;
}
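
/**
 * i40e_pf_loop_reset - perform reset in a loop
 * @pf: board private structure
 *
 * Retry a PF reset for up to ten seconds.  This is useful when the
 * firmware is entering or recovering from recovery mode, during which
 * a single reset attempt may transiently fail.
 **/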
static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
{
	/* wait max 10 seconds for PF reset to succeed */
	const unsigned long time_end = jiffies + 10 * HZ;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;

	ret = i40e_pf_reset(hw);
	while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
		usleep_range(10000, 20000);
		ret = i40e_pf_reset(hw);
	}

	if (ret == I40E_SUCCESS)
		pf->pfr_count++;
	else
		dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);

	return ret;
}
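
/**
 * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
 * @pf: board private structure
 *
 * Check the FWSTS register for the count of unexpected EMP Resets;
 * the firmware enters recovery mode once the counter passes its limit.
 *
 * Returns true if FW issued an unexpected EMP Reset
 **/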
static bool i40e_check_fw_empr(struct i40e_pf *pf)
{
	const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
			   I40E_GL_FWSTS_FWS1B_MASK;
	return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
	       (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
}
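
/**
 * i40e_handle_resets - handle EMP resets and PF resets
 * @pf: board private structure
 *
 * Handle both EMP resets and PF resets and conclude whether resets were
 * successful or not.
 *
 * Returns I40E_SUCCESS if all resets succeeded, else an error code
 **/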
static i40e_status i40e_handle_resets(struct i40e_pf *pf)
{
	const i40e_status pfr = i40e_pf_loop_reset(pf);
	const bool is_empr = i40e_check_fw_empr(pf);

	if (is_empr || pfr != I40E_SUCCESS)
		dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");

	return is_empr ? I40E_ERR_RESET_FAILED : pfr;
}
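
/**
 * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
 * @pf: board private structure
 * @hw: ptr to the hardware info
 *
 * This function does a minimal setup of all subsystems needed for running
 * recovery mode.
 *
 * Returns 0 on success, negative on failure
 **/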
static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
{
	struct i40e_vsi *vsi;
	int err;
	int v_idx;

	pci_save_state(pf->pdev);

	/* set up periodic task facility */
	timer_setup(&pf->service_timer, i40e_service_timer, 0);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
			  GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

	/* We allocate one VSI, which is needed as an absolute minimum
	 * in order to register the netdev
	 */
	v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
	if (v_idx < 0) {
		err = v_idx;
		goto err_switch_setup;
	}
	pf->lan_vsi = v_idx;
	vsi = pf->vsi[v_idx];
	if (!vsi) {
		err = -EFAULT;
		goto err_switch_setup;
	}
	vsi->alloc_queue_pairs = 1;
	err = i40e_config_netdev(vsi);
	if (err)
		goto err_switch_setup;
	err = register_netdev(vsi->netdev);
	if (err)
		goto err_switch_setup;
	vsi->netdev_registered = true;
	i40e_dbg_pf_init(pf);

	err = i40e_setup_misc_vector_for_recovery_mode(pf);
	if (err)
		goto err_switch_setup;

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;

err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
	i40e_shutdown_adminq(hw);
	iounmap(hw->hw_addr);
	pci_disable_pcie_error_reporting(pf->pdev);
	pci_release_mem_regions(pf->pdev);
	pci_disable_device(pf->pdev);
	kfree(pf);

	return err;
}
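
/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/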
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
#ifdef CONFIG_I40E_DCB
	enum i40e_get_fw_lldp_status_resp lldp_status;
	i40e_status status;
#endif /* CONFIG_I40E_DCB */
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	static u16 pfs_found;
	u16 wol_nvm_bits;
	u16 link_status;
	int err;
	u32 val;
	u32 i;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	/* set up pci connections */
	err = pci_request_mem_regions(pdev, i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_selected_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup.  This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf) {
		err = -ENOMEM;
		goto err_pf_alloc;
	}
	pf->next_vsi = 0;
	pf->pdev = pdev;
	set_bit(__I40E_DOWN, pf->state);

	hw = &pf->hw;
	hw->back = pf;

	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
				I40E_MAX_CSR_SPACE);
	/* We believe that the highest register to read is
	 * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size
	 * is not less than that before mapping to prevent a
	 * kernel panic.
	 */
	if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
		dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
			pf->ioremap_len);
		err = -ENOMEM;
		goto err_ioremap;
	}
	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
	if (!hw->hw_addr) {
		err = -EIO;
		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
			 (unsigned int)pci_resource_start(pdev, 0),
			 pf->ioremap_len, err);
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;
	pf->instance = pfs_found;

	/* Select something other than the 802.1ad ethertype for the
	 * switch to use internally and drop on ingress.
	 */
	hw->switch_tag = 0xffff;
	hw->first_tag = ETH_P_8021AD;
	hw->second_tag = ETH_P_8021Q;

	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
	INIT_LIST_HEAD(&pf->l4_flex_pit_list);
	INIT_LIST_HEAD(&pf->ddp_old_prof);

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	pf->msg_enable = netif_msg_init(debug,
					NETIF_MSG_DRV |
					NETIF_MSG_PROBE |
					NETIF_MSG_LINK);
	if (debug < -1)
		pf->hw.debug_mask = debug;

	/* do a special CORER for clearing PXE mode once at init */
	if (hw->revision_id == 0 &&
	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
		i40e_flush(hw);
		msleep(200);
		pf->corer_count++;

		i40e_clear_pxe_mode(hw);
	}

	/* Reset here to make sure all is clean and to define PF 'n' */
	i40e_clear_hw(hw);

	err = i40e_set_mac_type(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	err = i40e_handle_resets(pf);
	if (err)
		goto err_pf_reset;

	i40e_check_recovery_mode(pf);

	if (is_kdump_kernel()) {
		hw->aq.num_arq_entries = I40E_MIN_ARQ_LEN;
		hw->aq.num_asq_entries = I40E_MIN_ASQ_LEN;
	} else {
		hw->aq.num_arq_entries = I40E_AQ_LEN;
		hw->aq.num_asq_entries = I40E_AQ_LEN;
	}
	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;

	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
		 "%s-%s:misc",
		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));

	err = i40e_init_shared_code(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	/* set up a default setting for link flow control */
	pf->hw.fc.requested_mode = I40E_FC_NONE;

	err = i40e_init_adminq(hw);
	if (err) {
		if (err == I40E_ERR_FIRMWARE_API_VERSION)
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
				 hw->aq.api_maj_ver,
				 hw->aq.api_min_ver,
				 I40E_FW_API_VERSION_MAJOR,
				 I40E_FW_MINOR_VERSION(hw));
		else
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");

		goto err_pf_reset;
	}
	i40e_get_oem_version(hw);

	/* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
		 i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
		 hw->subsystem_vendor_id, hw->subsystem_device_id);

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
		dev_info(&pdev->dev,
			 "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
			 hw->aq.api_maj_ver,
			 hw->aq.api_min_ver,
			 I40E_FW_API_VERSION_MAJOR,
			 I40E_FW_MINOR_VERSION(hw));
	else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
		dev_info(&pdev->dev,
			 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
			 hw->aq.api_maj_ver,
			 hw->aq.api_min_ver,
			 I40E_FW_API_VERSION_MAJOR,
			 I40E_FW_MINOR_VERSION(hw));

	i40e_verify_eeprom(pf);

	/* Rev 0 hardware was never productized */
	if (hw->revision_id < 1)
		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");

	i40e_clear_pxe_mode(hw);

	err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (err)
		goto err_adminq_setup;

	err = i40e_sw_init(pf);
	if (err) {
		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
		goto err_sw_init;
	}

	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
		return i40e_init_recovery_mode(pf, hw);

	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (err) {
		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
		goto err_init_lan_hmc;
	}

	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (err) {
		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
		err = -ENOENT;
		goto err_configure_lan_hmc;
	}

	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore error return codes because if it was already disabled via
	 * hardware settings this will fail
	 */
	if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
		i40e_aq_stop_lldp(hw, true, false, NULL);
	}

	/* allow a platform-specific MAC to override the NVM one */
	i40e_get_platform_mac_addr(pdev, pf);

	if (!is_valid_ether_addr(hw->mac.addr)) {
		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
		err = -EIO;
		goto err_mac_addr;
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
	if (is_valid_ether_addr(hw->mac.port_addr))
		pf->hw_features |= I40E_HW_PORT_ID_VALID;

	pci_set_drvdata(pdev, pf);
	pci_save_state(pdev);

#ifdef CONFIG_I40E_DCB
	status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
	if (!status && lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED)
		pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP;
	else
		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
	dev_info(&pdev->dev,
		 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
			"FW LLDP is disabled\n" :
			"FW LLDP is enabled\n");

	/* Enable FW to write default DCB config on link-up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

	err = i40e_init_pf_dcb(pf);
	if (err) {
		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */

	/* set up periodic task facility */
	timer_setup(&pf->service_timer, i40e_service_timer, 0);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
		pf->wol_en = false;
	else
		pf->wol_en = true;
	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);

	/* set up the main switch operations */
	i40e_determine_queue_usage(pf);
	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* Reduce Tx and Rx queue pairs for kdump
	 * When MSI-X is enabled, it's not allowed to use more TC queue
	 * pairs than maximum of queue pairs per TC
	 */
	if (is_kdump_kernel())
		pf->num_lan_msix = 1;

	pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
	pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
	pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
	pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
	pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
	pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
						    UDP_TUNNEL_TYPE_GENEVE;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
		dev_warn(&pf->pdev->dev,
			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
	}

	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
			  GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		if (pci_num_vf(pdev))
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
	}
#endif
	err = i40e_setup_pf_switch(pf, false);
	if (err) {
		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
		goto err_vsis;
	}
	INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);

	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
		}
	}

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}

	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			i40e_cloud_filter_exit(pf);
			i40e_fdir_teardown(pf);
			goto err_vsis;
		}
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
						      pf->num_iwarp_msix,
						      I40E_IWARP_IRQ_PILE_ID);
		if (pf->iwarp_base_vector < 0) {
			dev_info(&pdev->dev,
				 "failed to get tracking for %d vectors for IWARP err=%d\n",
				 pf->num_iwarp_msix, pf->iwarp_base_vector);
			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
		}
	}

	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	/* add this PF to client device list and launch a client service task */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		err = i40e_lan_add_device(pf);
		if (err)
			dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
				 err);
	}

#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
	if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
		char speed[PCI_SPEED_SIZE] = "Unknown";
		char width[PCI_WIDTH_SIZE] = "Unknown";

		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
					  &link_status);

		i40e_set_pci_config_data(hw, link_status);

		switch (hw->bus.speed) {
		case i40e_bus_speed_8000:
			strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_5000:
			strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_2500:
			strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
		default:
			break;
		}
		switch (hw->bus.width) {
		case i40e_bus_width_pcie_x8:
			strlcpy(width, "8", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x4:
			strlcpy(width, "4", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x2:
			strlcpy(width, "2", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x1:
			strlcpy(width, "1", PCI_WIDTH_SIZE); break;
		default:
			break;
		}

		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
			 speed, width);

		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
		    hw->bus.speed < i40e_bus_speed_8000) {
			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
		}
	}

	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* set the FEC config due to the board capabilities */
	i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure the MFS hasn't been set lower than the default */
#define MAX_FRAME_SIZE_DEFAULT 0x2600
	val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
	       I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
	if (val < MAX_FRAME_SIZE_DEFAULT)
		dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
			 hw->port, val);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
		pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
		pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;

	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;

	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
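
/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/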
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);

	/* Wait until any in-progress reset recovery completes */
	while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		usleep_range(1000, 2000);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	/* no more scheduling of any task */
	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	if (pf->service_timer.function)
		del_timer_sync(&pf->service_timer);
	if (pf->service_task.func)
		cancel_work_sync(&pf->service_task);

	if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
		struct i40e_vsi *vsi = pf->vsi[0];

		/* We know that we have allocated only one vsi for this PF,
		 * it was just for registering netdevice and in the rest we
		 * won't use it
		 */
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);

		goto unmap;
	}

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	i40e_cloud_filter_exit(pf);

	/* remove attached clients */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		ret_code = i40e_lan_del_device(pf);
		if (ret_code)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 ret_code);
	}

	/* shutdown and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

unmap:
	/* Free MSI/legacy interrupt 0 when in recovery mode. */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
		free_irq(pf->pdev->irq, pf);

	/* shutdown the adminq */
	i40e_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
				i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}
	rtnl_unlock();

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(hw->hw_addr);
	kfree(pf);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
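
/**
 * i40e_pci_error_detected - warning that something is wrong with the PCI slot
 * @pdev: PCI device information struct
 * @error: the type of PCI error
 *
 * Called to warn that something happened on the PCI bus and the error handling
 * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
 **/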
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						pci_channel_state_t error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	if (!pf) {
		dev_info(&pdev->dev,
			 "Cannot recover - error happened during device probe\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		i40e_prep_for_reset(pf);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
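
/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset.  If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/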
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	u32 reg;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	return result;
}
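
/**
 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
 * @pdev: PCI device information struct
 **/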
static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_prep_for_reset(pf);
}
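
/**
 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
 * @pdev: PCI device information struct
 **/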
static void i40e_pci_error_reset_done(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_reset_and_rebuild(pf, false, false);
}
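
/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery have finished
 **/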
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, pf->state))
		return;

	i40e_handle_reset_warning(pf, false);
}
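
/**
 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
 * using the mac_address_write admin q function
 * @pf: pointer to i40e_pf struct
 **/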
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u8 mac_addr[6];
	u16 flags = 0;

	/* Get current MAC address in case it's an LAA */
	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
		ether_addr_copy(mac_addr,
				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
	} else {
		dev_err(&pf->pdev->dev,
			"Failed to retrieve MAC address; using default\n");
		ether_addr_copy(mac_addr, hw->mac.addr);
	}

	/* The FW expects the mac address write cmd to first be called with
	 * one of these flags before calling it again with the multicast
	 * enable flags.
	 */
	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;

	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;

	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
		return;
	}

	flags = I40E_AQC_MC_MAG_EN
			| I40E_AQC_WOL_PRESERVE_ON_PFR
			| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed to enable Multicast Magic Packet wake up\n");
}
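
/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/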
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_cloud_filter_exit(pf);
	i40e_fdir_teardown(pf);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf);

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Free MSI/legacy interrupt 0 when in recovery mode. */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
		free_irq(pf->pdev->irq, pf);

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * whole section
	 */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	rtnl_unlock();

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
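
/**
 * i40e_suspend - PM callback for moving to D3
 * @dev: generic device information structure
 **/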
static int __maybe_unused i40e_suspend(struct device *dev)
{
	struct i40e_pf *pf = dev_get_drvdata(dev);
	struct i40e_hw *hw = &pf->hw;

	/* If we're already suspended, then there is nothing to do */
	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	set_bit(__I40E_DOWN, pf->state);

	/* Ensure service task will not be running */
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * whole section
	 */
	rtnl_lock();

	i40e_prep_for_reset(pf);

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Clear the interrupt scheme and release our IRQs so that the system
	 * can safely hibernate even when there are a large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	i40e_clear_interrupt_scheme(pf);

	rtnl_unlock();

	return 0;
}
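
/**
 * i40e_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 **/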
static int __maybe_unused i40e_resume(struct device *dev)
{
	struct i40e_pf *pf = dev_get_drvdata(dev);
	int err;

	/* If we're not suspended, then there is nothing to do */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	/* We need to hold the RTNL lock prior to restoring interrupt schemes,
	 * since we're going to be restoring queues
	 */
	rtnl_lock();

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	err = i40e_restore_interrupt_scheme(pf);
	if (err) {
		dev_err(dev, "Cannot restore interrupt scheme: %d\n",
			err);
	}

	clear_bit(__I40E_DOWN, pf->state);
	i40e_reset_and_rebuild(pf, false, true);

	rtnl_unlock();

	/* Clear suspended state last after everything is recovered */
	clear_bit(__I40E_SUSPENDED, pf->state);

	/* Restart the service task */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;
}

static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.reset_prepare = i40e_pci_error_reset_prepare,
	.reset_done = i40e_pci_error_reset_done,
	.resume = i40e_pci_error_resume,
};

static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);

static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
	.driver = {
		.pm = &i40e_pm_ops,
	},
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
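
/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/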
static int __init i40e_init_module(void)
{
	pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* There is no need to throttle the number of active tasks because
	 * each device limits its own task using a state bit for scheduling
	 * the service task, and the device tasks do not interfere with each
	 * other, so we don't set WQ_MAX_ACTIVE
	 */
	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);
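
/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/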
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);