1
2
3
4#include <linux/etherdevice.h>
5#include <linux/of_net.h>
6#include <linux/pci.h>
7#include <linux/bpf.h>
8
9
10#include "i40e.h"
11#include "i40e_diag.h"
12#include "i40e_xsk.h"
13#include <net/udp_tunnel.h>
14#include <net/xdp_sock.h>
15
16
17
18
19#define CREATE_TRACE_POINTS
20#include "i40e_trace.h"
21
22const char i40e_driver_name[] = "i40e";
23static const char i40e_driver_string[] =
24 "Intel(R) Ethernet Connection XL710 Network Driver";
25
26#define DRV_KERN "-k"
27
28#define DRV_VERSION_MAJOR 2
29#define DRV_VERSION_MINOR 8
30#define DRV_VERSION_BUILD 20
31#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
32 __stringify(DRV_VERSION_MINOR) "." \
33 __stringify(DRV_VERSION_BUILD) DRV_KERN
34const char i40e_driver_version_str[] = DRV_VERSION;
35static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
36
37
38static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
39static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
40static int i40e_add_vsi(struct i40e_vsi *vsi);
41static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
42static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
43static int i40e_setup_misc_vector(struct i40e_pf *pf);
44static void i40e_determine_queue_usage(struct i40e_pf *pf);
45static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
46static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
47static int i40e_reset(struct i40e_pf *pf);
48static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
49static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
50static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
51static bool i40e_check_recovery_mode(struct i40e_pf *pf);
52static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
53static void i40e_fdir_sb_setup(struct i40e_pf *pf);
54static int i40e_veb_get_bw_info(struct i40e_veb *veb);
55static int i40e_get_capabilities(struct i40e_pf *pf,
56 enum i40e_admin_queue_opc list_type);
57
58
59
60
61
62
63
64
65
66static const struct pci_device_id i40e_pci_tbl[] = {
67 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
68 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
69 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
70 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
71 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
72 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
73 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
74 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
75 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
76 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
77 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
78 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
79 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
80 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
81 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
82 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
83 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
84 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
85 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
86 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
87 {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
88 {PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
89 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
90 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
91
92 {0, }
93};
94MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
95
96#define I40E_MAX_VF_COUNT 128
97static int debug = -1;
98module_param(debug, uint, 0);
99MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
100
101MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
102MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
103MODULE_LICENSE("GPL v2");
104MODULE_VERSION(DRV_VERSION);
105
106static struct workqueue_struct *i40e_wq;
107
108
109
110
111
112
113
114
115int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
116 u64 size, u32 alignment)
117{
118 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
119
120 mem->size = ALIGN(size, alignment);
121 mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
122 GFP_KERNEL);
123 if (!mem->va)
124 return -ENOMEM;
125
126 return 0;
127}
128
129
130
131
132
133
134int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
135{
136 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
137
138 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
139 mem->va = NULL;
140 mem->pa = 0;
141 mem->size = 0;
142
143 return 0;
144}
145
146
147
148
149
150
151
152int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
153 u32 size)
154{
155 mem->size = size;
156 mem->va = kzalloc(size, GFP_KERNEL);
157
158 if (!mem->va)
159 return -ENOMEM;
160
161 return 0;
162}
163
164
165
166
167
168
169int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
170{
171
172 kfree(mem->va);
173 mem->va = NULL;
174 mem->size = 0;
175
176 return 0;
177}
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
193 u16 needed, u16 id)
194{
195 int ret = -ENOMEM;
196 int i, j;
197
198 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
199 dev_info(&pf->pdev->dev,
200 "param err: pile=%s needed=%d id=0x%04x\n",
201 pile ? "<valid>" : "<null>", needed, id);
202 return -EINVAL;
203 }
204
205
206 i = pile->search_hint;
207 while (i < pile->num_entries) {
208
209 if (pile->list[i] & I40E_PILE_VALID_BIT) {
210 i++;
211 continue;
212 }
213
214
215 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
216 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
217 break;
218 }
219
220 if (j == needed) {
221
222 for (j = 0; j < needed; j++)
223 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
224 ret = i;
225 pile->search_hint = i + j;
226 break;
227 }
228
229
230 i += j;
231 }
232
233 return ret;
234}
235
236
237
238
239
240
241
242
243
244static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
245{
246 int valid_id = (id | I40E_PILE_VALID_BIT);
247 int count = 0;
248 int i;
249
250 if (!pile || index >= pile->num_entries)
251 return -EINVAL;
252
253 for (i = index;
254 i < pile->num_entries && pile->list[i] == valid_id;
255 i++) {
256 pile->list[i] = 0;
257 count++;
258 }
259
260 if (count && index < pile->search_hint)
261 pile->search_hint = index;
262
263 return count;
264}
265
266
267
268
269
270
271struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
272{
273 int i;
274
275 for (i = 0; i < pf->num_alloc_vsi; i++)
276 if (pf->vsi[i] && (pf->vsi[i]->id == id))
277 return pf->vsi[i];
278
279 return NULL;
280}
281
282
283
284
285
286
287
288void i40e_service_event_schedule(struct i40e_pf *pf)
289{
290 if ((!test_bit(__I40E_DOWN, pf->state) &&
291 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
292 test_bit(__I40E_RECOVERY_MODE, pf->state))
293 queue_work(i40e_wq, &pf->service_task);
294}
295
296
297
298
299
300
301
302
303
304static void i40e_tx_timeout(struct net_device *netdev)
305{
306 struct i40e_netdev_priv *np = netdev_priv(netdev);
307 struct i40e_vsi *vsi = np->vsi;
308 struct i40e_pf *pf = vsi->back;
309 struct i40e_ring *tx_ring = NULL;
310 unsigned int i, hung_queue = 0;
311 u32 head, val;
312
313 pf->tx_timeout_count++;
314
315
316 for (i = 0; i < netdev->num_tx_queues; i++) {
317 struct netdev_queue *q;
318 unsigned long trans_start;
319
320 q = netdev_get_tx_queue(netdev, i);
321 trans_start = q->trans_start;
322 if (netif_xmit_stopped(q) &&
323 time_after(jiffies,
324 (trans_start + netdev->watchdog_timeo))) {
325 hung_queue = i;
326 break;
327 }
328 }
329
330 if (i == netdev->num_tx_queues) {
331 netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
332 } else {
333
334 for (i = 0; i < vsi->num_queue_pairs; i++) {
335 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
336 if (hung_queue ==
337 vsi->tx_rings[i]->queue_index) {
338 tx_ring = vsi->tx_rings[i];
339 break;
340 }
341 }
342 }
343 }
344
345 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
346 pf->tx_timeout_recovery_level = 1;
347 else if (time_before(jiffies,
348 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
349 return;
350
351
352 if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
353 return;
354
355 if (tx_ring) {
356 head = i40e_get_head(tx_ring);
357
358 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
359 val = rd32(&pf->hw,
360 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
361 tx_ring->vsi->base_vector - 1));
362 else
363 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
364
365 netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
366 vsi->seid, hung_queue, tx_ring->next_to_clean,
367 head, tx_ring->next_to_use,
368 readl(tx_ring->tail), val);
369 }
370
371 pf->tx_timeout_last_recovery = jiffies;
372 netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
373 pf->tx_timeout_recovery_level, hung_queue);
374
375 switch (pf->tx_timeout_recovery_level) {
376 case 1:
377 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
378 break;
379 case 2:
380 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
381 break;
382 case 3:
383 set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
384 break;
385 default:
386 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
387 break;
388 }
389
390 i40e_service_event_schedule(pf);
391 pf->tx_timeout_recovery_level++;
392}
393
394
395
396
397
398
399
400
401struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
402{
403 return &vsi->net_stats;
404}
405
406
407
408
409
410
411static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
412 struct rtnl_link_stats64 *stats)
413{
414 u64 bytes, packets;
415 unsigned int start;
416
417 do {
418 start = u64_stats_fetch_begin_irq(&ring->syncp);
419 packets = ring->stats.packets;
420 bytes = ring->stats.bytes;
421 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
422
423 stats->tx_packets += packets;
424 stats->tx_bytes += bytes;
425}
426
427
428
429
430
431
432
433
434
435static void i40e_get_netdev_stats_struct(struct net_device *netdev,
436 struct rtnl_link_stats64 *stats)
437{
438 struct i40e_netdev_priv *np = netdev_priv(netdev);
439 struct i40e_vsi *vsi = np->vsi;
440 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
441 struct i40e_ring *ring;
442 int i;
443
444 if (test_bit(__I40E_VSI_DOWN, vsi->state))
445 return;
446
447 if (!vsi->tx_rings)
448 return;
449
450 rcu_read_lock();
451 for (i = 0; i < vsi->num_queue_pairs; i++) {
452 u64 bytes, packets;
453 unsigned int start;
454
455 ring = READ_ONCE(vsi->tx_rings[i]);
456 if (!ring)
457 continue;
458 i40e_get_netdev_stats_struct_tx(ring, stats);
459
460 if (i40e_enabled_xdp_vsi(vsi)) {
461 ring++;
462 i40e_get_netdev_stats_struct_tx(ring, stats);
463 }
464
465 ring++;
466 do {
467 start = u64_stats_fetch_begin_irq(&ring->syncp);
468 packets = ring->stats.packets;
469 bytes = ring->stats.bytes;
470 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
471
472 stats->rx_packets += packets;
473 stats->rx_bytes += bytes;
474
475 }
476 rcu_read_unlock();
477
478
479 stats->multicast = vsi_stats->multicast;
480 stats->tx_errors = vsi_stats->tx_errors;
481 stats->tx_dropped = vsi_stats->tx_dropped;
482 stats->rx_errors = vsi_stats->rx_errors;
483 stats->rx_dropped = vsi_stats->rx_dropped;
484 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
485 stats->rx_length_errors = vsi_stats->rx_length_errors;
486}
487
488
489
490
491
492void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
493{
494 struct rtnl_link_stats64 *ns;
495 int i;
496
497 if (!vsi)
498 return;
499
500 ns = i40e_get_vsi_stats_struct(vsi);
501 memset(ns, 0, sizeof(*ns));
502 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
503 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
504 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
505 if (vsi->rx_rings && vsi->rx_rings[0]) {
506 for (i = 0; i < vsi->num_queue_pairs; i++) {
507 memset(&vsi->rx_rings[i]->stats, 0,
508 sizeof(vsi->rx_rings[i]->stats));
509 memset(&vsi->rx_rings[i]->rx_stats, 0,
510 sizeof(vsi->rx_rings[i]->rx_stats));
511 memset(&vsi->tx_rings[i]->stats, 0,
512 sizeof(vsi->tx_rings[i]->stats));
513 memset(&vsi->tx_rings[i]->tx_stats, 0,
514 sizeof(vsi->tx_rings[i]->tx_stats));
515 }
516 }
517 vsi->stat_offsets_loaded = false;
518}
519
520
521
522
523
524void i40e_pf_reset_stats(struct i40e_pf *pf)
525{
526 int i;
527
528 memset(&pf->stats, 0, sizeof(pf->stats));
529 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
530 pf->stat_offsets_loaded = false;
531
532 for (i = 0; i < I40E_MAX_VEB; i++) {
533 if (pf->veb[i]) {
534 memset(&pf->veb[i]->stats, 0,
535 sizeof(pf->veb[i]->stats));
536 memset(&pf->veb[i]->stats_offsets, 0,
537 sizeof(pf->veb[i]->stats_offsets));
538 memset(&pf->veb[i]->tc_stats, 0,
539 sizeof(pf->veb[i]->tc_stats));
540 memset(&pf->veb[i]->tc_stats_offsets, 0,
541 sizeof(pf->veb[i]->tc_stats_offsets));
542 pf->veb[i]->stat_offsets_loaded = false;
543 }
544 }
545 pf->hw_csum_rx_error = 0;
546}
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
564 bool offset_loaded, u64 *offset, u64 *stat)
565{
566 u64 new_data;
567
568 if (hw->device_id == I40E_DEV_ID_QEMU) {
569 new_data = rd32(hw, loreg);
570 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
571 } else {
572 new_data = rd64(hw, loreg);
573 }
574 if (!offset_loaded)
575 *offset = new_data;
576 if (likely(new_data >= *offset))
577 *stat = new_data - *offset;
578 else
579 *stat = (new_data + BIT_ULL(48)) - *offset;
580 *stat &= 0xFFFFFFFFFFFFULL;
581}
582
583
584
585
586
587
588
589
590
591static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
592 bool offset_loaded, u64 *offset, u64 *stat)
593{
594 u32 new_data;
595
596 new_data = rd32(hw, reg);
597 if (!offset_loaded)
598 *offset = new_data;
599 if (likely(new_data >= *offset))
600 *stat = (u32)(new_data - *offset);
601 else
602 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
603}
604
605
606
607
608
609
610
611static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
612{
613 u32 new_data = rd32(hw, reg);
614
615 wr32(hw, reg, 1);
616 *stat += new_data;
617}
618
619
620
621
622
623void i40e_update_eth_stats(struct i40e_vsi *vsi)
624{
625 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
626 struct i40e_pf *pf = vsi->back;
627 struct i40e_hw *hw = &pf->hw;
628 struct i40e_eth_stats *oes;
629 struct i40e_eth_stats *es;
630
631 es = &vsi->eth_stats;
632 oes = &vsi->eth_stats_offsets;
633
634
635 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
636 vsi->stat_offsets_loaded,
637 &oes->tx_errors, &es->tx_errors);
638 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
639 vsi->stat_offsets_loaded,
640 &oes->rx_discards, &es->rx_discards);
641 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
642 vsi->stat_offsets_loaded,
643 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
644
645 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
646 I40E_GLV_GORCL(stat_idx),
647 vsi->stat_offsets_loaded,
648 &oes->rx_bytes, &es->rx_bytes);
649 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
650 I40E_GLV_UPRCL(stat_idx),
651 vsi->stat_offsets_loaded,
652 &oes->rx_unicast, &es->rx_unicast);
653 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
654 I40E_GLV_MPRCL(stat_idx),
655 vsi->stat_offsets_loaded,
656 &oes->rx_multicast, &es->rx_multicast);
657 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
658 I40E_GLV_BPRCL(stat_idx),
659 vsi->stat_offsets_loaded,
660 &oes->rx_broadcast, &es->rx_broadcast);
661
662 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
663 I40E_GLV_GOTCL(stat_idx),
664 vsi->stat_offsets_loaded,
665 &oes->tx_bytes, &es->tx_bytes);
666 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
667 I40E_GLV_UPTCL(stat_idx),
668 vsi->stat_offsets_loaded,
669 &oes->tx_unicast, &es->tx_unicast);
670 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
671 I40E_GLV_MPTCL(stat_idx),
672 vsi->stat_offsets_loaded,
673 &oes->tx_multicast, &es->tx_multicast);
674 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
675 I40E_GLV_BPTCL(stat_idx),
676 vsi->stat_offsets_loaded,
677 &oes->tx_broadcast, &es->tx_broadcast);
678 vsi->stat_offsets_loaded = true;
679}
680
681
682
683
684
685void i40e_update_veb_stats(struct i40e_veb *veb)
686{
687 struct i40e_pf *pf = veb->pf;
688 struct i40e_hw *hw = &pf->hw;
689 struct i40e_eth_stats *oes;
690 struct i40e_eth_stats *es;
691 struct i40e_veb_tc_stats *veb_oes;
692 struct i40e_veb_tc_stats *veb_es;
693 int i, idx = 0;
694
695 idx = veb->stats_idx;
696 es = &veb->stats;
697 oes = &veb->stats_offsets;
698 veb_es = &veb->tc_stats;
699 veb_oes = &veb->tc_stats_offsets;
700
701
702 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
703 veb->stat_offsets_loaded,
704 &oes->tx_discards, &es->tx_discards);
705 if (hw->revision_id > 0)
706 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
707 veb->stat_offsets_loaded,
708 &oes->rx_unknown_protocol,
709 &es->rx_unknown_protocol);
710 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
711 veb->stat_offsets_loaded,
712 &oes->rx_bytes, &es->rx_bytes);
713 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
714 veb->stat_offsets_loaded,
715 &oes->rx_unicast, &es->rx_unicast);
716 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
717 veb->stat_offsets_loaded,
718 &oes->rx_multicast, &es->rx_multicast);
719 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
720 veb->stat_offsets_loaded,
721 &oes->rx_broadcast, &es->rx_broadcast);
722
723 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
724 veb->stat_offsets_loaded,
725 &oes->tx_bytes, &es->tx_bytes);
726 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
727 veb->stat_offsets_loaded,
728 &oes->tx_unicast, &es->tx_unicast);
729 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
730 veb->stat_offsets_loaded,
731 &oes->tx_multicast, &es->tx_multicast);
732 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
733 veb->stat_offsets_loaded,
734 &oes->tx_broadcast, &es->tx_broadcast);
735 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
736 i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
737 I40E_GLVEBTC_RPCL(i, idx),
738 veb->stat_offsets_loaded,
739 &veb_oes->tc_rx_packets[i],
740 &veb_es->tc_rx_packets[i]);
741 i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
742 I40E_GLVEBTC_RBCL(i, idx),
743 veb->stat_offsets_loaded,
744 &veb_oes->tc_rx_bytes[i],
745 &veb_es->tc_rx_bytes[i]);
746 i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
747 I40E_GLVEBTC_TPCL(i, idx),
748 veb->stat_offsets_loaded,
749 &veb_oes->tc_tx_packets[i],
750 &veb_es->tc_tx_packets[i]);
751 i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
752 I40E_GLVEBTC_TBCL(i, idx),
753 veb->stat_offsets_loaded,
754 &veb_oes->tc_tx_bytes[i],
755 &veb_es->tc_tx_bytes[i]);
756 }
757 veb->stat_offsets_loaded = true;
758}
759
760
761
762
763
764
765
766
767
768
769
770static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
771{
772 struct i40e_pf *pf = vsi->back;
773 struct rtnl_link_stats64 *ons;
774 struct rtnl_link_stats64 *ns;
775 struct i40e_eth_stats *oes;
776 struct i40e_eth_stats *es;
777 u32 tx_restart, tx_busy;
778 struct i40e_ring *p;
779 u32 rx_page, rx_buf;
780 u64 bytes, packets;
781 unsigned int start;
782 u64 tx_linearize;
783 u64 tx_force_wb;
784 u64 rx_p, rx_b;
785 u64 tx_p, tx_b;
786 u16 q;
787
788 if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
789 test_bit(__I40E_CONFIG_BUSY, pf->state))
790 return;
791
792 ns = i40e_get_vsi_stats_struct(vsi);
793 ons = &vsi->net_stats_offsets;
794 es = &vsi->eth_stats;
795 oes = &vsi->eth_stats_offsets;
796
797
798
799
800 rx_b = rx_p = 0;
801 tx_b = tx_p = 0;
802 tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
803 rx_page = 0;
804 rx_buf = 0;
805 rcu_read_lock();
806 for (q = 0; q < vsi->num_queue_pairs; q++) {
807
808 p = READ_ONCE(vsi->tx_rings[q]);
809
810 do {
811 start = u64_stats_fetch_begin_irq(&p->syncp);
812 packets = p->stats.packets;
813 bytes = p->stats.bytes;
814 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
815 tx_b += bytes;
816 tx_p += packets;
817 tx_restart += p->tx_stats.restart_queue;
818 tx_busy += p->tx_stats.tx_busy;
819 tx_linearize += p->tx_stats.tx_linearize;
820 tx_force_wb += p->tx_stats.tx_force_wb;
821
822
823 p = &p[1];
824 do {
825 start = u64_stats_fetch_begin_irq(&p->syncp);
826 packets = p->stats.packets;
827 bytes = p->stats.bytes;
828 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
829 rx_b += bytes;
830 rx_p += packets;
831 rx_buf += p->rx_stats.alloc_buff_failed;
832 rx_page += p->rx_stats.alloc_page_failed;
833 }
834 rcu_read_unlock();
835 vsi->tx_restart = tx_restart;
836 vsi->tx_busy = tx_busy;
837 vsi->tx_linearize = tx_linearize;
838 vsi->tx_force_wb = tx_force_wb;
839 vsi->rx_page_failed = rx_page;
840 vsi->rx_buf_failed = rx_buf;
841
842 ns->rx_packets = rx_p;
843 ns->rx_bytes = rx_b;
844 ns->tx_packets = tx_p;
845 ns->tx_bytes = tx_b;
846
847
848 i40e_update_eth_stats(vsi);
849 ons->tx_errors = oes->tx_errors;
850 ns->tx_errors = es->tx_errors;
851 ons->multicast = oes->rx_multicast;
852 ns->multicast = es->rx_multicast;
853 ons->rx_dropped = oes->rx_discards;
854 ns->rx_dropped = es->rx_discards;
855 ons->tx_dropped = oes->tx_discards;
856 ns->tx_dropped = es->tx_discards;
857
858
859 if (vsi == pf->vsi[pf->lan_vsi]) {
860 ns->rx_crc_errors = pf->stats.crc_errors;
861 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
862 ns->rx_length_errors = pf->stats.rx_length_errors;
863 }
864}
865
866
867
868
869
870static void i40e_update_pf_stats(struct i40e_pf *pf)
871{
872 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
873 struct i40e_hw_port_stats *nsd = &pf->stats;
874 struct i40e_hw *hw = &pf->hw;
875 u32 val;
876 int i;
877
878 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
879 I40E_GLPRT_GORCL(hw->port),
880 pf->stat_offsets_loaded,
881 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
882 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
883 I40E_GLPRT_GOTCL(hw->port),
884 pf->stat_offsets_loaded,
885 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
886 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
887 pf->stat_offsets_loaded,
888 &osd->eth.rx_discards,
889 &nsd->eth.rx_discards);
890 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
891 I40E_GLPRT_UPRCL(hw->port),
892 pf->stat_offsets_loaded,
893 &osd->eth.rx_unicast,
894 &nsd->eth.rx_unicast);
895 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
896 I40E_GLPRT_MPRCL(hw->port),
897 pf->stat_offsets_loaded,
898 &osd->eth.rx_multicast,
899 &nsd->eth.rx_multicast);
900 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
901 I40E_GLPRT_BPRCL(hw->port),
902 pf->stat_offsets_loaded,
903 &osd->eth.rx_broadcast,
904 &nsd->eth.rx_broadcast);
905 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
906 I40E_GLPRT_UPTCL(hw->port),
907 pf->stat_offsets_loaded,
908 &osd->eth.tx_unicast,
909 &nsd->eth.tx_unicast);
910 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
911 I40E_GLPRT_MPTCL(hw->port),
912 pf->stat_offsets_loaded,
913 &osd->eth.tx_multicast,
914 &nsd->eth.tx_multicast);
915 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
916 I40E_GLPRT_BPTCL(hw->port),
917 pf->stat_offsets_loaded,
918 &osd->eth.tx_broadcast,
919 &nsd->eth.tx_broadcast);
920
921 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
922 pf->stat_offsets_loaded,
923 &osd->tx_dropped_link_down,
924 &nsd->tx_dropped_link_down);
925
926 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
927 pf->stat_offsets_loaded,
928 &osd->crc_errors, &nsd->crc_errors);
929
930 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
931 pf->stat_offsets_loaded,
932 &osd->illegal_bytes, &nsd->illegal_bytes);
933
934 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
935 pf->stat_offsets_loaded,
936 &osd->mac_local_faults,
937 &nsd->mac_local_faults);
938 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
939 pf->stat_offsets_loaded,
940 &osd->mac_remote_faults,
941 &nsd->mac_remote_faults);
942
943 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
944 pf->stat_offsets_loaded,
945 &osd->rx_length_errors,
946 &nsd->rx_length_errors);
947
948 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
949 pf->stat_offsets_loaded,
950 &osd->link_xon_rx, &nsd->link_xon_rx);
951 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
952 pf->stat_offsets_loaded,
953 &osd->link_xon_tx, &nsd->link_xon_tx);
954 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
955 pf->stat_offsets_loaded,
956 &osd->link_xoff_rx, &nsd->link_xoff_rx);
957 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
958 pf->stat_offsets_loaded,
959 &osd->link_xoff_tx, &nsd->link_xoff_tx);
960
961 for (i = 0; i < 8; i++) {
962 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
963 pf->stat_offsets_loaded,
964 &osd->priority_xoff_rx[i],
965 &nsd->priority_xoff_rx[i]);
966 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
967 pf->stat_offsets_loaded,
968 &osd->priority_xon_rx[i],
969 &nsd->priority_xon_rx[i]);
970 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
971 pf->stat_offsets_loaded,
972 &osd->priority_xon_tx[i],
973 &nsd->priority_xon_tx[i]);
974 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
975 pf->stat_offsets_loaded,
976 &osd->priority_xoff_tx[i],
977 &nsd->priority_xoff_tx[i]);
978 i40e_stat_update32(hw,
979 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
980 pf->stat_offsets_loaded,
981 &osd->priority_xon_2_xoff[i],
982 &nsd->priority_xon_2_xoff[i]);
983 }
984
985 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
986 I40E_GLPRT_PRC64L(hw->port),
987 pf->stat_offsets_loaded,
988 &osd->rx_size_64, &nsd->rx_size_64);
989 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
990 I40E_GLPRT_PRC127L(hw->port),
991 pf->stat_offsets_loaded,
992 &osd->rx_size_127, &nsd->rx_size_127);
993 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
994 I40E_GLPRT_PRC255L(hw->port),
995 pf->stat_offsets_loaded,
996 &osd->rx_size_255, &nsd->rx_size_255);
997 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
998 I40E_GLPRT_PRC511L(hw->port),
999 pf->stat_offsets_loaded,
1000 &osd->rx_size_511, &nsd->rx_size_511);
1001 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1002 I40E_GLPRT_PRC1023L(hw->port),
1003 pf->stat_offsets_loaded,
1004 &osd->rx_size_1023, &nsd->rx_size_1023);
1005 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1006 I40E_GLPRT_PRC1522L(hw->port),
1007 pf->stat_offsets_loaded,
1008 &osd->rx_size_1522, &nsd->rx_size_1522);
1009 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1010 I40E_GLPRT_PRC9522L(hw->port),
1011 pf->stat_offsets_loaded,
1012 &osd->rx_size_big, &nsd->rx_size_big);
1013
1014 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1015 I40E_GLPRT_PTC64L(hw->port),
1016 pf->stat_offsets_loaded,
1017 &osd->tx_size_64, &nsd->tx_size_64);
1018 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1019 I40E_GLPRT_PTC127L(hw->port),
1020 pf->stat_offsets_loaded,
1021 &osd->tx_size_127, &nsd->tx_size_127);
1022 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1023 I40E_GLPRT_PTC255L(hw->port),
1024 pf->stat_offsets_loaded,
1025 &osd->tx_size_255, &nsd->tx_size_255);
1026 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1027 I40E_GLPRT_PTC511L(hw->port),
1028 pf->stat_offsets_loaded,
1029 &osd->tx_size_511, &nsd->tx_size_511);
1030 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1031 I40E_GLPRT_PTC1023L(hw->port),
1032 pf->stat_offsets_loaded,
1033 &osd->tx_size_1023, &nsd->tx_size_1023);
1034 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1035 I40E_GLPRT_PTC1522L(hw->port),
1036 pf->stat_offsets_loaded,
1037 &osd->tx_size_1522, &nsd->tx_size_1522);
1038 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1039 I40E_GLPRT_PTC9522L(hw->port),
1040 pf->stat_offsets_loaded,
1041 &osd->tx_size_big, &nsd->tx_size_big);
1042
1043 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1044 pf->stat_offsets_loaded,
1045 &osd->rx_undersize, &nsd->rx_undersize);
1046 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1047 pf->stat_offsets_loaded,
1048 &osd->rx_fragments, &nsd->rx_fragments);
1049 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1050 pf->stat_offsets_loaded,
1051 &osd->rx_oversize, &nsd->rx_oversize);
1052 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1053 pf->stat_offsets_loaded,
1054 &osd->rx_jabber, &nsd->rx_jabber);
1055
1056
1057 i40e_stat_update_and_clear32(hw,
1058 I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
1059 &nsd->fd_atr_match);
1060 i40e_stat_update_and_clear32(hw,
1061 I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
1062 &nsd->fd_sb_match);
1063 i40e_stat_update_and_clear32(hw,
1064 I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
1065 &nsd->fd_atr_tunnel_match);
1066
1067 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1068 nsd->tx_lpi_status =
1069 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1070 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1071 nsd->rx_lpi_status =
1072 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1073 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1074 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1075 pf->stat_offsets_loaded,
1076 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1077 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1078 pf->stat_offsets_loaded,
1079 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1080
1081 if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1082 !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
1083 nsd->fd_sb_status = true;
1084 else
1085 nsd->fd_sb_status = false;
1086
1087 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1088 !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
1089 nsd->fd_atr_status = true;
1090 else
1091 nsd->fd_atr_status = false;
1092
1093 pf->stat_offsets_loaded = true;
1094}
1095
1096
1097
1098
1099
1100
1101
1102void i40e_update_stats(struct i40e_vsi *vsi)
1103{
1104 struct i40e_pf *pf = vsi->back;
1105
1106 if (vsi == pf->vsi[pf->lan_vsi])
1107 i40e_update_pf_stats(pf);
1108
1109 i40e_update_vsi_stats(vsi);
1110}
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1121 const u8 *macaddr, s16 vlan)
1122{
1123 struct i40e_mac_filter *f;
1124 u64 key;
1125
1126 if (!vsi || !macaddr)
1127 return NULL;
1128
1129 key = i40e_addr_to_hkey(macaddr);
1130 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1131 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1132 (vlan == f->vlan))
1133 return f;
1134 }
1135 return NULL;
1136}
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
1147{
1148 struct i40e_mac_filter *f;
1149 u64 key;
1150
1151 if (!vsi || !macaddr)
1152 return NULL;
1153
1154 key = i40e_addr_to_hkey(macaddr);
1155 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1156 if ((ether_addr_equal(macaddr, f->macaddr)))
1157 return f;
1158 }
1159 return NULL;
1160}
1161
1162
1163
1164
1165
1166
1167
1168bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1169{
1170
1171 if (vsi->info.pvid)
1172 return true;
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194 return vsi->has_vlan_filter;
1195}
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
1227 struct hlist_head *tmp_add_list,
1228 struct hlist_head *tmp_del_list,
1229 int vlan_filters)
1230{
1231 s16 pvid = le16_to_cpu(vsi->info.pvid);
1232 struct i40e_mac_filter *f, *add_head;
1233 struct i40e_new_mac_filter *new;
1234 struct hlist_node *h;
1235 int bkt, new_vlan;
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252 hlist_for_each_entry(new, tmp_add_list, hlist) {
1253 if (pvid && new->f->vlan != pvid)
1254 new->f->vlan = pvid;
1255 else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
1256 new->f->vlan = 0;
1257 else if (!vlan_filters && new->f->vlan == 0)
1258 new->f->vlan = I40E_VLAN_ANY;
1259 }
1260
1261
1262 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1263
1264
1265
1266
1267
1268 if ((pvid && f->vlan != pvid) ||
1269 (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1270 (!vlan_filters && f->vlan == 0)) {
1271
1272 if (pvid)
1273 new_vlan = pvid;
1274 else if (vlan_filters)
1275 new_vlan = 0;
1276 else
1277 new_vlan = I40E_VLAN_ANY;
1278
1279
1280 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
1281 if (!add_head)
1282 return -ENOMEM;
1283
1284
1285 new = kzalloc(sizeof(*new), GFP_ATOMIC);
1286 if (!new)
1287 return -ENOMEM;
1288
1289 new->f = add_head;
1290 new->state = add_head->state;
1291
1292
1293 hlist_add_head(&new->hlist, tmp_add_list);
1294
1295
1296 f->state = I40E_FILTER_REMOVE;
1297 hash_del(&f->hlist);
1298 hlist_add_head(&f->hlist, tmp_del_list);
1299 }
1300 }
1301
1302 vsi->has_vlan_filter = !!vlan_filters;
1303
1304 return 0;
1305}
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1316{
1317 struct i40e_aqc_remove_macvlan_element_data element;
1318 struct i40e_pf *pf = vsi->back;
1319
1320
1321 if (vsi->type != I40E_VSI_MAIN)
1322 return;
1323
1324 memset(&element, 0, sizeof(element));
1325 ether_addr_copy(element.mac_addr, macaddr);
1326 element.vlan_tag = 0;
1327
1328 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1329 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1330
1331 memset(&element, 0, sizeof(element));
1332 ether_addr_copy(element.mac_addr, macaddr);
1333 element.vlan_tag = 0;
1334
1335 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1336 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1337 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1338}
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1352 const u8 *macaddr, s16 vlan)
1353{
1354 struct i40e_mac_filter *f;
1355 u64 key;
1356
1357 if (!vsi || !macaddr)
1358 return NULL;
1359
1360 f = i40e_find_filter(vsi, macaddr, vlan);
1361 if (!f) {
1362 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1363 if (!f)
1364 return NULL;
1365
1366
1367
1368
1369 if (vlan >= 0)
1370 vsi->has_vlan_filter = true;
1371
1372 ether_addr_copy(f->macaddr, macaddr);
1373 f->vlan = vlan;
1374 f->state = I40E_FILTER_NEW;
1375 INIT_HLIST_NODE(&f->hlist);
1376
1377 key = i40e_addr_to_hkey(macaddr);
1378 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1379
1380 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1381 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1382 }
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392 if (f->state == I40E_FILTER_REMOVE)
1393 f->state = I40E_FILTER_ACTIVE;
1394
1395 return f;
1396}
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
1414{
1415 if (!f)
1416 return;
1417
1418
1419
1420
1421
1422 if ((f->state == I40E_FILTER_FAILED) ||
1423 (f->state == I40E_FILTER_NEW)) {
1424 hash_del(&f->hlist);
1425 kfree(f);
1426 } else {
1427 f->state = I40E_FILTER_REMOVE;
1428 }
1429
1430 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1431 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1432}
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
1447{
1448 struct i40e_mac_filter *f;
1449
1450 if (!vsi || !macaddr)
1451 return;
1452
1453 f = i40e_find_filter(vsi, macaddr, vlan);
1454 __i40e_del_filter(vsi, f);
1455}
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
1470 const u8 *macaddr)
1471{
1472 struct i40e_mac_filter *f, *add = NULL;
1473 struct hlist_node *h;
1474 int bkt;
1475
1476 if (vsi->info.pvid)
1477 return i40e_add_filter(vsi, macaddr,
1478 le16_to_cpu(vsi->info.pvid));
1479
1480 if (!i40e_is_vsi_in_vlan(vsi))
1481 return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
1482
1483 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1484 if (f->state == I40E_FILTER_REMOVE)
1485 continue;
1486 add = i40e_add_filter(vsi, macaddr, f->vlan);
1487 if (!add)
1488 return NULL;
1489 }
1490
1491 return add;
1492}
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
1505{
1506 struct i40e_mac_filter *f;
1507 struct hlist_node *h;
1508 bool found = false;
1509 int bkt;
1510
1511 lockdep_assert_held(&vsi->mac_filter_hash_lock);
1512 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1513 if (ether_addr_equal(macaddr, f->macaddr)) {
1514 __i40e_del_filter(vsi, f);
1515 found = true;
1516 }
1517 }
1518
1519 if (found)
1520 return 0;
1521 else
1522 return -ENOENT;
1523}
1524
1525
1526
1527
1528
1529
1530
1531
1532static int i40e_set_mac(struct net_device *netdev, void *p)
1533{
1534 struct i40e_netdev_priv *np = netdev_priv(netdev);
1535 struct i40e_vsi *vsi = np->vsi;
1536 struct i40e_pf *pf = vsi->back;
1537 struct i40e_hw *hw = &pf->hw;
1538 struct sockaddr *addr = p;
1539
1540 if (!is_valid_ether_addr(addr->sa_data))
1541 return -EADDRNOTAVAIL;
1542
1543 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1544 netdev_info(netdev, "already using mac address %pM\n",
1545 addr->sa_data);
1546 return 0;
1547 }
1548
1549 if (test_bit(__I40E_DOWN, pf->state) ||
1550 test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
1551 return -EADDRNOTAVAIL;
1552
1553 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1554 netdev_info(netdev, "returning to hw mac address %pM\n",
1555 hw->mac.addr);
1556 else
1557 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1558
1559
1560
1561
1562
1563
1564
1565 spin_lock_bh(&vsi->mac_filter_hash_lock);
1566 i40e_del_mac_filter(vsi, netdev->dev_addr);
1567 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1568 i40e_add_mac_filter(vsi, netdev->dev_addr);
1569 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1570
1571 if (vsi->type == I40E_VSI_MAIN) {
1572 i40e_status ret;
1573
1574 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
1575 addr->sa_data, NULL);
1576 if (ret)
1577 netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
1578 i40e_stat_str(hw, ret),
1579 i40e_aq_str(hw, hw->aq.asq_last_status));
1580 }
1581
1582
1583
1584
1585 i40e_service_event_schedule(pf);
1586 return 0;
1587}
1588
1589
1590
1591
1592
1593
1594static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
1595 u8 *lut, u16 lut_size)
1596{
1597 struct i40e_pf *pf = vsi->back;
1598 struct i40e_hw *hw = &pf->hw;
1599 int ret = 0;
1600
1601 if (seed) {
1602 struct i40e_aqc_get_set_rss_key_data *seed_dw =
1603 (struct i40e_aqc_get_set_rss_key_data *)seed;
1604 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
1605 if (ret) {
1606 dev_info(&pf->pdev->dev,
1607 "Cannot set RSS key, err %s aq_err %s\n",
1608 i40e_stat_str(hw, ret),
1609 i40e_aq_str(hw, hw->aq.asq_last_status));
1610 return ret;
1611 }
1612 }
1613 if (lut) {
1614 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
1615
1616 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
1617 if (ret) {
1618 dev_info(&pf->pdev->dev,
1619 "Cannot set RSS lut, err %s aq_err %s\n",
1620 i40e_stat_str(hw, ret),
1621 i40e_aq_str(hw, hw->aq.asq_last_status));
1622 return ret;
1623 }
1624 }
1625 return ret;
1626}
1627
1628
1629
1630
1631
1632static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
1633{
1634 struct i40e_pf *pf = vsi->back;
1635 u8 seed[I40E_HKEY_ARRAY_SIZE];
1636 u8 *lut;
1637 int ret;
1638
1639 if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
1640 return 0;
1641 if (!vsi->rss_size)
1642 vsi->rss_size = min_t(int, pf->alloc_rss_size,
1643 vsi->num_queue_pairs);
1644 if (!vsi->rss_size)
1645 return -EINVAL;
1646 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1647 if (!lut)
1648 return -ENOMEM;
1649
1650
1651
1652
1653 if (vsi->rss_lut_user)
1654 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1655 else
1656 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
1657 if (vsi->rss_hkey_user)
1658 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
1659 else
1660 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
1661 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
1662 kfree(lut);
1663 return ret;
1664}
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
1675 struct i40e_vsi_context *ctxt,
1676 u8 enabled_tc)
1677{
1678 u16 qcount = 0, max_qcount, qmap, sections = 0;
1679 int i, override_q, pow, num_qps, ret;
1680 u8 netdev_tc = 0, offset = 0;
1681
1682 if (vsi->type != I40E_VSI_MAIN)
1683 return -EINVAL;
1684 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1685 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1686 vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
1687 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1688 num_qps = vsi->mqprio_qopt.qopt.count[0];
1689
1690
1691 pow = ilog2(num_qps);
1692 if (!is_power_of_2(num_qps))
1693 pow++;
1694 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1695 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1696
1697
1698 max_qcount = vsi->mqprio_qopt.qopt.count[0];
1699 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1700
1701 if (vsi->tc_config.enabled_tc & BIT(i)) {
1702 offset = vsi->mqprio_qopt.qopt.offset[i];
1703 qcount = vsi->mqprio_qopt.qopt.count[i];
1704 if (qcount > max_qcount)
1705 max_qcount = qcount;
1706 vsi->tc_config.tc_info[i].qoffset = offset;
1707 vsi->tc_config.tc_info[i].qcount = qcount;
1708 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1709 } else {
1710
1711
1712
1713
1714 vsi->tc_config.tc_info[i].qoffset = 0;
1715 vsi->tc_config.tc_info[i].qcount = 1;
1716 vsi->tc_config.tc_info[i].netdev_tc = 0;
1717 }
1718 }
1719
1720
1721 vsi->num_queue_pairs = offset + qcount;
1722
1723
1724 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1725 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1726 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1727 ctxt->info.valid_sections |= cpu_to_le16(sections);
1728
1729
1730 vsi->rss_size = max_qcount;
1731 ret = i40e_vsi_config_rss(vsi);
1732 if (ret) {
1733 dev_info(&vsi->back->pdev->dev,
1734 "Failed to reconfig rss for num_queues (%u)\n",
1735 max_qcount);
1736 return ret;
1737 }
1738 vsi->reconfig_rss = true;
1739 dev_dbg(&vsi->back->pdev->dev,
1740 "Reconfigured rss with num_queues (%u)\n", max_qcount);
1741
1742
1743
1744
1745 override_q = vsi->mqprio_qopt.qopt.count[0];
1746 if (override_q && override_q < vsi->num_queue_pairs) {
1747 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
1748 vsi->next_base_queue = override_q;
1749 }
1750 return 0;
1751}
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1763 struct i40e_vsi_context *ctxt,
1764 u8 enabled_tc,
1765 bool is_add)
1766{
1767 struct i40e_pf *pf = vsi->back;
1768 u16 sections = 0;
1769 u8 netdev_tc = 0;
1770 u16 numtc = 1;
1771 u16 qcount;
1772 u8 offset;
1773 u16 qmap;
1774 int i;
1775 u16 num_tc_qps = 0;
1776
1777 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1778 offset = 0;
1779
1780
1781 num_tc_qps = vsi->alloc_queue_pairs;
1782 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1783
1784 for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1785 if (enabled_tc & BIT(i))
1786 numtc++;
1787 }
1788 if (!numtc) {
1789 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1790 numtc = 1;
1791 }
1792 num_tc_qps = num_tc_qps / numtc;
1793 num_tc_qps = min_t(int, num_tc_qps,
1794 i40e_pf_get_max_q_per_tc(pf));
1795 }
1796
1797 vsi->tc_config.numtc = numtc;
1798 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1799
1800
1801 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1802 num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
1803
1804
1805 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1806
1807 if (vsi->tc_config.enabled_tc & BIT(i)) {
1808
1809 int pow, num_qps;
1810
1811 switch (vsi->type) {
1812 case I40E_VSI_MAIN:
1813 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
1814 I40E_FLAG_FD_ATR_ENABLED)) ||
1815 vsi->tc_config.enabled_tc != 1) {
1816 qcount = min_t(int, pf->alloc_rss_size,
1817 num_tc_qps);
1818 break;
1819 }
1820
1821 case I40E_VSI_FDIR:
1822 case I40E_VSI_SRIOV:
1823 case I40E_VSI_VMDQ2:
1824 default:
1825 qcount = num_tc_qps;
1826 WARN_ON(i != 0);
1827 break;
1828 }
1829 vsi->tc_config.tc_info[i].qoffset = offset;
1830 vsi->tc_config.tc_info[i].qcount = qcount;
1831
1832
1833 num_qps = qcount;
1834 pow = 0;
1835 while (num_qps && (BIT_ULL(pow) < qcount)) {
1836 pow++;
1837 num_qps >>= 1;
1838 }
1839
1840 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1841 qmap =
1842 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1843 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1844
1845 offset += qcount;
1846 } else {
1847
1848
1849
1850
1851 vsi->tc_config.tc_info[i].qoffset = 0;
1852 vsi->tc_config.tc_info[i].qcount = 1;
1853 vsi->tc_config.tc_info[i].netdev_tc = 0;
1854
1855 qmap = 0;
1856 }
1857 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1858 }
1859
1860
1861 vsi->num_queue_pairs = offset;
1862 if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
1863 if (vsi->req_queue_pairs > 0)
1864 vsi->num_queue_pairs = vsi->req_queue_pairs;
1865 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1866 vsi->num_queue_pairs = pf->num_lan_msix;
1867 }
1868
1869
1870 if (is_add) {
1871 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1872
1873 ctxt->info.up_enable_bits = enabled_tc;
1874 }
1875 if (vsi->type == I40E_VSI_SRIOV) {
1876 ctxt->info.mapping_flags |=
1877 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1878 for (i = 0; i < vsi->num_queue_pairs; i++)
1879 ctxt->info.queue_mapping[i] =
1880 cpu_to_le16(vsi->base_queue + i);
1881 } else {
1882 ctxt->info.mapping_flags |=
1883 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1884 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1885 }
1886 ctxt->info.valid_sections |= cpu_to_le16(sections);
1887}
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1898{
1899 struct i40e_netdev_priv *np = netdev_priv(netdev);
1900 struct i40e_vsi *vsi = np->vsi;
1901
1902 if (i40e_add_mac_filter(vsi, addr))
1903 return 0;
1904 else
1905 return -ENOMEM;
1906}
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1917{
1918 struct i40e_netdev_priv *np = netdev_priv(netdev);
1919 struct i40e_vsi *vsi = np->vsi;
1920
1921
1922
1923
1924
1925
1926 if (ether_addr_equal(addr, netdev->dev_addr))
1927 return 0;
1928
1929 i40e_del_mac_filter(vsi, addr);
1930
1931 return 0;
1932}
1933
1934
1935
1936
1937
1938static void i40e_set_rx_mode(struct net_device *netdev)
1939{
1940 struct i40e_netdev_priv *np = netdev_priv(netdev);
1941 struct i40e_vsi *vsi = np->vsi;
1942
1943 spin_lock_bh(&vsi->mac_filter_hash_lock);
1944
1945 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1946 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1947
1948 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1949
1950
1951 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1952 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1953 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1954 }
1955}
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1966 struct hlist_head *from)
1967{
1968 struct i40e_mac_filter *f;
1969 struct hlist_node *h;
1970
1971 hlist_for_each_entry_safe(f, h, from, hlist) {
1972 u64 key = i40e_addr_to_hkey(f->macaddr);
1973
1974
1975 hlist_del(&f->hlist);
1976 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1977 }
1978}
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
1989 struct hlist_head *from)
1990{
1991 struct i40e_new_mac_filter *new;
1992 struct hlist_node *h;
1993
1994 hlist_for_each_entry_safe(new, h, from, hlist) {
1995
1996 hlist_del(&new->hlist);
1997 kfree(new);
1998 }
1999}
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009static
2010struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2011{
2012 hlist_for_each_entry_continue(next, hlist) {
2013 if (!is_broadcast_ether_addr(next->f->macaddr))
2014 return next;
2015 }
2016
2017 return NULL;
2018}
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030static int
2031i40e_update_filter_state(int count,
2032 struct i40e_aqc_add_macvlan_element_data *add_list,
2033 struct i40e_new_mac_filter *add_head)
2034{
2035 int retval = 0;
2036 int i;
2037
2038 for (i = 0; i < count; i++) {
2039
2040
2041
2042
2043
2044
2045 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2046 add_head->state = I40E_FILTER_FAILED;
2047 } else {
2048 add_head->state = I40E_FILTER_ACTIVE;
2049 retval++;
2050 }
2051
2052 add_head = i40e_next_filter(add_head);
2053 if (!add_head)
2054 break;
2055 }
2056
2057 return retval;
2058}
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073static
2074void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2075 struct i40e_aqc_remove_macvlan_element_data *list,
2076 int num_del, int *retval)
2077{
2078 struct i40e_hw *hw = &vsi->back->hw;
2079 i40e_status aq_ret;
2080 int aq_err;
2081
2082 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
2083 aq_err = hw->aq.asq_last_status;
2084
2085
2086 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
2087 *retval = -EIO;
2088 dev_info(&vsi->back->pdev->dev,
2089 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2090 vsi_name, i40e_stat_str(hw, aq_ret),
2091 i40e_aq_str(hw, aq_err));
2092 }
2093}
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107static
2108void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2109 struct i40e_aqc_add_macvlan_element_data *list,
2110 struct i40e_new_mac_filter *add_head,
2111 int num_add)
2112{
2113 struct i40e_hw *hw = &vsi->back->hw;
2114 int aq_err, fcnt;
2115
2116 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
2117 aq_err = hw->aq.asq_last_status;
2118 fcnt = i40e_update_filter_state(num_add, list, add_head);
2119
2120 if (fcnt != num_add) {
2121 if (vsi->type == I40E_VSI_MAIN) {
2122 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2123 dev_warn(&vsi->back->pdev->dev,
2124 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2125 i40e_aq_str(hw, aq_err), vsi_name);
2126 } else if (vsi->type == I40E_VSI_SRIOV ||
2127 vsi->type == I40E_VSI_VMDQ1 ||
2128 vsi->type == I40E_VSI_VMDQ2) {
2129 dev_warn(&vsi->back->pdev->dev,
2130 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
2131 i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
2132 } else {
2133 dev_warn(&vsi->back->pdev->dev,
2134 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
2135 i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
2136 }
2137 }
2138}
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152static i40e_status
2153i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2154 struct i40e_mac_filter *f)
2155{
2156 bool enable = f->state == I40E_FILTER_NEW;
2157 struct i40e_hw *hw = &vsi->back->hw;
2158 i40e_status aq_ret;
2159
2160 if (f->vlan == I40E_VLAN_ANY) {
2161 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2162 vsi->seid,
2163 enable,
2164 NULL);
2165 } else {
2166 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2167 vsi->seid,
2168 enable,
2169 f->vlan,
2170 NULL);
2171 }
2172
2173 if (aq_ret) {
2174 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2175 dev_warn(&vsi->back->pdev->dev,
2176 "Error %s, forcing overflow promiscuous on %s\n",
2177 i40e_aq_str(hw, hw->aq.asq_last_status),
2178 vsi_name);
2179 }
2180
2181 return aq_ret;
2182}
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2194{
2195 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2196 struct i40e_hw *hw = &pf->hw;
2197 i40e_status aq_ret;
2198
2199 if (vsi->type == I40E_VSI_MAIN &&
2200 pf->lan_veb != I40E_NO_VEB &&
2201 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2202
2203
2204
2205
2206
2207 if (promisc)
2208 aq_ret = i40e_aq_set_default_vsi(hw,
2209 vsi->seid,
2210 NULL);
2211 else
2212 aq_ret = i40e_aq_clear_default_vsi(hw,
2213 vsi->seid,
2214 NULL);
2215 if (aq_ret) {
2216 dev_info(&pf->pdev->dev,
2217 "Set default VSI failed, err %s, aq_err %s\n",
2218 i40e_stat_str(hw, aq_ret),
2219 i40e_aq_str(hw, hw->aq.asq_last_status));
2220 }
2221 } else {
2222 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2223 hw,
2224 vsi->seid,
2225 promisc, NULL,
2226 true);
2227 if (aq_ret) {
2228 dev_info(&pf->pdev->dev,
2229 "set unicast promisc failed, err %s, aq_err %s\n",
2230 i40e_stat_str(hw, aq_ret),
2231 i40e_aq_str(hw, hw->aq.asq_last_status));
2232 }
2233 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2234 hw,
2235 vsi->seid,
2236 promisc, NULL);
2237 if (aq_ret) {
2238 dev_info(&pf->pdev->dev,
2239 "set multicast promisc failed, err %s, aq_err %s\n",
2240 i40e_stat_str(hw, aq_ret),
2241 i40e_aq_str(hw, hw->aq.asq_last_status));
2242 }
2243 }
2244
2245 if (!aq_ret)
2246 pf->cur_promisc = promisc;
2247
2248 return aq_ret;
2249}
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2260{
2261 struct hlist_head tmp_add_list, tmp_del_list;
2262 struct i40e_mac_filter *f;
2263 struct i40e_new_mac_filter *new, *add_head = NULL;
2264 struct i40e_hw *hw = &vsi->back->hw;
2265 bool old_overflow, new_overflow;
2266 unsigned int failed_filters = 0;
2267 unsigned int vlan_filters = 0;
2268 char vsi_name[16] = "PF";
2269 int filter_list_len = 0;
2270 i40e_status aq_ret = 0;
2271 u32 changed_flags = 0;
2272 struct hlist_node *h;
2273 struct i40e_pf *pf;
2274 int num_add = 0;
2275 int num_del = 0;
2276 int retval = 0;
2277 u16 cmd_flags;
2278 int list_size;
2279 int bkt;
2280
2281
2282 struct i40e_aqc_add_macvlan_element_data *add_list;
2283 struct i40e_aqc_remove_macvlan_element_data *del_list;
2284
2285 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2286 usleep_range(1000, 2000);
2287 pf = vsi->back;
2288
2289 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2290
2291 if (vsi->netdev) {
2292 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2293 vsi->current_netdev_flags = vsi->netdev->flags;
2294 }
2295
2296 INIT_HLIST_HEAD(&tmp_add_list);
2297 INIT_HLIST_HEAD(&tmp_del_list);
2298
2299 if (vsi->type == I40E_VSI_SRIOV)
2300 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2301 else if (vsi->type != I40E_VSI_MAIN)
2302 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2303
2304 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2305 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2306
2307 spin_lock_bh(&vsi->mac_filter_hash_lock);
2308
2309 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2310 if (f->state == I40E_FILTER_REMOVE) {
2311
2312 hash_del(&f->hlist);
2313 hlist_add_head(&f->hlist, &tmp_del_list);
2314
2315
2316 continue;
2317 }
2318 if (f->state == I40E_FILTER_NEW) {
2319
2320 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2321 if (!new)
2322 goto err_no_memory_locked;
2323
2324
2325 new->f = f;
2326 new->state = f->state;
2327
2328
2329 hlist_add_head(&new->hlist, &tmp_add_list);
2330 }
2331
2332
2333
2334
2335
2336 if (f->vlan > 0)
2337 vlan_filters++;
2338 }
2339
2340 retval = i40e_correct_mac_vlan_filters(vsi,
2341 &tmp_add_list,
2342 &tmp_del_list,
2343 vlan_filters);
2344 if (retval)
2345 goto err_no_memory_locked;
2346
2347 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2348 }
2349
2350
2351 if (!hlist_empty(&tmp_del_list)) {
2352 filter_list_len = hw->aq.asq_buf_size /
2353 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2354 list_size = filter_list_len *
2355 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2356 del_list = kzalloc(list_size, GFP_ATOMIC);
2357 if (!del_list)
2358 goto err_no_memory;
2359
2360 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2361 cmd_flags = 0;
2362
2363
2364
2365
2366 if (is_broadcast_ether_addr(f->macaddr)) {
2367 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2368
2369 hlist_del(&f->hlist);
2370 kfree(f);
2371 continue;
2372 }
2373
2374
2375 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2376 if (f->vlan == I40E_VLAN_ANY) {
2377 del_list[num_del].vlan_tag = 0;
2378 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2379 } else {
2380 del_list[num_del].vlan_tag =
2381 cpu_to_le16((u16)(f->vlan));
2382 }
2383
2384 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2385 del_list[num_del].flags = cmd_flags;
2386 num_del++;
2387
2388
2389 if (num_del == filter_list_len) {
2390 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2391 num_del, &retval);
2392 memset(del_list, 0, list_size);
2393 num_del = 0;
2394 }
2395
2396
2397
2398 hlist_del(&f->hlist);
2399 kfree(f);
2400 }
2401
2402 if (num_del) {
2403 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2404 num_del, &retval);
2405 }
2406
2407 kfree(del_list);
2408 del_list = NULL;
2409 }
2410
2411 if (!hlist_empty(&tmp_add_list)) {
2412
2413 filter_list_len = hw->aq.asq_buf_size /
2414 sizeof(struct i40e_aqc_add_macvlan_element_data);
2415 list_size = filter_list_len *
2416 sizeof(struct i40e_aqc_add_macvlan_element_data);
2417 add_list = kzalloc(list_size, GFP_ATOMIC);
2418 if (!add_list)
2419 goto err_no_memory;
2420
2421 num_add = 0;
2422 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2423
2424
2425
2426 if (is_broadcast_ether_addr(new->f->macaddr)) {
2427 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2428 new->f))
2429 new->state = I40E_FILTER_FAILED;
2430 else
2431 new->state = I40E_FILTER_ACTIVE;
2432 continue;
2433 }
2434
2435
2436 if (num_add == 0)
2437 add_head = new;
2438 cmd_flags = 0;
2439 ether_addr_copy(add_list[num_add].mac_addr,
2440 new->f->macaddr);
2441 if (new->f->vlan == I40E_VLAN_ANY) {
2442 add_list[num_add].vlan_tag = 0;
2443 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2444 } else {
2445 add_list[num_add].vlan_tag =
2446 cpu_to_le16((u16)(new->f->vlan));
2447 }
2448 add_list[num_add].queue_number = 0;
2449
2450 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2451 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2452 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2453 num_add++;
2454
2455
2456 if (num_add == filter_list_len) {
2457 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2458 add_head, num_add);
2459 memset(add_list, 0, list_size);
2460 num_add = 0;
2461 }
2462 }
2463 if (num_add) {
2464 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2465 num_add);
2466 }
2467
2468
2469
2470 spin_lock_bh(&vsi->mac_filter_hash_lock);
2471 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2472
2473 if (new->f->state == I40E_FILTER_NEW)
2474 new->f->state = new->state;
2475 hlist_del(&new->hlist);
2476 kfree(new);
2477 }
2478 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2479 kfree(add_list);
2480 add_list = NULL;
2481 }
2482
2483
2484 spin_lock_bh(&vsi->mac_filter_hash_lock);
2485 vsi->active_filters = 0;
2486 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2487 if (f->state == I40E_FILTER_ACTIVE)
2488 vsi->active_filters++;
2489 else if (f->state == I40E_FILTER_FAILED)
2490 failed_filters++;
2491 }
2492 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2493
2494
2495
2496
2497
2498 if (old_overflow && !failed_filters &&
2499 vsi->active_filters < vsi->promisc_threshold) {
2500 dev_info(&pf->pdev->dev,
2501 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2502 vsi_name);
2503 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2504 vsi->promisc_threshold = 0;
2505 }
2506
2507
2508 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2509 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2510 goto out;
2511 }
2512
2513 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2514
2515
2516
2517
2518 if (!old_overflow && new_overflow)
2519 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
2520
2521
2522 if (changed_flags & IFF_ALLMULTI) {
2523 bool cur_multipromisc;
2524
2525 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2526 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2527 vsi->seid,
2528 cur_multipromisc,
2529 NULL);
2530 if (aq_ret) {
2531 retval = i40e_aq_rc_to_posix(aq_ret,
2532 hw->aq.asq_last_status);
2533 dev_info(&pf->pdev->dev,
2534 "set multi promisc failed on %s, err %s aq_err %s\n",
2535 vsi_name,
2536 i40e_stat_str(hw, aq_ret),
2537 i40e_aq_str(hw, hw->aq.asq_last_status));
2538 } else {
2539 dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
2540 vsi->netdev->name,
2541 cur_multipromisc ? "entering" : "leaving");
2542 }
2543 }
2544
2545 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2546 bool cur_promisc;
2547
2548 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2549 new_overflow);
2550 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2551 if (aq_ret) {
2552 retval = i40e_aq_rc_to_posix(aq_ret,
2553 hw->aq.asq_last_status);
2554 dev_info(&pf->pdev->dev,
2555 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2556 cur_promisc ? "on" : "off",
2557 vsi_name,
2558 i40e_stat_str(hw, aq_ret),
2559 i40e_aq_str(hw, hw->aq.asq_last_status));
2560 }
2561 }
2562out:
2563
2564 if (retval)
2565 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2566
2567 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2568 return retval;
2569
2570err_no_memory:
2571
2572 spin_lock_bh(&vsi->mac_filter_hash_lock);
2573err_no_memory_locked:
2574 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2575 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2576 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2577
2578 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2579 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2580 return -ENOMEM;
2581}
2582
2583
2584
2585
2586
2587static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2588{
2589 int v;
2590
2591 if (!pf)
2592 return;
2593 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2594 return;
2595 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
2596 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2597 return;
2598 }
2599
2600 for (v = 0; v < pf->num_alloc_vsi; v++) {
2601 if (pf->vsi[v] &&
2602 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2603 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2604
2605 if (ret) {
2606
2607 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2608 pf->state);
2609 break;
2610 }
2611 }
2612 }
2613 clear_bit(__I40E_VF_DISABLE, pf->state);
2614}
2615
2616
2617
2618
2619
2620static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2621{
2622 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2623 return I40E_RXBUFFER_2048;
2624 else
2625 return I40E_RXBUFFER_3072;
2626}
2627
2628
2629
2630
2631
2632
2633
2634
2635static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2636{
2637 struct i40e_netdev_priv *np = netdev_priv(netdev);
2638 struct i40e_vsi *vsi = np->vsi;
2639 struct i40e_pf *pf = vsi->back;
2640
2641 if (i40e_enabled_xdp_vsi(vsi)) {
2642 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2643
2644 if (frame_size > i40e_max_xdp_frame_size(vsi))
2645 return -EINVAL;
2646 }
2647
2648 netdev_info(netdev, "changing MTU from %d to %d\n",
2649 netdev->mtu, new_mtu);
2650 netdev->mtu = new_mtu;
2651 if (netif_running(netdev))
2652 i40e_vsi_reinit_locked(vsi);
2653 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2654 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2655 return 0;
2656}
2657
2658
2659
2660
2661
2662
2663
2664int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2665{
2666 struct i40e_netdev_priv *np = netdev_priv(netdev);
2667 struct i40e_pf *pf = np->vsi->back;
2668
2669 switch (cmd) {
2670 case SIOCGHWTSTAMP:
2671 return i40e_ptp_get_ts_config(pf, ifr);
2672 case SIOCSHWTSTAMP:
2673 return i40e_ptp_set_ts_config(pf, ifr);
2674 default:
2675 return -EOPNOTSUPP;
2676 }
2677}
2678
2679
2680
2681
2682
2683void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2684{
2685 struct i40e_vsi_context ctxt;
2686 i40e_status ret;
2687
2688
2689 if (vsi->info.pvid)
2690 return;
2691
2692 if ((vsi->info.valid_sections &
2693 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2694 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2695 return;
2696
2697 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2698 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2699 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2700
2701 ctxt.seid = vsi->seid;
2702 ctxt.info = vsi->info;
2703 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2704 if (ret) {
2705 dev_info(&vsi->back->pdev->dev,
2706 "update vlan stripping failed, err %s aq_err %s\n",
2707 i40e_stat_str(&vsi->back->hw, ret),
2708 i40e_aq_str(&vsi->back->hw,
2709 vsi->back->hw.aq.asq_last_status));
2710 }
2711}
2712
2713
2714
2715
2716
2717void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2718{
2719 struct i40e_vsi_context ctxt;
2720 i40e_status ret;
2721
2722
2723 if (vsi->info.pvid)
2724 return;
2725
2726 if ((vsi->info.valid_sections &
2727 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2728 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2729 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2730 return;
2731
2732 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2733 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2734 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2735
2736 ctxt.seid = vsi->seid;
2737 ctxt.info = vsi->info;
2738 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2739 if (ret) {
2740 dev_info(&vsi->back->pdev->dev,
2741 "update vlan stripping failed, err %s aq_err %s\n",
2742 i40e_stat_str(&vsi->back->hw, ret),
2743 i40e_aq_str(&vsi->back->hw,
2744 vsi->back->hw.aq.asq_last_status));
2745 }
2746}
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2762{
2763 struct i40e_mac_filter *f, *add_f;
2764 struct hlist_node *h;
2765 int bkt;
2766
2767 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2768 if (f->state == I40E_FILTER_REMOVE)
2769 continue;
2770 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2771 if (!add_f) {
2772 dev_info(&vsi->back->pdev->dev,
2773 "Could not add vlan filter %d for %pM\n",
2774 vid, f->macaddr);
2775 return -ENOMEM;
2776 }
2777 }
2778
2779 return 0;
2780}
2781
2782
2783
2784
2785
2786
2787int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2788{
2789 int err;
2790
2791 if (vsi->info.pvid)
2792 return -EINVAL;
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802 if (!vid)
2803 return 0;
2804
2805
2806 spin_lock_bh(&vsi->mac_filter_hash_lock);
2807 err = i40e_add_vlan_all_mac(vsi, vid);
2808 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2809 if (err)
2810 return err;
2811
2812
2813
2814
2815 i40e_service_event_schedule(vsi->back);
2816 return 0;
2817}
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2833{
2834 struct i40e_mac_filter *f;
2835 struct hlist_node *h;
2836 int bkt;
2837
2838 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2839 if (f->vlan == vid)
2840 __i40e_del_filter(vsi, f);
2841 }
2842}
2843
2844
2845
2846
2847
2848
2849void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2850{
2851 if (!vid || vsi->info.pvid)
2852 return;
2853
2854 spin_lock_bh(&vsi->mac_filter_hash_lock);
2855 i40e_rm_vlan_all_mac(vsi, vid);
2856 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2857
2858
2859
2860
2861 i40e_service_event_schedule(vsi->back);
2862}
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2873 __always_unused __be16 proto, u16 vid)
2874{
2875 struct i40e_netdev_priv *np = netdev_priv(netdev);
2876 struct i40e_vsi *vsi = np->vsi;
2877 int ret = 0;
2878
2879 if (vid >= VLAN_N_VID)
2880 return -EINVAL;
2881
2882 ret = i40e_vsi_add_vlan(vsi, vid);
2883 if (!ret)
2884 set_bit(vid, vsi->active_vlans);
2885
2886 return ret;
2887}
2888
2889
2890
2891
2892
2893
2894
2895static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
2896 __always_unused __be16 proto, u16 vid)
2897{
2898 struct i40e_netdev_priv *np = netdev_priv(netdev);
2899 struct i40e_vsi *vsi = np->vsi;
2900
2901 if (vid >= VLAN_N_VID)
2902 return;
2903 set_bit(vid, vsi->active_vlans);
2904}
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2915 __always_unused __be16 proto, u16 vid)
2916{
2917 struct i40e_netdev_priv *np = netdev_priv(netdev);
2918 struct i40e_vsi *vsi = np->vsi;
2919
2920
2921
2922
2923
2924 i40e_vsi_kill_vlan(vsi, vid);
2925
2926 clear_bit(vid, vsi->active_vlans);
2927
2928 return 0;
2929}
2930
2931
2932
2933
2934
2935static void i40e_restore_vlan(struct i40e_vsi *vsi)
2936{
2937 u16 vid;
2938
2939 if (!vsi->netdev)
2940 return;
2941
2942 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2943 i40e_vlan_stripping_enable(vsi);
2944 else
2945 i40e_vlan_stripping_disable(vsi);
2946
2947 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2948 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
2949 vid);
2950}
2951
2952
2953
2954
2955
2956
2957int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2958{
2959 struct i40e_vsi_context ctxt;
2960 i40e_status ret;
2961
2962 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2963 vsi->info.pvid = cpu_to_le16(vid);
2964 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2965 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2966 I40E_AQ_VSI_PVLAN_EMOD_STR;
2967
2968 ctxt.seid = vsi->seid;
2969 ctxt.info = vsi->info;
2970 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2971 if (ret) {
2972 dev_info(&vsi->back->pdev->dev,
2973 "add pvid failed, err %s aq_err %s\n",
2974 i40e_stat_str(&vsi->back->hw, ret),
2975 i40e_aq_str(&vsi->back->hw,
2976 vsi->back->hw.aq.asq_last_status));
2977 return -ENOENT;
2978 }
2979
2980 return 0;
2981}
2982
2983
2984
2985
2986
2987
2988
2989void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2990{
2991 vsi->info.pvid = 0;
2992
2993 i40e_vlan_stripping_disable(vsi);
2994}
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
3007{
3008 int i, err = 0;
3009
3010 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3011 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3012
3013 if (!i40e_enabled_xdp_vsi(vsi))
3014 return err;
3015
3016 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3017 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3018
3019 return err;
3020}
3021
3022
3023
3024
3025
3026
3027
3028static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3029{
3030 int i;
3031
3032 if (vsi->tx_rings) {
3033 for (i = 0; i < vsi->num_queue_pairs; i++)
3034 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3035 i40e_free_tx_resources(vsi->tx_rings[i]);
3036 }
3037
3038 if (vsi->xdp_rings) {
3039 for (i = 0; i < vsi->num_queue_pairs; i++)
3040 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3041 i40e_free_tx_resources(vsi->xdp_rings[i]);
3042 }
3043}
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3056{
3057 int i, err = 0;
3058
3059 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3060 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3061 return err;
3062}
3063
3064
3065
3066
3067
3068
3069
3070static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3071{
3072 int i;
3073
3074 if (!vsi->rx_rings)
3075 return;
3076
3077 for (i = 0; i < vsi->num_queue_pairs; i++)
3078 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3079 i40e_free_rx_resources(vsi->rx_rings[i]);
3080}
3081
3082
3083
3084
3085
3086
3087
3088
3089static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3090{
3091 int cpu;
3092
3093 if (!ring->q_vector || !ring->netdev || ring->ch)
3094 return;
3095
3096
3097 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3098 return;
3099
3100 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3101 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3102 ring->queue_index);
3103}
3104
3105
3106
3107
3108
3109
3110
3111static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
3112{
3113 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3114 int qid = ring->queue_index;
3115
3116 if (ring_is_xdp(ring))
3117 qid -= ring->vsi->alloc_queue_pairs;
3118
3119 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3120 return NULL;
3121
3122 return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
3123}
3124
3125
3126
3127
3128
3129
3130
3131static int i40e_configure_tx_ring(struct i40e_ring *ring)
3132{
3133 struct i40e_vsi *vsi = ring->vsi;
3134 u16 pf_q = vsi->base_queue + ring->queue_index;
3135 struct i40e_hw *hw = &vsi->back->hw;
3136 struct i40e_hmc_obj_txq tx_ctx;
3137 i40e_status err = 0;
3138 u32 qtx_ctl = 0;
3139
3140 if (ring_is_xdp(ring))
3141 ring->xsk_umem = i40e_xsk_umem(ring);
3142
3143
3144 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3145 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3146 ring->atr_count = 0;
3147 } else {
3148 ring->atr_sample_rate = 0;
3149 }
3150
3151
3152 i40e_config_xps_tx_ring(ring);
3153
3154
3155 memset(&tx_ctx, 0, sizeof(tx_ctx));
3156
3157 tx_ctx.new_context = 1;
3158 tx_ctx.base = (ring->dma / 128);
3159 tx_ctx.qlen = ring->count;
3160 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3161 I40E_FLAG_FD_ATR_ENABLED));
3162 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3163
3164 if (vsi->type != I40E_VSI_FDIR)
3165 tx_ctx.head_wb_ena = 1;
3166 tx_ctx.head_wb_addr = ring->dma +
3167 (ring->count * sizeof(struct i40e_tx_desc));
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180 if (ring->ch)
3181 tx_ctx.rdylist =
3182 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3183
3184 else
3185 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3186
3187 tx_ctx.rdylist_act = 0;
3188
3189
3190 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3191 if (err) {
3192 dev_info(&vsi->back->pdev->dev,
3193 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3194 ring->queue_index, pf_q, err);
3195 return -ENOMEM;
3196 }
3197
3198
3199 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3200 if (err) {
3201 dev_info(&vsi->back->pdev->dev,
3202 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3203 ring->queue_index, pf_q, err);
3204 return -ENOMEM;
3205 }
3206
3207
3208 if (ring->ch) {
3209 if (ring->ch->type == I40E_VSI_VMDQ2)
3210 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3211 else
3212 return -EINVAL;
3213
3214 qtx_ctl |= (ring->ch->vsi_number <<
3215 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3216 I40E_QTX_CTL_VFVM_INDX_MASK;
3217 } else {
3218 if (vsi->type == I40E_VSI_VMDQ2) {
3219 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3220 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3221 I40E_QTX_CTL_VFVM_INDX_MASK;
3222 } else {
3223 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3224 }
3225 }
3226
3227 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3228 I40E_QTX_CTL_PF_INDX_MASK);
3229 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3230 i40e_flush(hw);
3231
3232
3233 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3234
3235 return 0;
3236}
3237
3238
3239
3240
3241
3242
3243
3244static int i40e_configure_rx_ring(struct i40e_ring *ring)
3245{
3246 struct i40e_vsi *vsi = ring->vsi;
3247 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3248 u16 pf_q = vsi->base_queue + ring->queue_index;
3249 struct i40e_hw *hw = &vsi->back->hw;
3250 struct i40e_hmc_obj_rxq rx_ctx;
3251 i40e_status err = 0;
3252 bool ok;
3253 int ret;
3254
3255 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3256
3257
3258 memset(&rx_ctx, 0, sizeof(rx_ctx));
3259
3260 if (ring->vsi->type == I40E_VSI_MAIN)
3261 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3262
3263 ring->xsk_umem = i40e_xsk_umem(ring);
3264 if (ring->xsk_umem) {
3265 ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
3266 XDP_PACKET_HEADROOM;
3267
3268
3269
3270
3271 chain_len = 1;
3272 ring->zca.free = i40e_zca_free;
3273 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3274 MEM_TYPE_ZERO_COPY,
3275 &ring->zca);
3276 if (ret)
3277 return ret;
3278 dev_info(&vsi->back->pdev->dev,
3279 "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
3280 ring->queue_index);
3281
3282 } else {
3283 ring->rx_buf_len = vsi->rx_buf_len;
3284 if (ring->vsi->type == I40E_VSI_MAIN) {
3285 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3286 MEM_TYPE_PAGE_SHARED,
3287 NULL);
3288 if (ret)
3289 return ret;
3290 }
3291 }
3292
3293 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3294 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3295
3296 rx_ctx.base = (ring->dma / 128);
3297 rx_ctx.qlen = ring->count;
3298
3299
3300 rx_ctx.dsize = 1;
3301
3302
3303
3304
3305 rx_ctx.hsplit_0 = 0;
3306
3307 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3308 if (hw->revision_id == 0)
3309 rx_ctx.lrxqthresh = 0;
3310 else
3311 rx_ctx.lrxqthresh = 1;
3312 rx_ctx.crcstrip = 1;
3313 rx_ctx.l2tsel = 1;
3314
3315 rx_ctx.showiv = 0;
3316
3317 rx_ctx.prefena = 1;
3318
3319
3320 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3321 if (err) {
3322 dev_info(&vsi->back->pdev->dev,
3323 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3324 ring->queue_index, pf_q, err);
3325 return -ENOMEM;
3326 }
3327
3328
3329 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3330 if (err) {
3331 dev_info(&vsi->back->pdev->dev,
3332 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3333 ring->queue_index, pf_q, err);
3334 return -ENOMEM;
3335 }
3336
3337
3338 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3339 clear_ring_build_skb_enabled(ring);
3340 else
3341 set_ring_build_skb_enabled(ring);
3342
3343
3344 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3345 writel(0, ring->tail);
3346
3347 ok = ring->xsk_umem ?
3348 i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
3349 !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3350 if (!ok) {
3351
3352
3353
3354 dev_info(&vsi->back->pdev->dev,
3355 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3356 ring->xsk_umem ? "UMEM enabled " : "",
3357 ring->queue_index, pf_q);
3358 }
3359
3360 return 0;
3361}
3362
3363
3364
3365
3366
3367
3368
3369static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3370{
3371 int err = 0;
3372 u16 i;
3373
3374 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3375 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3376
3377 if (err || !i40e_enabled_xdp_vsi(vsi))
3378 return err;
3379
3380 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3381 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3382
3383 return err;
3384}
3385
3386
3387
3388
3389
3390
3391
3392static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3393{
3394 int err = 0;
3395 u16 i;
3396
3397 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3398 vsi->max_frame = I40E_MAX_RXBUFFER;
3399 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3400#if (PAGE_SIZE < 8192)
3401 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3402 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3403 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3404 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3405#endif
3406 } else {
3407 vsi->max_frame = I40E_MAX_RXBUFFER;
3408 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3409 I40E_RXBUFFER_2048;
3410 }
3411
3412
3413 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3414 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3415
3416 return err;
3417}
3418
3419
3420
3421
3422
3423static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3424{
3425 struct i40e_ring *tx_ring, *rx_ring;
3426 u16 qoffset, qcount;
3427 int i, n;
3428
3429 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3430
3431 for (i = 0; i < vsi->num_queue_pairs; i++) {
3432 rx_ring = vsi->rx_rings[i];
3433 tx_ring = vsi->tx_rings[i];
3434 rx_ring->dcb_tc = 0;
3435 tx_ring->dcb_tc = 0;
3436 }
3437 return;
3438 }
3439
3440 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3441 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3442 continue;
3443
3444 qoffset = vsi->tc_config.tc_info[n].qoffset;
3445 qcount = vsi->tc_config.tc_info[n].qcount;
3446 for (i = qoffset; i < (qoffset + qcount); i++) {
3447 rx_ring = vsi->rx_rings[i];
3448 tx_ring = vsi->tx_rings[i];
3449 rx_ring->dcb_tc = n;
3450 tx_ring->dcb_tc = n;
3451 }
3452 }
3453}
3454
3455
3456
3457
3458
3459static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3460{
3461 if (vsi->netdev)
3462 i40e_set_rx_mode(vsi->netdev);
3463}
3464
3465
3466
3467
3468
3469
3470
3471
3472static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3473{
3474 struct i40e_fdir_filter *filter;
3475 struct i40e_pf *pf = vsi->back;
3476 struct hlist_node *node;
3477
3478 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3479 return;
3480
3481
3482 pf->fd_tcp4_filter_cnt = 0;
3483 pf->fd_udp4_filter_cnt = 0;
3484 pf->fd_sctp4_filter_cnt = 0;
3485 pf->fd_ip4_filter_cnt = 0;
3486
3487 hlist_for_each_entry_safe(filter, node,
3488 &pf->fdir_filter_list, fdir_node) {
3489 i40e_add_del_fdir(vsi, filter, true);
3490 }
3491}
3492
3493
3494
3495
3496
3497static int i40e_vsi_configure(struct i40e_vsi *vsi)
3498{
3499 int err;
3500
3501 i40e_set_vsi_rx_mode(vsi);
3502 i40e_restore_vlan(vsi);
3503 i40e_vsi_config_dcb_rings(vsi);
3504 err = i40e_vsi_configure_tx(vsi);
3505 if (!err)
3506 err = i40e_vsi_configure_rx(vsi);
3507
3508 return err;
3509}
3510
3511
3512
3513
3514
3515static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3516{
3517 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3518 struct i40e_pf *pf = vsi->back;
3519 struct i40e_hw *hw = &pf->hw;
3520 u16 vector;
3521 int i, q;
3522 u32 qp;
3523
3524
3525
3526
3527
3528 qp = vsi->base_queue;
3529 vector = vsi->base_vector;
3530 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3531 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3532
3533 q_vector->rx.next_update = jiffies + 1;
3534 q_vector->rx.target_itr =
3535 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3536 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3537 q_vector->rx.target_itr);
3538 q_vector->rx.current_itr = q_vector->rx.target_itr;
3539
3540 q_vector->tx.next_update = jiffies + 1;
3541 q_vector->tx.target_itr =
3542 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3543 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3544 q_vector->tx.target_itr);
3545 q_vector->tx.current_itr = q_vector->tx.target_itr;
3546
3547 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3548 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3549
3550
3551 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3552 for (q = 0; q < q_vector->num_ringpairs; q++) {
3553 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
3554 u32 val;
3555
3556 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3557 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3558 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3559 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3560 (I40E_QUEUE_TYPE_TX <<
3561 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3562
3563 wr32(hw, I40E_QINT_RQCTL(qp), val);
3564
3565 if (has_xdp) {
3566 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3567 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3568 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3569 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3570 (I40E_QUEUE_TYPE_TX <<
3571 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3572
3573 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3574 }
3575
3576 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3577 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3578 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3579 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3580 (I40E_QUEUE_TYPE_RX <<
3581 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3582
3583
3584 if (q == (q_vector->num_ringpairs - 1))
3585 val |= (I40E_QUEUE_END_OF_LIST <<
3586 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3587
3588 wr32(hw, I40E_QINT_TQCTL(qp), val);
3589 qp++;
3590 }
3591 }
3592
3593 i40e_flush(hw);
3594}
3595
3596
3597
3598
3599
3600static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3601{
3602 struct i40e_hw *hw = &pf->hw;
3603 u32 val;
3604
3605
3606 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3607 rd32(hw, I40E_PFINT_ICR0);
3608
3609 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3610 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3611 I40E_PFINT_ICR0_ENA_GRST_MASK |
3612 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3613 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3614 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3615 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3616 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3617
3618 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3619 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3620
3621 if (pf->flags & I40E_FLAG_PTP)
3622 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3623
3624 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3625
3626
3627 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3628 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3629
3630
3631 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3632}
3633
3634
3635
3636
3637
3638static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3639{
3640 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3641 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3642 struct i40e_pf *pf = vsi->back;
3643 struct i40e_hw *hw = &pf->hw;
3644 u32 val;
3645
3646
3647 q_vector->rx.next_update = jiffies + 1;
3648 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3649 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr);
3650 q_vector->rx.current_itr = q_vector->rx.target_itr;
3651 q_vector->tx.next_update = jiffies + 1;
3652 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3653 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr);
3654 q_vector->tx.current_itr = q_vector->tx.target_itr;
3655
3656 i40e_enable_misc_int_causes(pf);
3657
3658
3659 wr32(hw, I40E_PFINT_LNKLST0, 0);
3660
3661
3662 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3663 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3664 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3665 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3666
3667 wr32(hw, I40E_QINT_RQCTL(0), val);
3668
3669 if (i40e_enabled_xdp_vsi(vsi)) {
3670 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3671 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3672 (I40E_QUEUE_TYPE_TX
3673 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3674
3675 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3676 }
3677
3678 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3679 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3680 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3681
3682 wr32(hw, I40E_QINT_TQCTL(0), val);
3683 i40e_flush(hw);
3684}
3685
3686
3687
3688
3689
3690void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3691{
3692 struct i40e_hw *hw = &pf->hw;
3693
3694 wr32(hw, I40E_PFINT_DYN_CTL0,
3695 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3696 i40e_flush(hw);
3697}
3698
3699
3700
3701
3702
3703void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3704{
3705 struct i40e_hw *hw = &pf->hw;
3706 u32 val;
3707
3708 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3709 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3710 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3711
3712 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3713 i40e_flush(hw);
3714}
3715
3716
3717
3718
3719
3720
3721static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3722{
3723 struct i40e_q_vector *q_vector = data;
3724
3725 if (!q_vector->tx.ring && !q_vector->rx.ring)
3726 return IRQ_HANDLED;
3727
3728 napi_schedule_irqoff(&q_vector->napi);
3729
3730 return IRQ_HANDLED;
3731}
3732
3733
3734
3735
3736
3737
3738
3739
3740
3741static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3742 const cpumask_t *mask)
3743{
3744 struct i40e_q_vector *q_vector =
3745 container_of(notify, struct i40e_q_vector, affinity_notify);
3746
3747 cpumask_copy(&q_vector->affinity_mask, mask);
3748}
3749
3750
3751
3752
3753
3754
3755
3756
3757
3758static void i40e_irq_affinity_release(struct kref *ref) {}
3759
3760
3761
3762
3763
3764
3765
3766
3767static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3768{
3769 int q_vectors = vsi->num_q_vectors;
3770 struct i40e_pf *pf = vsi->back;
3771 int base = vsi->base_vector;
3772 int rx_int_idx = 0;
3773 int tx_int_idx = 0;
3774 int vector, err;
3775 int irq_num;
3776 int cpu;
3777
3778 for (vector = 0; vector < q_vectors; vector++) {
3779 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3780
3781 irq_num = pf->msix_entries[base + vector].vector;
3782
3783 if (q_vector->tx.ring && q_vector->rx.ring) {
3784 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3785 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3786 tx_int_idx++;
3787 } else if (q_vector->rx.ring) {
3788 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3789 "%s-%s-%d", basename, "rx", rx_int_idx++);
3790 } else if (q_vector->tx.ring) {
3791 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3792 "%s-%s-%d", basename, "tx", tx_int_idx++);
3793 } else {
3794
3795 continue;
3796 }
3797 err = request_irq(irq_num,
3798 vsi->irq_handler,
3799 0,
3800 q_vector->name,
3801 q_vector);
3802 if (err) {
3803 dev_info(&pf->pdev->dev,
3804 "MSIX request_irq failed, error: %d\n", err);
3805 goto free_queue_irqs;
3806 }
3807
3808
3809 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3810 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3811 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3812
3813
3814
3815
3816
3817
3818 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3819 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
3820 }
3821
3822 vsi->irqs_ready = true;
3823 return 0;
3824
3825free_queue_irqs:
3826 while (vector) {
3827 vector--;
3828 irq_num = pf->msix_entries[base + vector].vector;
3829 irq_set_affinity_notifier(irq_num, NULL);
3830 irq_set_affinity_hint(irq_num, NULL);
3831 free_irq(irq_num, &vsi->q_vectors[vector]);
3832 }
3833 return err;
3834}
3835
3836
3837
3838
3839
3840static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3841{
3842 struct i40e_pf *pf = vsi->back;
3843 struct i40e_hw *hw = &pf->hw;
3844 int base = vsi->base_vector;
3845 int i;
3846
3847
3848 for (i = 0; i < vsi->num_queue_pairs; i++) {
3849 u32 val;
3850
3851 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3852 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3853 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3854
3855 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3856 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3857 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3858
3859 if (!i40e_enabled_xdp_vsi(vsi))
3860 continue;
3861 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3862 }
3863
3864
3865 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3866 for (i = vsi->base_vector;
3867 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3868 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3869
3870 i40e_flush(hw);
3871 for (i = 0; i < vsi->num_q_vectors; i++)
3872 synchronize_irq(pf->msix_entries[i + base].vector);
3873 } else {
3874
3875 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3876 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3877 i40e_flush(hw);
3878 synchronize_irq(pf->pdev->irq);
3879 }
3880}
3881
3882
3883
3884
3885
3886static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3887{
3888 struct i40e_pf *pf = vsi->back;
3889 int i;
3890
3891 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3892 for (i = 0; i < vsi->num_q_vectors; i++)
3893 i40e_irq_dynamic_enable(vsi, i);
3894 } else {
3895 i40e_irq_dynamic_enable_icr0(pf);
3896 }
3897
3898 i40e_flush(&pf->hw);
3899 return 0;
3900}
3901
3902
3903
3904
3905
3906static void i40e_free_misc_vector(struct i40e_pf *pf)
3907{
3908
3909 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3910 i40e_flush(&pf->hw);
3911
3912 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
3913 synchronize_irq(pf->msix_entries[0].vector);
3914 free_irq(pf->msix_entries[0].vector, pf);
3915 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
3916 }
3917}
3918
3919
3920
3921
3922
3923
3924
3925
3926
3927
3928static irqreturn_t i40e_intr(int irq, void *data)
3929{
3930 struct i40e_pf *pf = (struct i40e_pf *)data;
3931 struct i40e_hw *hw = &pf->hw;
3932 irqreturn_t ret = IRQ_NONE;
3933 u32 icr0, icr0_remaining;
3934 u32 val, ena_mask;
3935
3936 icr0 = rd32(hw, I40E_PFINT_ICR0);
3937 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3938
3939
3940 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3941 goto enable_intr;
3942
3943
3944 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3945 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3946 pf->sw_int_count++;
3947
3948 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3949 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3950 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3951 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
3952 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
3953 }
3954
3955
3956 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3957 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3958 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3959
3960
3961
3962
3963
3964
3965
3966 if (!test_bit(__I40E_DOWN, pf->state))
3967 napi_schedule_irqoff(&q_vector->napi);
3968 }
3969
3970 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3971 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3972 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
3973 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3974 }
3975
3976 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3977 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3978 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
3979 }
3980
3981 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3982 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3983 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3984 }
3985
3986 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3987 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
3988 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
3989 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3990 val = rd32(hw, I40E_GLGEN_RSTAT);
3991 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3992 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3993 if (val == I40E_RESET_CORER) {
3994 pf->corer_count++;
3995 } else if (val == I40E_RESET_GLOBR) {
3996 pf->globr_count++;
3997 } else if (val == I40E_RESET_EMPR) {
3998 pf->empr_count++;
3999 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
4000 }
4001 }
4002
4003 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
4004 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
4005 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
4006 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
4007 rd32(hw, I40E_PFHMC_ERRORINFO),
4008 rd32(hw, I40E_PFHMC_ERRORDATA));
4009 }
4010
4011 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
4012 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
4013
4014 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
4015 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
4016 i40e_ptp_tx_hwtstamp(pf);
4017 }
4018 }
4019
4020
4021
4022
4023
4024 icr0_remaining = icr0 & ena_mask;
4025 if (icr0_remaining) {
4026 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4027 icr0_remaining);
4028 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4029 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4030 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4031 dev_info(&pf->pdev->dev, "device will be reset\n");
4032 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4033 i40e_service_event_schedule(pf);
4034 }
4035 ena_mask &= ~icr0_remaining;
4036 }
4037 ret = IRQ_HANDLED;
4038
4039enable_intr:
4040
4041 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4042 if (!test_bit(__I40E_DOWN, pf->state) ||
4043 test_bit(__I40E_RECOVERY_MODE, pf->state)) {
4044 i40e_service_event_schedule(pf);
4045 i40e_irq_dynamic_enable_icr0(pf);
4046 }
4047
4048 return ret;
4049}
4050
4051
4052
4053
4054
4055
4056
4057
4058static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4059{
4060 struct i40e_vsi *vsi = tx_ring->vsi;
4061 u16 i = tx_ring->next_to_clean;
4062 struct i40e_tx_buffer *tx_buf;
4063 struct i40e_tx_desc *tx_desc;
4064
4065 tx_buf = &tx_ring->tx_bi[i];
4066 tx_desc = I40E_TX_DESC(tx_ring, i);
4067 i -= tx_ring->count;
4068
4069 do {
4070 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4071
4072
4073 if (!eop_desc)
4074 break;
4075
4076
4077 smp_rmb();
4078
4079
4080 if (!(eop_desc->cmd_type_offset_bsz &
4081 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4082 break;
4083
4084
4085 tx_buf->next_to_watch = NULL;
4086
4087 tx_desc->buffer_addr = 0;
4088 tx_desc->cmd_type_offset_bsz = 0;
4089
4090 tx_buf++;
4091 tx_desc++;
4092 i++;
4093 if (unlikely(!i)) {
4094 i -= tx_ring->count;
4095 tx_buf = tx_ring->tx_bi;
4096 tx_desc = I40E_TX_DESC(tx_ring, 0);
4097 }
4098
4099 dma_unmap_single(tx_ring->dev,
4100 dma_unmap_addr(tx_buf, dma),
4101 dma_unmap_len(tx_buf, len),
4102 DMA_TO_DEVICE);
4103 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4104 kfree(tx_buf->raw_buf);
4105
4106 tx_buf->raw_buf = NULL;
4107 tx_buf->tx_flags = 0;
4108 tx_buf->next_to_watch = NULL;
4109 dma_unmap_len_set(tx_buf, len, 0);
4110 tx_desc->buffer_addr = 0;
4111 tx_desc->cmd_type_offset_bsz = 0;
4112
4113
4114 tx_buf++;
4115 tx_desc++;
4116 i++;
4117 if (unlikely(!i)) {
4118 i -= tx_ring->count;
4119 tx_buf = tx_ring->tx_bi;
4120 tx_desc = I40E_TX_DESC(tx_ring, 0);
4121 }
4122
4123
4124 budget--;
4125 } while (likely(budget));
4126
4127 i += tx_ring->count;
4128 tx_ring->next_to_clean = i;
4129
4130 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4131 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4132
4133 return budget > 0;
4134}
4135
4136
4137
4138
4139
4140
4141static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4142{
4143 struct i40e_q_vector *q_vector = data;
4144 struct i40e_vsi *vsi;
4145
4146 if (!q_vector->tx.ring)
4147 return IRQ_HANDLED;
4148
4149 vsi = q_vector->tx.ring->vsi;
4150 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4151
4152 return IRQ_HANDLED;
4153}
4154
4155
4156
4157
4158
4159
4160
4161static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4162{
4163 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4164 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4165 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4166
4167 tx_ring->q_vector = q_vector;
4168 tx_ring->next = q_vector->tx.ring;
4169 q_vector->tx.ring = tx_ring;
4170 q_vector->tx.count++;
4171
4172
4173 if (i40e_enabled_xdp_vsi(vsi)) {
4174 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4175
4176 xdp_ring->q_vector = q_vector;
4177 xdp_ring->next = q_vector->tx.ring;
4178 q_vector->tx.ring = xdp_ring;
4179 q_vector->tx.count++;
4180 }
4181
4182 rx_ring->q_vector = q_vector;
4183 rx_ring->next = q_vector->rx.ring;
4184 q_vector->rx.ring = rx_ring;
4185 q_vector->rx.count++;
4186}
4187
4188
4189
4190
4191
4192
4193
4194
4195
4196
4197static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4198{
4199 int qp_remaining = vsi->num_queue_pairs;
4200 int q_vectors = vsi->num_q_vectors;
4201 int num_ringpairs;
4202 int v_start = 0;
4203 int qp_idx = 0;
4204
4205
4206
4207
4208
4209
4210
4211
4212 for (; v_start < q_vectors; v_start++) {
4213 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4214
4215 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4216
4217 q_vector->num_ringpairs = num_ringpairs;
4218 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4219
4220 q_vector->rx.count = 0;
4221 q_vector->tx.count = 0;
4222 q_vector->rx.ring = NULL;
4223 q_vector->tx.ring = NULL;
4224
4225 while (num_ringpairs--) {
4226 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4227 qp_idx++;
4228 qp_remaining--;
4229 }
4230 }
4231}
4232
4233
4234
4235
4236
4237
4238static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4239{
4240 struct i40e_pf *pf = vsi->back;
4241 int err;
4242
4243 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4244 err = i40e_vsi_request_irq_msix(vsi, basename);
4245 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4246 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4247 pf->int_name, pf);
4248 else
4249 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4250 pf->int_name, pf);
4251
4252 if (err)
4253 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4254
4255 return err;
4256}
4257
4258#ifdef CONFIG_NET_POLL_CONTROLLER
4259
4260
4261
4262
4263
4264
4265
4266static void i40e_netpoll(struct net_device *netdev)
4267{
4268 struct i40e_netdev_priv *np = netdev_priv(netdev);
4269 struct i40e_vsi *vsi = np->vsi;
4270 struct i40e_pf *pf = vsi->back;
4271 int i;
4272
4273
4274 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4275 return;
4276
4277 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4278 for (i = 0; i < vsi->num_q_vectors; i++)
4279 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4280 } else {
4281 i40e_intr(pf->pdev->irq, netdev);
4282 }
4283}
4284#endif
4285
4286#define I40E_QTX_ENA_WAIT_COUNT 50
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
4299static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4300{
4301 int i;
4302 u32 tx_reg;
4303
4304 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4305 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4306 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4307 break;
4308
4309 usleep_range(10, 20);
4310 }
4311 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4312 return -ETIMEDOUT;
4313
4314 return 0;
4315}
4316
4317
4318
4319
4320
4321
4322
4323
4324
4325
4326
4327static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4328{
4329 struct i40e_hw *hw = &pf->hw;
4330 u32 tx_reg;
4331 int i;
4332
4333
4334 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4335 if (!enable)
4336 usleep_range(10, 20);
4337
4338 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4339 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4340 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4341 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4342 break;
4343 usleep_range(1000, 2000);
4344 }
4345
4346
4347 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4348 return;
4349
4350
4351 if (enable) {
4352 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4353 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4354 } else {
4355 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4356 }
4357
4358 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4359}
4360
4361
4362
4363
4364
4365
4366
4367
4368
4369int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4370 bool is_xdp, bool enable)
4371{
4372 int ret;
4373
4374 i40e_control_tx_q(pf, pf_q, enable);
4375
4376
4377 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4378 if (ret) {
4379 dev_info(&pf->pdev->dev,
4380 "VSI seid %d %sTx ring %d %sable timeout\n",
4381 seid, (is_xdp ? "XDP " : ""), pf_q,
4382 (enable ? "en" : "dis"));
4383 }
4384
4385 return ret;
4386}
4387
4388
4389
4390
4391
4392
4393static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
4394{
4395 struct i40e_pf *pf = vsi->back;
4396 int i, pf_q, ret = 0;
4397
4398 pf_q = vsi->base_queue;
4399 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4400 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4401 pf_q,
4402 false , enable);
4403 if (ret)
4404 break;
4405
4406 if (!i40e_enabled_xdp_vsi(vsi))
4407 continue;
4408
4409 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4410 pf_q + vsi->alloc_queue_pairs,
4411 true , enable);
4412 if (ret)
4413 break;
4414 }
4415 return ret;
4416}
4417
4418
4419
4420
4421
4422
4423
4424
4425
4426
4427
4428
4429static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4430{
4431 int i;
4432 u32 rx_reg;
4433
4434 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4435 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4436 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4437 break;
4438
4439 usleep_range(10, 20);
4440 }
4441 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4442 return -ETIMEDOUT;
4443
4444 return 0;
4445}
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4458{
4459 struct i40e_hw *hw = &pf->hw;
4460 u32 rx_reg;
4461 int i;
4462
4463 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4464 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4465 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4466 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4467 break;
4468 usleep_range(1000, 2000);
4469 }
4470
4471
4472 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4473 return;
4474
4475
4476 if (enable)
4477 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4478 else
4479 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4480
4481 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4482}
4483
4484
4485
4486
4487
4488
4489
4490
4491
4492
4493
4494int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4495{
4496 int ret = 0;
4497
4498 i40e_control_rx_q(pf, pf_q, enable);
4499
4500
4501 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4502 if (ret)
4503 return ret;
4504
4505 return ret;
4506}
4507
4508
4509
4510
4511
4512
4513static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4514{
4515 struct i40e_pf *pf = vsi->back;
4516 int i, pf_q, ret = 0;
4517
4518 pf_q = vsi->base_queue;
4519 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4520 ret = i40e_control_wait_rx_q(pf, pf_q, enable);
4521 if (ret) {
4522 dev_info(&pf->pdev->dev,
4523 "VSI seid %d Rx ring %d %sable timeout\n",
4524 vsi->seid, pf_q, (enable ? "en" : "dis"));
4525 break;
4526 }
4527 }
4528
4529
4530
4531
4532 if (!enable)
4533 mdelay(50);
4534
4535 return ret;
4536}
4537
4538
4539
4540
4541
4542int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4543{
4544 int ret = 0;
4545
4546
4547 ret = i40e_vsi_control_rx(vsi, true);
4548 if (ret)
4549 return ret;
4550 ret = i40e_vsi_control_tx(vsi, true);
4551
4552 return ret;
4553}
4554
4555
4556
4557
4558
4559void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4560{
4561
4562 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4563 return i40e_vsi_stop_rings_no_wait(vsi);
4564
4565
4566
4567
4568 i40e_vsi_control_tx(vsi, false);
4569 i40e_vsi_control_rx(vsi, false);
4570}
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582
4583void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4584{
4585 struct i40e_pf *pf = vsi->back;
4586 int i, pf_q;
4587
4588 pf_q = vsi->base_queue;
4589 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4590 i40e_control_tx_q(pf, pf_q, false);
4591 i40e_control_rx_q(pf, pf_q, false);
4592 }
4593}
4594
4595
4596
4597
4598
4599static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4600{
4601 struct i40e_pf *pf = vsi->back;
4602 struct i40e_hw *hw = &pf->hw;
4603 int base = vsi->base_vector;
4604 u32 val, qp;
4605 int i;
4606
4607 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4608 if (!vsi->q_vectors)
4609 return;
4610
4611 if (!vsi->irqs_ready)
4612 return;
4613
4614 vsi->irqs_ready = false;
4615 for (i = 0; i < vsi->num_q_vectors; i++) {
4616 int irq_num;
4617 u16 vector;
4618
4619 vector = i + base;
4620 irq_num = pf->msix_entries[vector].vector;
4621
4622
4623 if (!vsi->q_vectors[i] ||
4624 !vsi->q_vectors[i]->num_ringpairs)
4625 continue;
4626
4627
4628 irq_set_affinity_notifier(irq_num, NULL);
4629
4630 irq_set_affinity_hint(irq_num, NULL);
4631 synchronize_irq(irq_num);
4632 free_irq(irq_num, vsi->q_vectors[i]);
4633
4634
4635
4636
4637
4638
4639
4640
4641 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4642 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4643 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4644 val |= I40E_QUEUE_END_OF_LIST
4645 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4646 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4647
4648 while (qp != I40E_QUEUE_END_OF_LIST) {
4649 u32 next;
4650
4651 val = rd32(hw, I40E_QINT_RQCTL(qp));
4652
4653 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4654 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4655 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4656 I40E_QINT_RQCTL_INTEVENT_MASK);
4657
4658 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4659 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4660
4661 wr32(hw, I40E_QINT_RQCTL(qp), val);
4662
4663 val = rd32(hw, I40E_QINT_TQCTL(qp));
4664
4665 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4666 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4667
4668 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4669 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4670 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4671 I40E_QINT_TQCTL_INTEVENT_MASK);
4672
4673 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4674 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4675
4676 wr32(hw, I40E_QINT_TQCTL(qp), val);
4677 qp = next;
4678 }
4679 }
4680 } else {
4681 free_irq(pf->pdev->irq, pf);
4682
4683 val = rd32(hw, I40E_PFINT_LNKLST0);
4684 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4685 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4686 val |= I40E_QUEUE_END_OF_LIST
4687 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4688 wr32(hw, I40E_PFINT_LNKLST0, val);
4689
4690 val = rd32(hw, I40E_QINT_RQCTL(qp));
4691 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4692 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4693 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4694 I40E_QINT_RQCTL_INTEVENT_MASK);
4695
4696 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4697 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4698
4699 wr32(hw, I40E_QINT_RQCTL(qp), val);
4700
4701 val = rd32(hw, I40E_QINT_TQCTL(qp));
4702
4703 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4704 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4705 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4706 I40E_QINT_TQCTL_INTEVENT_MASK);
4707
4708 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4709 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4710
4711 wr32(hw, I40E_QINT_TQCTL(qp), val);
4712 }
4713}
4714
4715
4716
4717
4718
4719
4720
4721
4722
4723
4724static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4725{
4726 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4727 struct i40e_ring *ring;
4728
4729 if (!q_vector)
4730 return;
4731
4732
4733 i40e_for_each_ring(ring, q_vector->tx)
4734 ring->q_vector = NULL;
4735
4736 i40e_for_each_ring(ring, q_vector->rx)
4737 ring->q_vector = NULL;
4738
4739
4740 if (vsi->netdev)
4741 netif_napi_del(&q_vector->napi);
4742
4743 vsi->q_vectors[v_idx] = NULL;
4744
4745 kfree_rcu(q_vector, rcu);
4746}
4747
4748
4749
4750
4751
4752
4753
4754
4755static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4756{
4757 int v_idx;
4758
4759 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4760 i40e_free_q_vector(vsi, v_idx);
4761}
4762
4763
4764
4765
4766
4767static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4768{
4769
4770 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4771 pci_disable_msix(pf->pdev);
4772 kfree(pf->msix_entries);
4773 pf->msix_entries = NULL;
4774 kfree(pf->irq_pile);
4775 pf->irq_pile = NULL;
4776 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4777 pci_disable_msi(pf->pdev);
4778 }
4779 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4780}
4781
4782
4783
4784
4785
4786
4787
4788
4789static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4790{
4791 int i;
4792
4793 i40e_free_misc_vector(pf);
4794
4795 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4796 I40E_IWARP_IRQ_PILE_ID);
4797
4798 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4799 for (i = 0; i < pf->num_alloc_vsi; i++)
4800 if (pf->vsi[i])
4801 i40e_vsi_free_q_vectors(pf->vsi[i]);
4802 i40e_reset_interrupt_capability(pf);
4803}
4804
4805
4806
4807
4808
4809static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4810{
4811 int q_idx;
4812
4813 if (!vsi->netdev)
4814 return;
4815
4816 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4817 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4818
4819 if (q_vector->rx.ring || q_vector->tx.ring)
4820 napi_enable(&q_vector->napi);
4821 }
4822}
4823
4824
4825
4826
4827
4828static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4829{
4830 int q_idx;
4831
4832 if (!vsi->netdev)
4833 return;
4834
4835 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4836 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4837
4838 if (q_vector->rx.ring || q_vector->tx.ring)
4839 napi_disable(&q_vector->napi);
4840 }
4841}
4842
4843
4844
4845
4846
4847static void i40e_vsi_close(struct i40e_vsi *vsi)
4848{
4849 struct i40e_pf *pf = vsi->back;
4850 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
4851 i40e_down(vsi);
4852 i40e_vsi_free_irq(vsi);
4853 i40e_vsi_free_tx_resources(vsi);
4854 i40e_vsi_free_rx_resources(vsi);
4855 vsi->current_netdev_flags = 0;
4856 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
4857 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4858 set_bit(__I40E_CLIENT_RESET, pf->state);
4859}
4860
4861
4862
4863
4864
4865static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4866{
4867 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4868 return;
4869
4870 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
4871 if (vsi->netdev && netif_running(vsi->netdev))
4872 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4873 else
4874 i40e_vsi_close(vsi);
4875}
4876
4877
4878
4879
4880
4881static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4882{
4883 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
4884 return;
4885
4886 if (vsi->netdev && netif_running(vsi->netdev))
4887 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4888 else
4889 i40e_vsi_open(vsi);
4890}
4891
4892
4893
4894
4895
4896static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4897{
4898 int v;
4899
4900 for (v = 0; v < pf->num_alloc_vsi; v++) {
4901 if (pf->vsi[v])
4902 i40e_quiesce_vsi(pf->vsi[v]);
4903 }
4904}
4905
4906
4907
4908
4909
4910static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4911{
4912 int v;
4913
4914 for (v = 0; v < pf->num_alloc_vsi; v++) {
4915 if (pf->vsi[v])
4916 i40e_unquiesce_vsi(pf->vsi[v]);
4917 }
4918}
4919
4920
4921
4922
4923
4924
4925
4926int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4927{
4928 struct i40e_pf *pf = vsi->back;
4929 int i, pf_q, ret;
4930
4931 pf_q = vsi->base_queue;
4932 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4933
4934 ret = i40e_pf_txq_wait(pf, pf_q, false);
4935 if (ret) {
4936 dev_info(&pf->pdev->dev,
4937 "VSI seid %d Tx ring %d disable timeout\n",
4938 vsi->seid, pf_q);
4939 return ret;
4940 }
4941
4942 if (!i40e_enabled_xdp_vsi(vsi))
4943 goto wait_rx;
4944
4945
4946 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
4947 false);
4948 if (ret) {
4949 dev_info(&pf->pdev->dev,
4950 "VSI seid %d XDP Tx ring %d disable timeout\n",
4951 vsi->seid, pf_q);
4952 return ret;
4953 }
4954wait_rx:
4955
4956 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4957 if (ret) {
4958 dev_info(&pf->pdev->dev,
4959 "VSI seid %d Rx ring %d disable timeout\n",
4960 vsi->seid, pf_q);
4961 return ret;
4962 }
4963 }
4964
4965 return 0;
4966}
4967
4968#ifdef CONFIG_I40E_DCB
4969
4970
4971
4972
4973
4974
4975
4976static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4977{
4978 int v, ret = 0;
4979
4980 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4981 if (pf->vsi[v]) {
4982 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4983 if (ret)
4984 break;
4985 }
4986 }
4987
4988 return ret;
4989}
4990
4991#endif
4992
4993
4994
4995
4996
4997
4998
4999
5000static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
5001{
5002 struct i40e_dcb_app_priority_table app;
5003 struct i40e_hw *hw = &pf->hw;
5004 u8 enabled_tc = 1;
5005 u8 tc, i;
5006
5007 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5008
5009 for (i = 0; i < dcbcfg->numapps; i++) {
5010 app = dcbcfg->app[i];
5011 if (app.selector == I40E_APP_SEL_TCPIP &&
5012 app.protocolid == I40E_APP_PROTOID_ISCSI) {
5013 tc = dcbcfg->etscfg.prioritytable[app.priority];
5014 enabled_tc |= BIT(tc);
5015 break;
5016 }
5017 }
5018
5019 return enabled_tc;
5020}
5021
5022
5023
5024
5025
5026
5027
5028static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
5029{
5030 int i, tc_unused = 0;
5031 u8 num_tc = 0;
5032 u8 ret = 0;
5033
5034
5035
5036
5037
5038 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5039 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
5040
5041
5042
5043
5044 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5045 if (num_tc & BIT(i)) {
5046 if (!tc_unused) {
5047 ret++;
5048 } else {
5049 pr_err("Non-contiguous TC - Disabling DCB\n");
5050 return 1;
5051 }
5052 } else {
5053 tc_unused = 1;
5054 }
5055 }
5056
5057
5058 if (!ret)
5059 ret = 1;
5060
5061 return ret;
5062}
5063
5064
5065
5066
5067
5068
5069
5070
5071static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5072{
5073 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5074 u8 enabled_tc = 1;
5075 u8 i;
5076
5077 for (i = 0; i < num_tc; i++)
5078 enabled_tc |= BIT(i);
5079
5080 return enabled_tc;
5081}
5082
5083
5084
5085
5086
5087
5088
5089
5090static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5091{
5092 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5093 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5094 u8 enabled_tc = 1, i;
5095
5096 for (i = 1; i < num_tc; i++)
5097 enabled_tc |= BIT(i);
5098 return enabled_tc;
5099}
5100
5101
5102
5103
5104
5105
5106
5107static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5108{
5109 struct i40e_hw *hw = &pf->hw;
5110 u8 i, enabled_tc = 1;
5111 u8 num_tc = 0;
5112 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5113
5114 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5115 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5116
5117
5118 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5119 return 1;
5120
5121
5122 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5123 return i40e_dcb_get_num_tc(dcbcfg);
5124
5125
5126 if (pf->hw.func_caps.iscsi)
5127 enabled_tc = i40e_get_iscsi_tc_map(pf);
5128 else
5129 return 1;
5130
5131 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5132 if (enabled_tc & BIT(i))
5133 num_tc++;
5134 }
5135 return num_tc;
5136}
5137
5138
5139
5140
5141
5142
5143
5144static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5145{
5146 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5147 return i40e_mqprio_get_enabled_tc(pf);
5148
5149
5150
5151
5152 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5153 return I40E_DEFAULT_TRAFFIC_CLASS;
5154
5155
5156 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5157 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5158
5159
5160 if (pf->hw.func_caps.iscsi)
5161 return i40e_get_iscsi_tc_map(pf);
5162 else
5163 return I40E_DEFAULT_TRAFFIC_CLASS;
5164}
5165
5166
5167
5168
5169
5170
5171
5172static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5173{
5174 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5175 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5176 struct i40e_pf *pf = vsi->back;
5177 struct i40e_hw *hw = &pf->hw;
5178 i40e_status ret;
5179 u32 tc_bw_max;
5180 int i;
5181
5182
5183 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5184 if (ret) {
5185 dev_info(&pf->pdev->dev,
5186 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5187 i40e_stat_str(&pf->hw, ret),
5188 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5189 return -EINVAL;
5190 }
5191
5192
5193 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5194 NULL);
5195 if (ret) {
5196 dev_info(&pf->pdev->dev,
5197 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5198 i40e_stat_str(&pf->hw, ret),
5199 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5200 return -EINVAL;
5201 }
5202
5203 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5204 dev_info(&pf->pdev->dev,
5205 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5206 bw_config.tc_valid_bits,
5207 bw_ets_config.tc_valid_bits);
5208
5209 }
5210
5211 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5212 vsi->bw_max_quanta = bw_config.max_bw;
5213 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5214 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5215 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5216 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5217 vsi->bw_ets_limit_credits[i] =
5218 le16_to_cpu(bw_ets_config.credits[i]);
5219
5220 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
5221 }
5222
5223 return 0;
5224}
5225
5226
5227
5228
5229
5230
5231
5232
5233
5234static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5235 u8 *bw_share)
5236{
5237 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5238 struct i40e_pf *pf = vsi->back;
5239 i40e_status ret;
5240 int i;
5241
5242
5243 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5244 return 0;
5245 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5246 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5247 if (ret)
5248 dev_info(&pf->pdev->dev,
5249 "Failed to reset tx rate for vsi->seid %u\n",
5250 vsi->seid);
5251 return ret;
5252 }
5253 bw_data.tc_valid_bits = enabled_tc;
5254 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5255 bw_data.tc_bw_credits[i] = bw_share[i];
5256
5257 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5258 if (ret) {
5259 dev_info(&pf->pdev->dev,
5260 "AQ command Config VSI BW allocation per TC failed = %d\n",
5261 pf->hw.aq.asq_last_status);
5262 return -EINVAL;
5263 }
5264
5265 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5266 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5267
5268 return 0;
5269}
5270
5271
5272
5273
5274
5275
5276
5277static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5278{
5279 struct net_device *netdev = vsi->netdev;
5280 struct i40e_pf *pf = vsi->back;
5281 struct i40e_hw *hw = &pf->hw;
5282 u8 netdev_tc = 0;
5283 int i;
5284 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5285
5286 if (!netdev)
5287 return;
5288
5289 if (!enabled_tc) {
5290 netdev_reset_tc(netdev);
5291 return;
5292 }
5293
5294
5295 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5296 return;
5297
5298
5299 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5300
5301
5302
5303
5304
5305
5306
5307 if (vsi->tc_config.enabled_tc & BIT(i))
5308 netdev_set_tc_queue(netdev,
5309 vsi->tc_config.tc_info[i].netdev_tc,
5310 vsi->tc_config.tc_info[i].qcount,
5311 vsi->tc_config.tc_info[i].qoffset);
5312 }
5313
5314 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5315 return;
5316
5317
5318 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5319
5320 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5321
5322 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5323 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5324 }
5325}
5326
5327
5328
5329
5330
5331
5332static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5333 struct i40e_vsi_context *ctxt)
5334{
5335
5336
5337
5338
5339 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5340 memcpy(&vsi->info.queue_mapping,
5341 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5342 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5343 sizeof(vsi->info.tc_mapping));
5344}
5345
5346
5347
5348
5349
5350
5351
5352
5353
5354
5355
5356
5357
5358
5359static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5360{
5361 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5362 struct i40e_pf *pf = vsi->back;
5363 struct i40e_hw *hw = &pf->hw;
5364 struct i40e_vsi_context ctxt;
5365 int ret = 0;
5366 int i;
5367
5368
5369 if (vsi->tc_config.enabled_tc == enabled_tc &&
5370 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5371 return ret;
5372
5373
5374 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5375 if (enabled_tc & BIT(i))
5376 bw_share[i] = 1;
5377 }
5378
5379 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5380 if (ret) {
5381 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5382
5383 dev_info(&pf->pdev->dev,
5384 "Failed configuring TC map %d for VSI %d\n",
5385 enabled_tc, vsi->seid);
5386 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5387 &bw_config, NULL);
5388 if (ret) {
5389 dev_info(&pf->pdev->dev,
5390 "Failed querying vsi bw info, err %s aq_err %s\n",
5391 i40e_stat_str(hw, ret),
5392 i40e_aq_str(hw, hw->aq.asq_last_status));
5393 goto out;
5394 }
5395 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5396 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5397
5398 if (!valid_tc)
5399 valid_tc = bw_config.tc_valid_bits;
5400
5401 valid_tc |= 1;
5402 dev_info(&pf->pdev->dev,
5403 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5404 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5405 enabled_tc = valid_tc;
5406 }
5407
5408 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5409 if (ret) {
5410 dev_err(&pf->pdev->dev,
5411 "Unable to configure TC map %d for VSI %d\n",
5412 enabled_tc, vsi->seid);
5413 goto out;
5414 }
5415 }
5416
5417
5418 ctxt.seid = vsi->seid;
5419 ctxt.pf_num = vsi->back->hw.pf_id;
5420 ctxt.vf_num = 0;
5421 ctxt.uplink_seid = vsi->uplink_seid;
5422 ctxt.info = vsi->info;
5423 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5424 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5425 if (ret)
5426 goto out;
5427 } else {
5428 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5429 }
5430
5431
5432
5433
5434 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5435 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5436 vsi->num_queue_pairs);
5437 ret = i40e_vsi_config_rss(vsi);
5438 if (ret) {
5439 dev_info(&vsi->back->pdev->dev,
5440 "Failed to reconfig rss for num_queues\n");
5441 return ret;
5442 }
5443 vsi->reconfig_rss = false;
5444 }
5445 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5446 ctxt.info.valid_sections |=
5447 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5448 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5449 }
5450
5451
5452
5453
5454 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5455 if (ret) {
5456 dev_info(&pf->pdev->dev,
5457 "Update vsi tc config failed, err %s aq_err %s\n",
5458 i40e_stat_str(hw, ret),
5459 i40e_aq_str(hw, hw->aq.asq_last_status));
5460 goto out;
5461 }
5462
5463 i40e_vsi_update_queue_map(vsi, &ctxt);
5464 vsi->info.valid_sections = 0;
5465
5466
5467 ret = i40e_vsi_get_bw_info(vsi);
5468 if (ret) {
5469 dev_info(&pf->pdev->dev,
5470 "Failed updating vsi bw info, err %s aq_err %s\n",
5471 i40e_stat_str(hw, ret),
5472 i40e_aq_str(hw, hw->aq.asq_last_status));
5473 goto out;
5474 }
5475
5476
5477 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5478out:
5479 return ret;
5480}
5481
5482
5483
5484
5485
5486
5487static int i40e_get_link_speed(struct i40e_vsi *vsi)
5488{
5489 struct i40e_pf *pf = vsi->back;
5490
5491 switch (pf->hw.phy.link_info.link_speed) {
5492 case I40E_LINK_SPEED_40GB:
5493 return 40000;
5494 case I40E_LINK_SPEED_25GB:
5495 return 25000;
5496 case I40E_LINK_SPEED_20GB:
5497 return 20000;
5498 case I40E_LINK_SPEED_10GB:
5499 return 10000;
5500 case I40E_LINK_SPEED_1GB:
5501 return 1000;
5502 default:
5503 return -EINVAL;
5504 }
5505}
5506
5507
5508
5509
5510
5511
5512
5513
5514
5515int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5516{
5517 struct i40e_pf *pf = vsi->back;
5518 u64 credits = 0;
5519 int speed = 0;
5520 int ret = 0;
5521
5522 speed = i40e_get_link_speed(vsi);
5523 if (max_tx_rate > speed) {
5524 dev_err(&pf->pdev->dev,
5525 "Invalid max tx rate %llu specified for VSI seid %d.",
5526 max_tx_rate, seid);
5527 return -EINVAL;
5528 }
5529 if (max_tx_rate && max_tx_rate < 50) {
5530 dev_warn(&pf->pdev->dev,
5531 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5532 max_tx_rate = 50;
5533 }
5534
5535
5536 credits = max_tx_rate;
5537 do_div(credits, I40E_BW_CREDIT_DIVISOR);
5538 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5539 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5540 if (ret)
5541 dev_err(&pf->pdev->dev,
5542 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5543 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5544 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5545 return ret;
5546}
5547
5548
5549
5550
5551
5552
5553
5554static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5555{
5556 enum i40e_admin_queue_err last_aq_status;
5557 struct i40e_cloud_filter *cfilter;
5558 struct i40e_channel *ch, *ch_tmp;
5559 struct i40e_pf *pf = vsi->back;
5560 struct hlist_node *node;
5561 int ret, i;
5562
5563
5564
5565
5566 vsi->current_rss_size = 0;
5567
5568
5569 if (list_empty(&vsi->ch_list))
5570 return;
5571
5572 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5573 struct i40e_vsi *p_vsi;
5574
5575 list_del(&ch->list);
5576 p_vsi = ch->parent_vsi;
5577 if (!p_vsi || !ch->initialized) {
5578 kfree(ch);
5579 continue;
5580 }
5581
5582 for (i = 0; i < ch->num_queue_pairs; i++) {
5583 struct i40e_ring *tx_ring, *rx_ring;
5584 u16 pf_q;
5585
5586 pf_q = ch->base_queue + i;
5587 tx_ring = vsi->tx_rings[pf_q];
5588 tx_ring->ch = NULL;
5589
5590 rx_ring = vsi->rx_rings[pf_q];
5591 rx_ring->ch = NULL;
5592 }
5593
5594
5595 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5596 if (ret)
5597 dev_info(&vsi->back->pdev->dev,
5598 "Failed to reset tx rate for ch->seid %u\n",
5599 ch->seid);
5600
5601
5602 hlist_for_each_entry_safe(cfilter, node,
5603 &pf->cloud_filter_list, cloud_node) {
5604 if (cfilter->seid != ch->seid)
5605 continue;
5606
5607 hash_del(&cfilter->cloud_node);
5608 if (cfilter->dst_port)
5609 ret = i40e_add_del_cloud_filter_big_buf(vsi,
5610 cfilter,
5611 false);
5612 else
5613 ret = i40e_add_del_cloud_filter(vsi, cfilter,
5614 false);
5615 last_aq_status = pf->hw.aq.asq_last_status;
5616 if (ret)
5617 dev_info(&pf->pdev->dev,
5618 "Failed to delete cloud filter, err %s aq_err %s\n",
5619 i40e_stat_str(&pf->hw, ret),
5620 i40e_aq_str(&pf->hw, last_aq_status));
5621 kfree(cfilter);
5622 }
5623
5624
5625 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
5626 NULL);
5627 if (ret)
5628 dev_err(&vsi->back->pdev->dev,
5629 "unable to remove channel (%d) for parent VSI(%d)\n",
5630 ch->seid, p_vsi->seid);
5631 kfree(ch);
5632 }
5633 INIT_LIST_HEAD(&vsi->ch_list);
5634}
5635
5636
5637
5638
5639
5640
5641
5642static bool i40e_is_any_channel(struct i40e_vsi *vsi)
5643{
5644 struct i40e_channel *ch, *ch_tmp;
5645
5646 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5647 if (ch->initialized)
5648 return true;
5649 }
5650
5651 return false;
5652}
5653
5654
5655
5656
5657
5658
5659
5660
5661static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
5662{
5663 struct i40e_channel *ch, *ch_tmp;
5664 int max = 0;
5665
5666 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5667 if (!ch->initialized)
5668 continue;
5669 if (ch->num_queue_pairs > max)
5670 max = ch->num_queue_pairs;
5671 }
5672
5673 return max;
5674}
5675
5676
5677
5678
5679
5680
5681
5682
5683
5684
5685
5686
5687static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
5688 struct i40e_vsi *vsi, bool *reconfig_rss)
5689{
5690 int max_ch_queues;
5691
5692 if (!reconfig_rss)
5693 return -EINVAL;
5694
5695 *reconfig_rss = false;
5696 if (vsi->current_rss_size) {
5697 if (num_queues > vsi->current_rss_size) {
5698 dev_dbg(&pf->pdev->dev,
5699 "Error: num_queues (%d) > vsi's current_size(%d)\n",
5700 num_queues, vsi->current_rss_size);
5701 return -EINVAL;
5702 } else if ((num_queues < vsi->current_rss_size) &&
5703 (!is_power_of_2(num_queues))) {
5704 dev_dbg(&pf->pdev->dev,
5705 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
5706 num_queues, vsi->current_rss_size);
5707 return -EINVAL;
5708 }
5709 }
5710
5711 if (!is_power_of_2(num_queues)) {
5712
5713
5714
5715
5716
5717 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
5718 if (num_queues < max_ch_queues) {
5719 dev_dbg(&pf->pdev->dev,
5720 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
5721 num_queues, max_ch_queues);
5722 return -EINVAL;
5723 }
5724 *reconfig_rss = true;
5725 }
5726
5727 return 0;
5728}
5729
5730
5731
5732
5733
5734
5735
5736
5737static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
5738{
5739 struct i40e_pf *pf = vsi->back;
5740 u8 seed[I40E_HKEY_ARRAY_SIZE];
5741 struct i40e_hw *hw = &pf->hw;
5742 int local_rss_size;
5743 u8 *lut;
5744 int ret;
5745
5746 if (!vsi->rss_size)
5747 return -EINVAL;
5748
5749 if (rss_size > vsi->rss_size)
5750 return -EINVAL;
5751
5752 local_rss_size = min_t(int, vsi->rss_size, rss_size);
5753 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
5754 if (!lut)
5755 return -ENOMEM;
5756
5757
5758 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
5759
5760
5761
5762
5763 if (vsi->rss_hkey_user)
5764 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
5765 else
5766 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
5767
5768 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
5769 if (ret) {
5770 dev_info(&pf->pdev->dev,
5771 "Cannot set RSS lut, err %s aq_err %s\n",
5772 i40e_stat_str(hw, ret),
5773 i40e_aq_str(hw, hw->aq.asq_last_status));
5774 kfree(lut);
5775 return ret;
5776 }
5777 kfree(lut);
5778
5779
5780 if (!vsi->orig_rss_size)
5781 vsi->orig_rss_size = vsi->rss_size;
5782 vsi->current_rss_size = local_rss_size;
5783
5784 return ret;
5785}
5786
5787
5788
5789
5790
5791
5792
5793
5794
5795
5796static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
5797 struct i40e_vsi_context *ctxt,
5798 struct i40e_channel *ch)
5799{
5800 u16 qcount, qmap, sections = 0;
5801 u8 offset = 0;
5802 int pow;
5803
5804 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
5805 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
5806
5807 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
5808 ch->num_queue_pairs = qcount;
5809
5810
5811 pow = ilog2(qcount);
5812 if (!is_power_of_2(qcount))
5813 pow++;
5814
5815 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5816 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
5817
5818
5819 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
5820
5821 ctxt->info.up_enable_bits = 0x1;
5822 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5823 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
5824 ctxt->info.valid_sections |= cpu_to_le16(sections);
5825}
5826
5827
5828
5829
5830
5831
5832
5833
5834
5835static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
5836 struct i40e_channel *ch)
5837{
5838 struct i40e_hw *hw = &pf->hw;
5839 struct i40e_vsi_context ctxt;
5840 u8 enabled_tc = 0x1;
5841 int ret;
5842
5843 if (ch->type != I40E_VSI_VMDQ2) {
5844 dev_info(&pf->pdev->dev,
5845 "add new vsi failed, ch->type %d\n", ch->type);
5846 return -EINVAL;
5847 }
5848
5849 memset(&ctxt, 0, sizeof(ctxt));
5850 ctxt.pf_num = hw->pf_id;
5851 ctxt.vf_num = 0;
5852 ctxt.uplink_seid = uplink_seid;
5853 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5854 if (ch->type == I40E_VSI_VMDQ2)
5855 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5856
5857 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
5858 ctxt.info.valid_sections |=
5859 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5860 ctxt.info.switch_id =
5861 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5862 }
5863
5864
5865 i40e_channel_setup_queue_map(pf, &ctxt, ch);
5866
5867
5868 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5869 if (ret) {
5870 dev_info(&pf->pdev->dev,
5871 "add new vsi failed, err %s aq_err %s\n",
5872 i40e_stat_str(&pf->hw, ret),
5873 i40e_aq_str(&pf->hw,
5874 pf->hw.aq.asq_last_status));
5875 return -ENOENT;
5876 }
5877
5878
5879
5880
5881 ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
5882 ch->seid = ctxt.seid;
5883 ch->vsi_number = ctxt.vsi_number;
5884 ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
5885
5886
5887
5888
5889
5890 ch->info.mapping_flags = ctxt.info.mapping_flags;
5891 memcpy(&ch->info.queue_mapping,
5892 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
5893 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
5894 sizeof(ctxt.info.tc_mapping));
5895
5896 return 0;
5897}
5898
5899static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
5900 u8 *bw_share)
5901{
5902 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5903 i40e_status ret;
5904 int i;
5905
5906 bw_data.tc_valid_bits = ch->enabled_tc;
5907 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5908 bw_data.tc_bw_credits[i] = bw_share[i];
5909
5910 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
5911 &bw_data, NULL);
5912 if (ret) {
5913 dev_info(&vsi->back->pdev->dev,
5914 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
5915 vsi->back->hw.aq.asq_last_status, ch->seid);
5916 return -EINVAL;
5917 }
5918
5919 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5920 ch->info.qs_handle[i] = bw_data.qs_handles[i];
5921
5922 return 0;
5923}
5924
5925
5926
5927
5928
5929
5930
5931
5932
5933
5934static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
5935 struct i40e_vsi *vsi,
5936 struct i40e_channel *ch)
5937{
5938 i40e_status ret;
5939 int i;
5940 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5941
5942
5943 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5944 if (ch->enabled_tc & BIT(i))
5945 bw_share[i] = 1;
5946 }
5947
5948
5949 ret = i40e_channel_config_bw(vsi, ch, bw_share);
5950 if (ret) {
5951 dev_info(&vsi->back->pdev->dev,
5952 "Failed configuring TC map %d for channel (seid %u)\n",
5953 ch->enabled_tc, ch->seid);
5954 return ret;
5955 }
5956
5957 for (i = 0; i < ch->num_queue_pairs; i++) {
5958 struct i40e_ring *tx_ring, *rx_ring;
5959 u16 pf_q;
5960
5961 pf_q = ch->base_queue + i;
5962
5963
5964
5965
5966 tx_ring = vsi->tx_rings[pf_q];
5967 tx_ring->ch = ch;
5968
5969
5970 rx_ring = vsi->rx_rings[pf_q];
5971 rx_ring->ch = ch;
5972 }
5973
5974 return 0;
5975}
5976
5977
5978
5979
5980
5981
5982
5983
5984
5985
5986
5987
5988static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
5989 struct i40e_vsi *vsi,
5990 struct i40e_channel *ch,
5991 u16 uplink_seid, u8 type)
5992{
5993 int ret;
5994
5995 ch->initialized = false;
5996 ch->base_queue = vsi->next_base_queue;
5997 ch->type = type;
5998
5999
6000 ret = i40e_add_channel(pf, uplink_seid, ch);
6001 if (ret) {
6002 dev_info(&pf->pdev->dev,
6003 "failed to add_channel using uplink_seid %u\n",
6004 uplink_seid);
6005 return ret;
6006 }
6007
6008
6009 ch->initialized = true;
6010
6011
6012 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
6013 if (ret) {
6014 dev_info(&pf->pdev->dev,
6015 "failed to configure TX rings for channel %u\n",
6016 ch->seid);
6017 return ret;
6018 }
6019
6020
6021 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
6022 dev_dbg(&pf->pdev->dev,
6023 "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
6024 ch->seid, ch->vsi_number, ch->stat_counter_idx,
6025 ch->num_queue_pairs,
6026 vsi->next_base_queue);
6027 return ret;
6028}
6029
6030
6031
6032
6033
6034
6035
6036
6037
6038
6039
6040static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
6041 struct i40e_channel *ch)
6042{
6043 u8 vsi_type;
6044 u16 seid;
6045 int ret;
6046
6047 if (vsi->type == I40E_VSI_MAIN) {
6048 vsi_type = I40E_VSI_VMDQ2;
6049 } else {
6050 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6051 vsi->type);
6052 return false;
6053 }
6054
6055
6056 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6057
6058
6059 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6060 if (ret) {
6061 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6062 return false;
6063 }
6064
6065 return ch->initialized ? true : false;
6066}
6067
6068
6069
6070
6071
6072
6073
6074
6075static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6076{
6077 u8 mode;
6078 struct i40e_pf *pf = vsi->back;
6079 struct i40e_hw *hw = &pf->hw;
6080 int ret;
6081
6082 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6083 if (ret)
6084 return -EINVAL;
6085
6086 if (hw->dev_caps.switch_mode) {
6087
6088
6089
6090 u32 switch_mode = hw->dev_caps.switch_mode &
6091 I40E_SWITCH_MODE_MASK;
6092 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6093 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6094 return 0;
6095 dev_err(&pf->pdev->dev,
6096 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6097 hw->dev_caps.switch_mode);
6098 return -EINVAL;
6099 }
6100 }
6101
6102
6103 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6104
6105
6106 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6107
6108
6109 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6110
6111
6112 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6113 pf->last_sw_conf_valid_flags,
6114 mode, NULL);
6115 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6116 dev_err(&pf->pdev->dev,
6117 "couldn't set switch config bits, err %s aq_err %s\n",
6118 i40e_stat_str(hw, ret),
6119 i40e_aq_str(hw,
6120 hw->aq.asq_last_status));
6121
6122 return ret;
6123}
6124
6125
6126
6127
6128
6129
6130
6131
6132
6133int i40e_create_queue_channel(struct i40e_vsi *vsi,
6134 struct i40e_channel *ch)
6135{
6136 struct i40e_pf *pf = vsi->back;
6137 bool reconfig_rss;
6138 int err;
6139
6140 if (!ch)
6141 return -EINVAL;
6142
6143 if (!ch->num_queue_pairs) {
6144 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6145 ch->num_queue_pairs);
6146 return -EINVAL;
6147 }
6148
6149
6150 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6151 &reconfig_rss);
6152 if (err) {
6153 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6154 ch->num_queue_pairs);
6155 return -EINVAL;
6156 }
6157
6158
6159
6160
6161 if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
6162 (!i40e_is_any_channel(vsi))) {
6163 if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
6164 dev_dbg(&pf->pdev->dev,
6165 "Failed to create channel. Override queues (%u) not power of 2\n",
6166 vsi->tc_config.tc_info[0].qcount);
6167 return -EINVAL;
6168 }
6169
6170 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6171 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6172
6173 if (vsi->type == I40E_VSI_MAIN) {
6174 if (pf->flags & I40E_FLAG_TC_MQPRIO)
6175 i40e_do_reset(pf, I40E_PF_RESET_FLAG,
6176 true);
6177 else
6178 i40e_do_reset_safe(pf,
6179 I40E_PF_RESET_FLAG);
6180 }
6181 }
6182
6183
6184
6185 }
6186
6187
6188
6189
6190 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6191 dev_dbg(&pf->pdev->dev,
6192 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6193 vsi->cnt_q_avail, ch->num_queue_pairs);
6194 return -EINVAL;
6195 }
6196
6197
6198 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6199 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6200 if (err) {
6201 dev_info(&pf->pdev->dev,
6202 "Error: unable to reconfig rss for num_queues (%u)\n",
6203 ch->num_queue_pairs);
6204 return -EINVAL;
6205 }
6206 }
6207
6208 if (!i40e_setup_channel(pf, vsi, ch)) {
6209 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6210 return -EINVAL;
6211 }
6212
6213 dev_info(&pf->pdev->dev,
6214 "Setup channel (id:%u) utilizing num_queues %d\n",
6215 ch->seid, ch->num_queue_pairs);
6216
6217
6218 if (ch->max_tx_rate) {
6219 u64 credits = ch->max_tx_rate;
6220
6221 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6222 return -EINVAL;
6223
6224 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6225 dev_dbg(&pf->pdev->dev,
6226 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6227 ch->max_tx_rate,
6228 credits,
6229 ch->seid);
6230 }
6231
6232
6233 ch->parent_vsi = vsi;
6234
6235
6236 vsi->cnt_q_avail -= ch->num_queue_pairs;
6237
6238 return 0;
6239}
6240
6241
6242
6243
6244
6245
6246
6247static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6248{
6249 struct i40e_channel *ch;
6250 u64 max_rate = 0;
6251 int ret = 0, i;
6252
6253
6254 vsi->tc_seid_map[0] = vsi->seid;
6255 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6256 if (vsi->tc_config.enabled_tc & BIT(i)) {
6257 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6258 if (!ch) {
6259 ret = -ENOMEM;
6260 goto err_free;
6261 }
6262
6263 INIT_LIST_HEAD(&ch->list);
6264 ch->num_queue_pairs =
6265 vsi->tc_config.tc_info[i].qcount;
6266 ch->base_queue =
6267 vsi->tc_config.tc_info[i].qoffset;
6268
6269
6270
6271
6272 max_rate = vsi->mqprio_qopt.max_rate[i];
6273 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6274 ch->max_tx_rate = max_rate;
6275
6276 list_add_tail(&ch->list, &vsi->ch_list);
6277
6278 ret = i40e_create_queue_channel(vsi, ch);
6279 if (ret) {
6280 dev_err(&vsi->back->pdev->dev,
6281 "Failed creating queue channel with TC%d: queues %d\n",
6282 i, ch->num_queue_pairs);
6283 goto err_free;
6284 }
6285 vsi->tc_seid_map[i] = ch->seid;
6286 }
6287 }
6288 return ret;
6289
6290err_free:
6291 i40e_remove_queue_channels(vsi);
6292 return ret;
6293}
6294
6295
6296
6297
6298
6299
6300
6301
6302int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6303{
6304 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6305 struct i40e_pf *pf = veb->pf;
6306 int ret = 0;
6307 int i;
6308
6309
6310 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6311 return ret;
6312
6313 bw_data.tc_valid_bits = enabled_tc;
6314
6315
6316
6317 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6318 if (enabled_tc & BIT(i))
6319 bw_data.tc_bw_share_credits[i] = 1;
6320 }
6321
6322 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6323 &bw_data, NULL);
6324 if (ret) {
6325 dev_info(&pf->pdev->dev,
6326 "VEB bw config failed, err %s aq_err %s\n",
6327 i40e_stat_str(&pf->hw, ret),
6328 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6329 goto out;
6330 }
6331
6332
6333 ret = i40e_veb_get_bw_info(veb);
6334 if (ret) {
6335 dev_info(&pf->pdev->dev,
6336 "Failed getting veb bw config, err %s aq_err %s\n",
6337 i40e_stat_str(&pf->hw, ret),
6338 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6339 }
6340
6341out:
6342 return ret;
6343}
6344
6345#ifdef CONFIG_I40E_DCB
6346
6347
6348
6349
6350
6351
6352
6353
6354static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6355{
6356 u8 tc_map = 0;
6357 int ret;
6358 u8 v;
6359
6360
6361 tc_map = i40e_pf_get_tc_map(pf);
6362 for (v = 0; v < I40E_MAX_VEB; v++) {
6363 if (!pf->veb[v])
6364 continue;
6365 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6366 if (ret) {
6367 dev_info(&pf->pdev->dev,
6368 "Failed configuring TC for VEB seid=%d\n",
6369 pf->veb[v]->seid);
6370
6371 }
6372 }
6373
6374
6375 for (v = 0; v < pf->num_alloc_vsi; v++) {
6376 if (!pf->vsi[v])
6377 continue;
6378
6379
6380
6381
6382 if (v == pf->lan_vsi)
6383 tc_map = i40e_pf_get_tc_map(pf);
6384 else
6385 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6386
6387 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6388 if (ret) {
6389 dev_info(&pf->pdev->dev,
6390 "Failed configuring TC for VSI seid=%d\n",
6391 pf->vsi[v]->seid);
6392
6393 } else {
6394
6395 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6396 if (pf->vsi[v]->netdev)
6397 i40e_dcbnl_set_all(pf->vsi[v]);
6398 }
6399 }
6400}
6401
6402
6403
6404
6405
6406
6407
6408
6409static int i40e_resume_port_tx(struct i40e_pf *pf)
6410{
6411 struct i40e_hw *hw = &pf->hw;
6412 int ret;
6413
6414 ret = i40e_aq_resume_port_tx(hw, NULL);
6415 if (ret) {
6416 dev_info(&pf->pdev->dev,
6417 "Resume Port Tx failed, err %s aq_err %s\n",
6418 i40e_stat_str(&pf->hw, ret),
6419 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6420
6421 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6422 i40e_service_event_schedule(pf);
6423 }
6424
6425 return ret;
6426}
6427
6428
6429
6430
6431
6432
6433
6434
6435static int i40e_init_pf_dcb(struct i40e_pf *pf)
6436{
6437 struct i40e_hw *hw = &pf->hw;
6438 int err = 0;
6439
6440
6441
6442
6443 if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
6444 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) {
6445 dev_info(&pf->pdev->dev, "DCB is not supported or FW LLDP is disabled\n");
6446 err = I40E_NOT_SUPPORTED;
6447 goto out;
6448 }
6449
6450 err = i40e_init_dcb(hw, true);
6451 if (!err) {
6452
6453 if ((!hw->func_caps.dcb) ||
6454 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
6455 dev_info(&pf->pdev->dev,
6456 "DCBX offload is not supported or is disabled for this PF.\n");
6457 } else {
6458
6459 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
6460 DCB_CAP_DCBX_VER_IEEE;
6461
6462 pf->flags |= I40E_FLAG_DCB_CAPABLE;
6463
6464
6465
6466 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6467 pf->flags |= I40E_FLAG_DCB_ENABLED;
6468 else
6469 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6470 dev_dbg(&pf->pdev->dev,
6471 "DCBX offload is supported for this PF.\n");
6472 }
6473 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
6474 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
6475 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
6476 } else {
6477 dev_info(&pf->pdev->dev,
6478 "Query for DCB configuration failed, err %s aq_err %s\n",
6479 i40e_stat_str(&pf->hw, err),
6480 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6481 }
6482
6483out:
6484 return err;
6485}
6486#endif
6487#define SPEED_SIZE 14
6488#define FC_SIZE 8
6489
6490
6491
6492
6493
6494void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6495{
6496 enum i40e_aq_link_speed new_speed;
6497 struct i40e_pf *pf = vsi->back;
6498 char *speed = "Unknown";
6499 char *fc = "Unknown";
6500 char *fec = "";
6501 char *req_fec = "";
6502 char *an = "";
6503
6504 if (isup)
6505 new_speed = pf->hw.phy.link_info.link_speed;
6506 else
6507 new_speed = I40E_LINK_SPEED_UNKNOWN;
6508
6509 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
6510 return;
6511 vsi->current_isup = isup;
6512 vsi->current_speed = new_speed;
6513 if (!isup) {
6514 netdev_info(vsi->netdev, "NIC Link is Down\n");
6515 return;
6516 }
6517
6518
6519
6520
6521 if (pf->hw.func_caps.npar_enable &&
6522 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
6523 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
6524 netdev_warn(vsi->netdev,
6525 "The partition detected link speed that is less than 10Gbps\n");
6526
6527 switch (pf->hw.phy.link_info.link_speed) {
6528 case I40E_LINK_SPEED_40GB:
6529 speed = "40 G";
6530 break;
6531 case I40E_LINK_SPEED_20GB:
6532 speed = "20 G";
6533 break;
6534 case I40E_LINK_SPEED_25GB:
6535 speed = "25 G";
6536 break;
6537 case I40E_LINK_SPEED_10GB:
6538 speed = "10 G";
6539 break;
6540 case I40E_LINK_SPEED_5GB:
6541 speed = "5 G";
6542 break;
6543 case I40E_LINK_SPEED_2_5GB:
6544 speed = "2.5 G";
6545 break;
6546 case I40E_LINK_SPEED_1GB:
6547 speed = "1000 M";
6548 break;
6549 case I40E_LINK_SPEED_100MB:
6550 speed = "100 M";
6551 break;
6552 default:
6553 break;
6554 }
6555
6556 switch (pf->hw.fc.current_mode) {
6557 case I40E_FC_FULL:
6558 fc = "RX/TX";
6559 break;
6560 case I40E_FC_TX_PAUSE:
6561 fc = "TX";
6562 break;
6563 case I40E_FC_RX_PAUSE:
6564 fc = "RX";
6565 break;
6566 default:
6567 fc = "None";
6568 break;
6569 }
6570
6571 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
6572 req_fec = "None";
6573 fec = "None";
6574 an = "False";
6575
6576 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
6577 an = "True";
6578
6579 if (pf->hw.phy.link_info.fec_info &
6580 I40E_AQ_CONFIG_FEC_KR_ENA)
6581 fec = "CL74 FC-FEC/BASE-R";
6582 else if (pf->hw.phy.link_info.fec_info &
6583 I40E_AQ_CONFIG_FEC_RS_ENA)
6584 fec = "CL108 RS-FEC";
6585
6586
6587
6588
6589 if (vsi->back->hw.phy.link_info.req_fec_info &
6590 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
6591 if (vsi->back->hw.phy.link_info.req_fec_info &
6592 I40E_AQ_REQUEST_FEC_RS)
6593 req_fec = "CL108 RS-FEC";
6594 else
6595 req_fec = "CL74 FC-FEC/BASE-R";
6596 }
6597 netdev_info(vsi->netdev,
6598 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
6599 speed, req_fec, fec, an, fc);
6600 } else {
6601 netdev_info(vsi->netdev,
6602 "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
6603 speed, fc);
6604 }
6605
6606}
6607
6608
6609
6610
6611
6612static int i40e_up_complete(struct i40e_vsi *vsi)
6613{
6614 struct i40e_pf *pf = vsi->back;
6615 int err;
6616
6617 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6618 i40e_vsi_configure_msix(vsi);
6619 else
6620 i40e_configure_msi_and_legacy(vsi);
6621
6622
6623 err = i40e_vsi_start_rings(vsi);
6624 if (err)
6625 return err;
6626
6627 clear_bit(__I40E_VSI_DOWN, vsi->state);
6628 i40e_napi_enable_all(vsi);
6629 i40e_vsi_enable_irq(vsi);
6630
6631 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
6632 (vsi->netdev)) {
6633 i40e_print_link_message(vsi, true);
6634 netif_tx_start_all_queues(vsi->netdev);
6635 netif_carrier_on(vsi->netdev);
6636 }
6637
6638
6639 if (vsi->type == I40E_VSI_FDIR) {
6640
6641 pf->fd_add_err = 0;
6642 pf->fd_atr_cnt = 0;
6643 i40e_fdir_filter_restore(vsi);
6644 }
6645
6646
6647
6648
6649 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
6650 i40e_service_event_schedule(pf);
6651
6652 return 0;
6653}
6654
6655
6656
6657
6658
6659
6660
6661
6662static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
6663{
6664 struct i40e_pf *pf = vsi->back;
6665
6666 WARN_ON(in_interrupt());
6667 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
6668 usleep_range(1000, 2000);
6669 i40e_down(vsi);
6670
6671 i40e_up(vsi);
6672 clear_bit(__I40E_CONFIG_BUSY, pf->state);
6673}
6674
6675
6676
6677
6678
6679int i40e_up(struct i40e_vsi *vsi)
6680{
6681 int err;
6682
6683 err = i40e_vsi_configure(vsi);
6684 if (!err)
6685 err = i40e_up_complete(vsi);
6686
6687 return err;
6688}
6689
6690
6691
6692
6693
6694
6695static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
6696{
6697 struct i40e_aq_get_phy_abilities_resp abilities;
6698 struct i40e_aq_set_phy_config config = {0};
6699 struct i40e_hw *hw = &pf->hw;
6700 i40e_status err;
6701 u64 mask;
6702 u8 speed;
6703
6704
6705
6706
6707
6708
6709
6710 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
6711 NULL);
6712 if (err) {
6713 dev_err(&pf->pdev->dev,
6714 "failed to get phy cap., ret = %s last_status = %s\n",
6715 i40e_stat_str(hw, err),
6716 i40e_aq_str(hw, hw->aq.asq_last_status));
6717 return err;
6718 }
6719 speed = abilities.link_speed;
6720
6721
6722 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
6723 NULL);
6724 if (err) {
6725 dev_err(&pf->pdev->dev,
6726 "failed to get phy cap., ret = %s last_status = %s\n",
6727 i40e_stat_str(hw, err),
6728 i40e_aq_str(hw, hw->aq.asq_last_status));
6729 return err;
6730 }
6731
6732
6733
6734
6735 if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
6736 return I40E_SUCCESS;
6737
6738
6739
6740
6741
6742 mask = I40E_PHY_TYPES_BITMASK;
6743 config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
6744 config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0;
6745
6746 config.abilities = abilities.abilities;
6747 if (abilities.link_speed != 0)
6748 config.link_speed = abilities.link_speed;
6749 else
6750 config.link_speed = speed;
6751 config.eee_capability = abilities.eee_capability;
6752 config.eeer = abilities.eeer_val;
6753 config.low_power_ctrl = abilities.d3_lpan;
6754 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
6755 I40E_AQ_PHY_FEC_CONFIG_MASK;
6756 err = i40e_aq_set_phy_config(hw, &config, NULL);
6757
6758 if (err) {
6759 dev_err(&pf->pdev->dev,
6760 "set phy config ret = %s last_status = %s\n",
6761 i40e_stat_str(&pf->hw, err),
6762 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6763 return err;
6764 }
6765
6766
6767 err = i40e_update_link_info(hw);
6768 if (err) {
6769
6770
6771
6772
6773 msleep(1000);
6774 i40e_update_link_info(hw);
6775 }
6776
6777 i40e_aq_set_link_restart_an(hw, true, NULL);
6778
6779 return I40E_SUCCESS;
6780}
6781
6782
6783
6784
6785
6786void i40e_down(struct i40e_vsi *vsi)
6787{
6788 int i;
6789
6790
6791
6792
6793 if (vsi->netdev) {
6794 netif_carrier_off(vsi->netdev);
6795 netif_tx_disable(vsi->netdev);
6796 }
6797 i40e_vsi_disable_irq(vsi);
6798 i40e_vsi_stop_rings(vsi);
6799 if (vsi->type == I40E_VSI_MAIN &&
6800 vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED)
6801 i40e_force_link_state(vsi->back, false);
6802 i40e_napi_disable_all(vsi);
6803
6804 for (i = 0; i < vsi->num_queue_pairs; i++) {
6805 i40e_clean_tx_ring(vsi->tx_rings[i]);
6806 if (i40e_enabled_xdp_vsi(vsi)) {
6807
6808
6809
6810 synchronize_rcu();
6811 i40e_clean_tx_ring(vsi->xdp_rings[i]);
6812 }
6813 i40e_clean_rx_ring(vsi->rx_rings[i]);
6814 }
6815
6816}
6817
6818
6819
6820
6821
6822
6823static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
6824 struct tc_mqprio_qopt_offload *mqprio_qopt)
6825{
6826 u64 sum_max_rate = 0;
6827 u64 max_rate = 0;
6828 int i;
6829
6830 if (mqprio_qopt->qopt.offset[0] != 0 ||
6831 mqprio_qopt->qopt.num_tc < 1 ||
6832 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
6833 return -EINVAL;
6834 for (i = 0; ; i++) {
6835 if (!mqprio_qopt->qopt.count[i])
6836 return -EINVAL;
6837 if (mqprio_qopt->min_rate[i]) {
6838 dev_err(&vsi->back->pdev->dev,
6839 "Invalid min tx rate (greater than 0) specified\n");
6840 return -EINVAL;
6841 }
6842 max_rate = mqprio_qopt->max_rate[i];
6843 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6844 sum_max_rate += max_rate;
6845
6846 if (i >= mqprio_qopt->qopt.num_tc - 1)
6847 break;
6848 if (mqprio_qopt->qopt.offset[i + 1] !=
6849 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
6850 return -EINVAL;
6851 }
6852 if (vsi->num_queue_pairs <
6853 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
6854 return -EINVAL;
6855 }
6856 if (sum_max_rate > i40e_get_link_speed(vsi)) {
6857 dev_err(&vsi->back->pdev->dev,
6858 "Invalid max tx rate specified\n");
6859 return -EINVAL;
6860 }
6861 return 0;
6862}
6863
6864
6865
6866
6867
6868static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
6869{
6870 u16 qcount;
6871 int i;
6872
6873
6874 vsi->tc_config.numtc = 1;
6875 vsi->tc_config.enabled_tc = 1;
6876 qcount = min_t(int, vsi->alloc_queue_pairs,
6877 i40e_pf_get_max_q_per_tc(vsi->back));
6878 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6879
6880
6881
6882 vsi->tc_config.tc_info[i].qoffset = 0;
6883 if (i == 0)
6884 vsi->tc_config.tc_info[i].qcount = qcount;
6885 else
6886 vsi->tc_config.tc_info[i].qcount = 1;
6887 vsi->tc_config.tc_info[i].netdev_tc = 0;
6888 }
6889}
6890
6891
6892
6893
6894
6895
6896
6897
6898
6899
6900
6901static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
6902 const u8 *macaddr, int *aq_err)
6903{
6904 struct i40e_aqc_remove_macvlan_element_data element;
6905 i40e_status status;
6906
6907 memset(&element, 0, sizeof(element));
6908 ether_addr_copy(element.mac_addr, macaddr);
6909 element.vlan_tag = 0;
6910 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6911 status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
6912 *aq_err = hw->aq.asq_last_status;
6913
6914 return status;
6915}
6916
6917
6918
6919
6920
6921
6922
6923
6924
6925
6926
6927static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
6928 const u8 *macaddr, int *aq_err)
6929{
6930 struct i40e_aqc_add_macvlan_element_data element;
6931 i40e_status status;
6932 u16 cmd_flags = 0;
6933
6934 ether_addr_copy(element.mac_addr, macaddr);
6935 element.vlan_tag = 0;
6936 element.queue_number = 0;
6937 element.match_method = I40E_AQC_MM_ERR_NO_RES;
6938 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6939 element.flags = cpu_to_le16(cmd_flags);
6940 status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
6941 *aq_err = hw->aq.asq_last_status;
6942
6943 return status;
6944}
6945
6946
6947
6948
6949
6950
6951static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
6952{
6953 struct i40e_ring *tx_ring, *rx_ring;
6954 u16 pf_q;
6955 int i;
6956
6957 for (i = 0; i < ch->num_queue_pairs; i++) {
6958 pf_q = ch->base_queue + i;
6959 tx_ring = vsi->tx_rings[pf_q];
6960 tx_ring->ch = NULL;
6961 rx_ring = vsi->rx_rings[pf_q];
6962 rx_ring->ch = NULL;
6963 }
6964}
6965
6966
6967
6968
6969
6970
6971
6972
6973
6974static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
6975{
6976 struct i40e_channel *ch, *ch_tmp;
6977 int ret;
6978
6979 if (list_empty(&vsi->macvlan_list))
6980 return;
6981
6982 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
6983 struct i40e_vsi *parent_vsi;
6984
6985 if (i40e_is_channel_macvlan(ch)) {
6986 i40e_reset_ch_rings(vsi, ch);
6987 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
6988 netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
6989 netdev_set_sb_channel(ch->fwd->netdev, 0);
6990 kfree(ch->fwd);
6991 ch->fwd = NULL;
6992 }
6993
6994 list_del(&ch->list);
6995 parent_vsi = ch->parent_vsi;
6996 if (!parent_vsi || !ch->initialized) {
6997 kfree(ch);
6998 continue;
6999 }
7000
7001
7002 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
7003 NULL);
7004 if (ret)
7005 dev_err(&vsi->back->pdev->dev,
7006 "unable to remove channel (%d) for parent VSI(%d)\n",
7007 ch->seid, parent_vsi->seid);
7008 kfree(ch);
7009 }
7010 vsi->macvlan_cnt = 0;
7011}
7012
7013
7014
7015
7016
7017
7018
7019static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
7020 struct i40e_fwd_adapter *fwd)
7021{
7022 int ret = 0, num_tc = 1, i, aq_err;
7023 struct i40e_channel *ch, *ch_tmp;
7024 struct i40e_pf *pf = vsi->back;
7025 struct i40e_hw *hw = &pf->hw;
7026
7027 if (list_empty(&vsi->macvlan_list))
7028 return -EINVAL;
7029
7030
7031 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7032 if (!i40e_is_channel_macvlan(ch)) {
7033 ch->fwd = fwd;
7034
7035 for (i = 0; i < num_tc; i++)
7036 netdev_bind_sb_channel_queue(vsi->netdev, vdev,
7037 i,
7038 ch->num_queue_pairs,
7039 ch->base_queue);
7040 for (i = 0; i < ch->num_queue_pairs; i++) {
7041 struct i40e_ring *tx_ring, *rx_ring;
7042 u16 pf_q;
7043
7044 pf_q = ch->base_queue + i;
7045
7046
7047 tx_ring = vsi->tx_rings[pf_q];
7048 tx_ring->ch = ch;
7049
7050
7051 rx_ring = vsi->rx_rings[pf_q];
7052 rx_ring->ch = ch;
7053 }
7054 break;
7055 }
7056 }
7057
7058
7059
7060
7061 wmb();
7062
7063
7064 ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
7065 if (ret) {
7066
7067 macvlan_release_l2fw_offload(vdev);
7068 for (i = 0; i < ch->num_queue_pairs; i++) {
7069 struct i40e_ring *rx_ring;
7070 u16 pf_q;
7071
7072 pf_q = ch->base_queue + i;
7073 rx_ring = vsi->rx_rings[pf_q];
7074 rx_ring->netdev = NULL;
7075 }
7076 dev_info(&pf->pdev->dev,
7077 "Error adding mac filter on macvlan err %s, aq_err %s\n",
7078 i40e_stat_str(hw, ret),
7079 i40e_aq_str(hw, aq_err));
7080 netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n");
7081 }
7082
7083 return ret;
7084}
7085
7086
7087
7088
7089
7090
7091
7092
7093static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
7094 struct net_device *vdev)
7095{
7096 struct i40e_pf *pf = vsi->back;
7097 struct i40e_hw *hw = &pf->hw;
7098 struct i40e_vsi_context ctxt;
7099 u16 sections, qmap, num_qps;
7100 struct i40e_channel *ch;
7101 int i, pow, ret = 0;
7102 u8 offset = 0;
7103
7104 if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
7105 return -EINVAL;
7106
7107 num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
7108
7109
7110 pow = fls(roundup_pow_of_two(num_qps) - 1);
7111
7112 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
7113 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
7114
7115
7116 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
7117 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
7118 memset(&ctxt, 0, sizeof(ctxt));
7119 ctxt.seid = vsi->seid;
7120 ctxt.pf_num = vsi->back->hw.pf_id;
7121 ctxt.vf_num = 0;
7122 ctxt.uplink_seid = vsi->uplink_seid;
7123 ctxt.info = vsi->info;
7124 ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
7125 ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
7126 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
7127 ctxt.info.valid_sections |= cpu_to_le16(sections);
7128
7129
7130 vsi->rss_size = max_t(u16, num_qps, qcnt);
7131 ret = i40e_vsi_config_rss(vsi);
7132 if (ret) {
7133 dev_info(&pf->pdev->dev,
7134 "Failed to reconfig RSS for num_queues (%u)\n",
7135 vsi->rss_size);
7136 return ret;
7137 }
7138 vsi->reconfig_rss = true;
7139 dev_dbg(&vsi->back->pdev->dev,
7140 "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
7141 vsi->next_base_queue = num_qps;
7142 vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
7143
7144
7145
7146
7147 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7148 if (ret) {
7149 dev_info(&pf->pdev->dev,
7150 "Update vsi tc config failed, err %s aq_err %s\n",
7151 i40e_stat_str(hw, ret),
7152 i40e_aq_str(hw, hw->aq.asq_last_status));
7153 return ret;
7154 }
7155
7156 i40e_vsi_update_queue_map(vsi, &ctxt);
7157 vsi->info.valid_sections = 0;
7158
7159
7160 INIT_LIST_HEAD(&vsi->macvlan_list);
7161 for (i = 0; i < macvlan_cnt; i++) {
7162 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
7163 if (!ch) {
7164 ret = -ENOMEM;
7165 goto err_free;
7166 }
7167 INIT_LIST_HEAD(&ch->list);
7168 ch->num_queue_pairs = qcnt;
7169 if (!i40e_setup_channel(pf, vsi, ch)) {
7170 ret = -EINVAL;
7171 goto err_free;
7172 }
7173 ch->parent_vsi = vsi;
7174 vsi->cnt_q_avail -= ch->num_queue_pairs;
7175 vsi->macvlan_cnt++;
7176 list_add_tail(&ch->list, &vsi->macvlan_list);
7177 }
7178
7179 return ret;
7180
7181err_free:
7182 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
7183 i40e_free_macvlan_channels(vsi);
7184
7185 return ret;
7186}
7187
7188
7189
7190
7191
7192
7193static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
7194{
7195 struct i40e_netdev_priv *np = netdev_priv(netdev);
7196 u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
7197 struct i40e_vsi *vsi = np->vsi;
7198 struct i40e_pf *pf = vsi->back;
7199 struct i40e_fwd_adapter *fwd;
7200 int avail_macvlan, ret;
7201
7202 if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
7203 netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
7204 return ERR_PTR(-EINVAL);
7205 }
7206 if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
7207 netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
7208 return ERR_PTR(-EINVAL);
7209 }
7210 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
7211 netdev_info(netdev, "Not enough vectors available to support macvlans\n");
7212 return ERR_PTR(-EINVAL);
7213 }
7214
7215
7216
7217
7218 if (netif_is_multiqueue(vdev))
7219 return ERR_PTR(-ERANGE);
7220
7221 if (!vsi->macvlan_cnt) {
7222
7223 set_bit(0, vsi->fwd_bitmask);
7224
7225
7226
7227
7228
7229 vectors = pf->num_lan_msix;
7230 if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
7231
7232 q_per_macvlan = 4;
7233 macvlan_cnt = (vectors - 32) / 4;
7234 } else if (vectors <= 64 && vectors > 32) {
7235
7236 q_per_macvlan = 2;
7237 macvlan_cnt = (vectors - 16) / 2;
7238 } else if (vectors <= 32 && vectors > 16) {
7239
7240 q_per_macvlan = 1;
7241 macvlan_cnt = vectors - 16;
7242 } else if (vectors <= 16 && vectors > 8) {
7243
7244 q_per_macvlan = 1;
7245 macvlan_cnt = vectors - 8;
7246 } else {
7247
7248 q_per_macvlan = 1;
7249 macvlan_cnt = vectors - 1;
7250 }
7251
7252 if (macvlan_cnt == 0)
7253 return ERR_PTR(-EBUSY);
7254
7255
7256 i40e_quiesce_vsi(vsi);
7257
7258
7259 ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
7260 vdev);
7261 if (ret)
7262 return ERR_PTR(ret);
7263
7264
7265 i40e_unquiesce_vsi(vsi);
7266 }
7267 avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
7268 vsi->macvlan_cnt);
7269 if (avail_macvlan >= I40E_MAX_MACVLANS)
7270 return ERR_PTR(-EBUSY);
7271
7272
7273 fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
7274 if (!fwd)
7275 return ERR_PTR(-ENOMEM);
7276
7277 set_bit(avail_macvlan, vsi->fwd_bitmask);
7278 fwd->bit_no = avail_macvlan;
7279 netdev_set_sb_channel(vdev, avail_macvlan);
7280 fwd->netdev = vdev;
7281
7282 if (!netif_running(netdev))
7283 return fwd;
7284
7285
7286 ret = i40e_fwd_ring_up(vsi, vdev, fwd);
7287 if (ret) {
7288
7289 netdev_unbind_sb_channel(netdev, vdev);
7290 netdev_set_sb_channel(vdev, 0);
7291
7292 kfree(fwd);
7293 return ERR_PTR(-EINVAL);
7294 }
7295
7296 return fwd;
7297}
7298
7299
7300
7301
7302
7303static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
7304{
7305 struct i40e_channel *ch, *ch_tmp;
7306 struct i40e_pf *pf = vsi->back;
7307 struct i40e_hw *hw = &pf->hw;
7308 int aq_err, ret = 0;
7309
7310 if (list_empty(&vsi->macvlan_list))
7311 return;
7312
7313 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7314 if (i40e_is_channel_macvlan(ch)) {
7315 ret = i40e_del_macvlan_filter(hw, ch->seid,
7316 i40e_channel_mac(ch),
7317 &aq_err);
7318 if (!ret) {
7319
7320 i40e_reset_ch_rings(vsi, ch);
7321 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7322 netdev_unbind_sb_channel(vsi->netdev,
7323 ch->fwd->netdev);
7324 netdev_set_sb_channel(ch->fwd->netdev, 0);
7325 kfree(ch->fwd);
7326 ch->fwd = NULL;
7327 }
7328 }
7329 }
7330}
7331
7332
7333
7334
7335
7336
7337static void i40e_fwd_del(struct net_device *netdev, void *vdev)
7338{
7339 struct i40e_netdev_priv *np = netdev_priv(netdev);
7340 struct i40e_fwd_adapter *fwd = vdev;
7341 struct i40e_channel *ch, *ch_tmp;
7342 struct i40e_vsi *vsi = np->vsi;
7343 struct i40e_pf *pf = vsi->back;
7344 struct i40e_hw *hw = &pf->hw;
7345 int aq_err, ret = 0;
7346
7347
7348 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7349 if (i40e_is_channel_macvlan(ch) &&
7350 ether_addr_equal(i40e_channel_mac(ch),
7351 fwd->netdev->dev_addr)) {
7352 ret = i40e_del_macvlan_filter(hw, ch->seid,
7353 i40e_channel_mac(ch),
7354 &aq_err);
7355 if (!ret) {
7356
7357 i40e_reset_ch_rings(vsi, ch);
7358 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7359 netdev_unbind_sb_channel(netdev, fwd->netdev);
7360 netdev_set_sb_channel(fwd->netdev, 0);
7361 kfree(ch->fwd);
7362 ch->fwd = NULL;
7363 } else {
7364 dev_info(&pf->pdev->dev,
7365 "Error deleting mac filter on macvlan err %s, aq_err %s\n",
7366 i40e_stat_str(hw, ret),
7367 i40e_aq_str(hw, aq_err));
7368 }
7369 break;
7370 }
7371 }
7372}
7373
7374
7375
7376
7377
7378
7379static int i40e_setup_tc(struct net_device *netdev, void *type_data)
7380{
7381 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
7382 struct i40e_netdev_priv *np = netdev_priv(netdev);
7383 struct i40e_vsi *vsi = np->vsi;
7384 struct i40e_pf *pf = vsi->back;
7385 u8 enabled_tc = 0, num_tc, hw;
7386 bool need_reset = false;
7387 int old_queue_pairs;
7388 int ret = -EINVAL;
7389 u16 mode;
7390 int i;
7391
7392 old_queue_pairs = vsi->num_queue_pairs;
7393 num_tc = mqprio_qopt->qopt.num_tc;
7394 hw = mqprio_qopt->qopt.hw;
7395 mode = mqprio_qopt->mode;
7396 if (!hw) {
7397 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7398 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
7399 goto config_tc;
7400 }
7401
7402
7403 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
7404 netdev_info(netdev,
7405 "Configuring TC not supported in MFP mode\n");
7406 return ret;
7407 }
7408 switch (mode) {
7409 case TC_MQPRIO_MODE_DCB:
7410 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7411
7412
7413 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
7414 netdev_info(netdev,
7415 "DCB is not enabled for adapter\n");
7416 return ret;
7417 }
7418
7419
7420 if (num_tc > i40e_pf_get_num_tc(pf)) {
7421 netdev_info(netdev,
7422 "TC count greater than enabled on link for adapter\n");
7423 return ret;
7424 }
7425 break;
7426 case TC_MQPRIO_MODE_CHANNEL:
7427 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
7428 netdev_info(netdev,
7429 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
7430 return ret;
7431 }
7432 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7433 return ret;
7434 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
7435 if (ret)
7436 return ret;
7437 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
7438 sizeof(*mqprio_qopt));
7439 pf->flags |= I40E_FLAG_TC_MQPRIO;
7440 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
7441 break;
7442 default:
7443 return -EINVAL;
7444 }
7445
7446config_tc:
7447
7448 for (i = 0; i < num_tc; i++)
7449 enabled_tc |= BIT(i);
7450
7451
7452 if (enabled_tc == vsi->tc_config.enabled_tc &&
7453 mode != TC_MQPRIO_MODE_CHANNEL)
7454 return 0;
7455
7456
7457 i40e_quiesce_vsi(vsi);
7458
7459 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
7460 i40e_remove_queue_channels(vsi);
7461
7462
7463 ret = i40e_vsi_config_tc(vsi, enabled_tc);
7464 if (ret) {
7465 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
7466 vsi->seid);
7467 need_reset = true;
7468 goto exit;
7469 } else {
7470 dev_info(&vsi->back->pdev->dev,
7471 "Setup channel (id:%u) utilizing num_queues %d\n",
7472 vsi->seid, vsi->tc_config.tc_info[0].qcount);
7473 }
7474
7475 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
7476 if (vsi->mqprio_qopt.max_rate[0]) {
7477 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
7478
7479 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
7480 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
7481 if (!ret) {
7482 u64 credits = max_tx_rate;
7483
7484 do_div(credits, I40E_BW_CREDIT_DIVISOR);
7485 dev_dbg(&vsi->back->pdev->dev,
7486 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
7487 max_tx_rate,
7488 credits,
7489 vsi->seid);
7490 } else {
7491 need_reset = true;
7492 goto exit;
7493 }
7494 }
7495 ret = i40e_configure_queue_channels(vsi);
7496 if (ret) {
7497 vsi->num_queue_pairs = old_queue_pairs;
7498 netdev_info(netdev,
7499 "Failed configuring queue channels\n");
7500 need_reset = true;
7501 goto exit;
7502 }
7503 }
7504
7505exit:
7506
7507 if (need_reset) {
7508 i40e_vsi_set_default_tc_config(vsi);
7509 need_reset = false;
7510 }
7511
7512
7513 i40e_unquiesce_vsi(vsi);
7514 return ret;
7515}
7516
7517
7518
7519
7520
7521
7522
7523
7524static inline void
7525i40e_set_cld_element(struct i40e_cloud_filter *filter,
7526 struct i40e_aqc_cloud_filters_element_data *cld)
7527{
7528 int i, j;
7529 u32 ipa;
7530
7531 memset(cld, 0, sizeof(*cld));
7532 ether_addr_copy(cld->outer_mac, filter->dst_mac);
7533 ether_addr_copy(cld->inner_mac, filter->src_mac);
7534
7535 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
7536 return;
7537
7538 if (filter->n_proto == ETH_P_IPV6) {
7539#define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
7540 for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
7541 i++, j += 2) {
7542 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
7543 ipa = cpu_to_le32(ipa);
7544 memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
7545 }
7546 } else {
7547 ipa = be32_to_cpu(filter->dst_ipv4);
7548 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
7549 }
7550
7551 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
7552
7553
7554
7555
7556 if (filter->tenant_id)
7557 return;
7558}
7559
7560
7561
7562
7563
7564
7565
7566
7567
7568
7569int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
7570 struct i40e_cloud_filter *filter, bool add)
7571{
7572 struct i40e_aqc_cloud_filters_element_data cld_filter;
7573 struct i40e_pf *pf = vsi->back;
7574 int ret;
7575 static const u16 flag_table[128] = {
7576 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
7577 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
7578 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
7579 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
7580 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
7581 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
7582 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
7583 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
7584 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
7585 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
7586 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
7587 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
7588 [I40E_CLOUD_FILTER_FLAGS_IIP] =
7589 I40E_AQC_ADD_CLOUD_FILTER_IIP,
7590 };
7591
7592 if (filter->flags >= ARRAY_SIZE(flag_table))
7593 return I40E_ERR_CONFIG;
7594
7595
7596 i40e_set_cld_element(filter, &cld_filter);
7597
7598 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
7599 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
7600 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
7601
7602 if (filter->n_proto == ETH_P_IPV6)
7603 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7604 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7605 else
7606 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7607 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7608
7609 if (add)
7610 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
7611 &cld_filter, 1);
7612 else
7613 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
7614 &cld_filter, 1);
7615 if (ret)
7616 dev_dbg(&pf->pdev->dev,
7617 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
7618 add ? "add" : "delete", filter->dst_port, ret,
7619 pf->hw.aq.asq_last_status);
7620 else
7621 dev_info(&pf->pdev->dev,
7622 "%s cloud filter for VSI: %d\n",
7623 add ? "Added" : "Deleted", filter->seid);
7624 return ret;
7625}
7626
7627
7628
7629
7630
7631
7632
7633
7634
7635
7636int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
7637 struct i40e_cloud_filter *filter,
7638 bool add)
7639{
7640 struct i40e_aqc_cloud_filters_element_bb cld_filter;
7641 struct i40e_pf *pf = vsi->back;
7642 int ret;
7643
7644
7645 if ((is_valid_ether_addr(filter->dst_mac) &&
7646 is_valid_ether_addr(filter->src_mac)) ||
7647 (is_multicast_ether_addr(filter->dst_mac) &&
7648 is_multicast_ether_addr(filter->src_mac)))
7649 return -EOPNOTSUPP;
7650
7651
7652
7653
7654 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
7655 return -EOPNOTSUPP;
7656
7657
7658 if (filter->src_port || filter->src_ipv4 ||
7659 !ipv6_addr_any(&filter->ip.v6.src_ip6))
7660 return -EOPNOTSUPP;
7661
7662
7663 i40e_set_cld_element(filter, &cld_filter.element);
7664
7665 if (is_valid_ether_addr(filter->dst_mac) ||
7666 is_valid_ether_addr(filter->src_mac) ||
7667 is_multicast_ether_addr(filter->dst_mac) ||
7668 is_multicast_ether_addr(filter->src_mac)) {
7669
7670 if (filter->dst_ipv4)
7671 return -EOPNOTSUPP;
7672
7673
7674
7675
7676
7677 cld_filter.element.flags =
7678 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
7679
7680 if (filter->vlan_id) {
7681 cld_filter.element.flags =
7682 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
7683 }
7684
7685 } else if (filter->dst_ipv4 ||
7686 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
7687 cld_filter.element.flags =
7688 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
7689 if (filter->n_proto == ETH_P_IPV6)
7690 cld_filter.element.flags |=
7691 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7692 else
7693 cld_filter.element.flags |=
7694 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7695 } else {
7696 dev_err(&pf->pdev->dev,
7697 "either mac or ip has to be valid for cloud filter\n");
7698 return -EINVAL;
7699 }
7700
7701
7702 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
7703 be16_to_cpu(filter->dst_port);
7704
7705 if (add) {
7706
7707 ret = i40e_validate_and_set_switch_mode(vsi);
7708 if (ret) {
7709 dev_err(&pf->pdev->dev,
7710 "failed to set switch mode, ret %d\n",
7711 ret);
7712 return ret;
7713 }
7714
7715 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
7716 &cld_filter, 1);
7717 } else {
7718 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
7719 &cld_filter, 1);
7720 }
7721
7722 if (ret)
7723 dev_dbg(&pf->pdev->dev,
7724 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
7725 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
7726 else
7727 dev_info(&pf->pdev->dev,
7728 "%s cloud filter for VSI: %d, L4 port: %d\n",
7729 add ? "add" : "delete", filter->seid,
7730 ntohs(filter->dst_port));
7731 return ret;
7732}
7733
7734
7735
7736
7737
7738
7739
7740
7741static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
7742 struct flow_cls_offload *f,
7743 struct i40e_cloud_filter *filter)
7744{
7745 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
7746 struct flow_dissector *dissector = rule->match.dissector;
7747 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
7748 struct i40e_pf *pf = vsi->back;
7749 u8 field_flags = 0;
7750
7751 if (dissector->used_keys &
7752 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7753 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7754 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7755 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7756 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7757 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7758 BIT(FLOW_DISSECTOR_KEY_PORTS) |
7759 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
7760 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
7761 dissector->used_keys);
7762 return -EOPNOTSUPP;
7763 }
7764
7765 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
7766 struct flow_match_enc_keyid match;
7767
7768 flow_rule_match_enc_keyid(rule, &match);
7769 if (match.mask->keyid != 0)
7770 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
7771
7772 filter->tenant_id = be32_to_cpu(match.key->keyid);
7773 }
7774
7775 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
7776 struct flow_match_basic match;
7777
7778 flow_rule_match_basic(rule, &match);
7779 n_proto_key = ntohs(match.key->n_proto);
7780 n_proto_mask = ntohs(match.mask->n_proto);
7781
7782 if (n_proto_key == ETH_P_ALL) {
7783 n_proto_key = 0;
7784 n_proto_mask = 0;
7785 }
7786 filter->n_proto = n_proto_key & n_proto_mask;
7787 filter->ip_proto = match.key->ip_proto;
7788 }
7789
7790 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7791 struct flow_match_eth_addrs match;
7792
7793 flow_rule_match_eth_addrs(rule, &match);
7794
7795
7796 if (!is_zero_ether_addr(match.mask->dst)) {
7797 if (is_broadcast_ether_addr(match.mask->dst)) {
7798 field_flags |= I40E_CLOUD_FIELD_OMAC;
7799 } else {
7800 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
7801 match.mask->dst);
7802 return I40E_ERR_CONFIG;
7803 }
7804 }
7805
7806 if (!is_zero_ether_addr(match.mask->src)) {
7807 if (is_broadcast_ether_addr(match.mask->src)) {
7808 field_flags |= I40E_CLOUD_FIELD_IMAC;
7809 } else {
7810 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
7811 match.mask->src);
7812 return I40E_ERR_CONFIG;
7813 }
7814 }
7815 ether_addr_copy(filter->dst_mac, match.key->dst);
7816 ether_addr_copy(filter->src_mac, match.key->src);
7817 }
7818
7819 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
7820 struct flow_match_vlan match;
7821
7822 flow_rule_match_vlan(rule, &match);
7823 if (match.mask->vlan_id) {
7824 if (match.mask->vlan_id == VLAN_VID_MASK) {
7825 field_flags |= I40E_CLOUD_FIELD_IVLAN;
7826
7827 } else {
7828 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
7829 match.mask->vlan_id);
7830 return I40E_ERR_CONFIG;
7831 }
7832 }
7833
7834 filter->vlan_id = cpu_to_be16(match.key->vlan_id);
7835 }
7836
7837 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
7838 struct flow_match_control match;
7839
7840 flow_rule_match_control(rule, &match);
7841 addr_type = match.key->addr_type;
7842 }
7843
7844 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7845 struct flow_match_ipv4_addrs match;
7846
7847 flow_rule_match_ipv4_addrs(rule, &match);
7848 if (match.mask->dst) {
7849 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
7850 field_flags |= I40E_CLOUD_FIELD_IIP;
7851 } else {
7852 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
7853 &match.mask->dst);
7854 return I40E_ERR_CONFIG;
7855 }
7856 }
7857
7858 if (match.mask->src) {
7859 if (match.mask->src == cpu_to_be32(0xffffffff)) {
7860 field_flags |= I40E_CLOUD_FIELD_IIP;
7861 } else {
7862 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
7863 &match.mask->src);
7864 return I40E_ERR_CONFIG;
7865 }
7866 }
7867
7868 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
7869 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
7870 return I40E_ERR_CONFIG;
7871 }
7872 filter->dst_ipv4 = match.key->dst;
7873 filter->src_ipv4 = match.key->src;
7874 }
7875
7876 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7877 struct flow_match_ipv6_addrs match;
7878
7879 flow_rule_match_ipv6_addrs(rule, &match);
7880
7881
7882
7883
7884 if (ipv6_addr_loopback(&match.key->dst) ||
7885 ipv6_addr_loopback(&match.key->src)) {
7886 dev_err(&pf->pdev->dev,
7887 "Bad ipv6, addr is LOOPBACK\n");
7888 return I40E_ERR_CONFIG;
7889 }
7890 if (!ipv6_addr_any(&match.mask->dst) ||
7891 !ipv6_addr_any(&match.mask->src))
7892 field_flags |= I40E_CLOUD_FIELD_IIP;
7893
7894 memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
7895 sizeof(filter->src_ipv6));
7896 memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
7897 sizeof(filter->dst_ipv6));
7898 }
7899
7900 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
7901 struct flow_match_ports match;
7902
7903 flow_rule_match_ports(rule, &match);
7904 if (match.mask->src) {
7905 if (match.mask->src == cpu_to_be16(0xffff)) {
7906 field_flags |= I40E_CLOUD_FIELD_IIP;
7907 } else {
7908 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
7909 be16_to_cpu(match.mask->src));
7910 return I40E_ERR_CONFIG;
7911 }
7912 }
7913
7914 if (match.mask->dst) {
7915 if (match.mask->dst == cpu_to_be16(0xffff)) {
7916 field_flags |= I40E_CLOUD_FIELD_IIP;
7917 } else {
7918 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
7919 be16_to_cpu(match.mask->dst));
7920 return I40E_ERR_CONFIG;
7921 }
7922 }
7923
7924 filter->dst_port = match.key->dst;
7925 filter->src_port = match.key->src;
7926
7927 switch (filter->ip_proto) {
7928 case IPPROTO_TCP:
7929 case IPPROTO_UDP:
7930 break;
7931 default:
7932 dev_err(&pf->pdev->dev,
7933 "Only UDP and TCP transport are supported\n");
7934 return -EINVAL;
7935 }
7936 }
7937 filter->flags = field_flags;
7938 return 0;
7939}
7940
7941
7942
7943
7944
7945
7946
7947
7948static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
7949 struct i40e_cloud_filter *filter)
7950{
7951 struct i40e_channel *ch, *ch_tmp;
7952
7953
7954 if (tc == 0) {
7955 filter->seid = vsi->seid;
7956 return 0;
7957 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
7958 if (!filter->dst_port) {
7959 dev_err(&vsi->back->pdev->dev,
7960 "Specify destination port to direct to traffic class that is not default\n");
7961 return -EINVAL;
7962 }
7963 if (list_empty(&vsi->ch_list))
7964 return -EINVAL;
7965 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
7966 list) {
7967 if (ch->seid == vsi->tc_seid_map[tc])
7968 filter->seid = ch->seid;
7969 }
7970 return 0;
7971 }
7972 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
7973 return -EINVAL;
7974}
7975
7976
7977
7978
7979
7980
7981
7982static int i40e_configure_clsflower(struct i40e_vsi *vsi,
7983 struct flow_cls_offload *cls_flower)
7984{
7985 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
7986 struct i40e_cloud_filter *filter = NULL;
7987 struct i40e_pf *pf = vsi->back;
7988 int err = 0;
7989
7990 if (tc < 0) {
7991 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
7992 return -EOPNOTSUPP;
7993 }
7994
7995 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
7996 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
7997 return -EBUSY;
7998
7999 if (pf->fdir_pf_active_filters ||
8000 (!hlist_empty(&pf->fdir_filter_list))) {
8001 dev_err(&vsi->back->pdev->dev,
8002 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
8003 return -EINVAL;
8004 }
8005
8006 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
8007 dev_err(&vsi->back->pdev->dev,
8008 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
8009 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8010 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8011 }
8012
8013 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
8014 if (!filter)
8015 return -ENOMEM;
8016
8017 filter->cookie = cls_flower->cookie;
8018
8019 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
8020 if (err < 0)
8021 goto err;
8022
8023 err = i40e_handle_tclass(vsi, tc, filter);
8024 if (err < 0)
8025 goto err;
8026
8027
8028 if (filter->dst_port)
8029 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
8030 else
8031 err = i40e_add_del_cloud_filter(vsi, filter, true);
8032
8033 if (err) {
8034 dev_err(&pf->pdev->dev,
8035 "Failed to add cloud filter, err %s\n",
8036 i40e_stat_str(&pf->hw, err));
8037 goto err;
8038 }
8039
8040
8041 INIT_HLIST_NODE(&filter->cloud_node);
8042
8043 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
8044
8045 pf->num_cloud_filters++;
8046
8047 return err;
8048err:
8049 kfree(filter);
8050 return err;
8051}
8052
8053
8054
8055
8056
8057
8058
8059static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
8060 unsigned long *cookie)
8061{
8062 struct i40e_cloud_filter *filter = NULL;
8063 struct hlist_node *node2;
8064
8065 hlist_for_each_entry_safe(filter, node2,
8066 &vsi->back->cloud_filter_list, cloud_node)
8067 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
8068 return filter;
8069 return NULL;
8070}
8071
8072
8073
8074
8075
8076
8077
8078static int i40e_delete_clsflower(struct i40e_vsi *vsi,
8079 struct flow_cls_offload *cls_flower)
8080{
8081 struct i40e_cloud_filter *filter = NULL;
8082 struct i40e_pf *pf = vsi->back;
8083 int err = 0;
8084
8085 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
8086
8087 if (!filter)
8088 return -EINVAL;
8089
8090 hash_del(&filter->cloud_node);
8091
8092 if (filter->dst_port)
8093 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
8094 else
8095 err = i40e_add_del_cloud_filter(vsi, filter, false);
8096
8097 kfree(filter);
8098 if (err) {
8099 dev_err(&pf->pdev->dev,
8100 "Failed to delete cloud filter, err %s\n",
8101 i40e_stat_str(&pf->hw, err));
8102 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
8103 }
8104
8105 pf->num_cloud_filters--;
8106 if (!pf->num_cloud_filters)
8107 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8108 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8109 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8110 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8111 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8112 }
8113 return 0;
8114}
8115
8116
8117
8118
8119
8120
8121static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
8122 struct flow_cls_offload *cls_flower)
8123{
8124 struct i40e_vsi *vsi = np->vsi;
8125
8126 switch (cls_flower->command) {
8127 case FLOW_CLS_REPLACE:
8128 return i40e_configure_clsflower(vsi, cls_flower);
8129 case FLOW_CLS_DESTROY:
8130 return i40e_delete_clsflower(vsi, cls_flower);
8131 case FLOW_CLS_STATS:
8132 return -EOPNOTSUPP;
8133 default:
8134 return -EOPNOTSUPP;
8135 }
8136}
8137
8138static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
8139 void *cb_priv)
8140{
8141 struct i40e_netdev_priv *np = cb_priv;
8142
8143 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
8144 return -EOPNOTSUPP;
8145
8146 switch (type) {
8147 case TC_SETUP_CLSFLOWER:
8148 return i40e_setup_tc_cls_flower(np, type_data);
8149
8150 default:
8151 return -EOPNOTSUPP;
8152 }
8153}
8154
8155static LIST_HEAD(i40e_block_cb_list);
8156
8157static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8158 void *type_data)
8159{
8160 struct i40e_netdev_priv *np = netdev_priv(netdev);
8161
8162 switch (type) {
8163 case TC_SETUP_QDISC_MQPRIO:
8164 return i40e_setup_tc(netdev, type_data);
8165 case TC_SETUP_BLOCK:
8166 return flow_block_cb_setup_simple(type_data,
8167 &i40e_block_cb_list,
8168 i40e_setup_tc_block_cb,
8169 np, np, true);
8170 default:
8171 return -EOPNOTSUPP;
8172 }
8173}
8174
8175
8176
8177
8178
8179
8180
8181
8182
8183
8184
8185
8186
8187int i40e_open(struct net_device *netdev)
8188{
8189 struct i40e_netdev_priv *np = netdev_priv(netdev);
8190 struct i40e_vsi *vsi = np->vsi;
8191 struct i40e_pf *pf = vsi->back;
8192 int err;
8193
8194
8195 if (test_bit(__I40E_TESTING, pf->state) ||
8196 test_bit(__I40E_BAD_EEPROM, pf->state))
8197 return -EBUSY;
8198
8199 netif_carrier_off(netdev);
8200
8201 if (i40e_force_link_state(pf, true))
8202 return -EAGAIN;
8203
8204 err = i40e_vsi_open(vsi);
8205 if (err)
8206 return err;
8207
8208
8209 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
8210 TCP_FLAG_FIN) >> 16);
8211 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
8212 TCP_FLAG_FIN |
8213 TCP_FLAG_CWR) >> 16);
8214 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
8215
8216 udp_tunnel_get_rx_info(netdev);
8217
8218 return 0;
8219}
8220
8221
8222
8223
8224
8225
8226
8227
8228
8229
8230
8231int i40e_vsi_open(struct i40e_vsi *vsi)
8232{
8233 struct i40e_pf *pf = vsi->back;
8234 char int_name[I40E_INT_NAME_STR_LEN];
8235 int err;
8236
8237
8238 err = i40e_vsi_setup_tx_resources(vsi);
8239 if (err)
8240 goto err_setup_tx;
8241 err = i40e_vsi_setup_rx_resources(vsi);
8242 if (err)
8243 goto err_setup_rx;
8244
8245 err = i40e_vsi_configure(vsi);
8246 if (err)
8247 goto err_setup_rx;
8248
8249 if (vsi->netdev) {
8250 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
8251 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
8252 err = i40e_vsi_request_irq(vsi, int_name);
8253 if (err)
8254 goto err_setup_rx;
8255
8256
8257 err = netif_set_real_num_tx_queues(vsi->netdev,
8258 vsi->num_queue_pairs);
8259 if (err)
8260 goto err_set_queues;
8261
8262 err = netif_set_real_num_rx_queues(vsi->netdev,
8263 vsi->num_queue_pairs);
8264 if (err)
8265 goto err_set_queues;
8266
8267 } else if (vsi->type == I40E_VSI_FDIR) {
8268 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
8269 dev_driver_string(&pf->pdev->dev),
8270 dev_name(&pf->pdev->dev));
8271 err = i40e_vsi_request_irq(vsi, int_name);
8272
8273 } else {
8274 err = -EINVAL;
8275 goto err_setup_rx;
8276 }
8277
8278 err = i40e_up_complete(vsi);
8279 if (err)
8280 goto err_up_complete;
8281
8282 return 0;
8283
8284err_up_complete:
8285 i40e_down(vsi);
8286err_set_queues:
8287 i40e_vsi_free_irq(vsi);
8288err_setup_rx:
8289 i40e_vsi_free_rx_resources(vsi);
8290err_setup_tx:
8291 i40e_vsi_free_tx_resources(vsi);
8292 if (vsi == pf->vsi[pf->lan_vsi])
8293 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
8294
8295 return err;
8296}
8297
8298
8299
8300
8301
8302
8303
8304
8305static void i40e_fdir_filter_exit(struct i40e_pf *pf)
8306{
8307 struct i40e_fdir_filter *filter;
8308 struct i40e_flex_pit *pit_entry, *tmp;
8309 struct hlist_node *node2;
8310
8311 hlist_for_each_entry_safe(filter, node2,
8312 &pf->fdir_filter_list, fdir_node) {
8313 hlist_del(&filter->fdir_node);
8314 kfree(filter);
8315 }
8316
8317 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
8318 list_del(&pit_entry->list);
8319 kfree(pit_entry);
8320 }
8321 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
8322
8323 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
8324 list_del(&pit_entry->list);
8325 kfree(pit_entry);
8326 }
8327 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
8328
8329 pf->fdir_pf_active_filters = 0;
8330 pf->fd_tcp4_filter_cnt = 0;
8331 pf->fd_udp4_filter_cnt = 0;
8332 pf->fd_sctp4_filter_cnt = 0;
8333 pf->fd_ip4_filter_cnt = 0;
8334
8335
8336 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8337 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8338 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8339
8340
8341 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
8342 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8343 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8344
8345
8346 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
8347 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8348 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8349
8350
8351 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
8352 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8353
8354 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
8355 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8356}
8357
8358
8359
8360
8361
8362
8363
8364
8365static void i40e_cloud_filter_exit(struct i40e_pf *pf)
8366{
8367 struct i40e_cloud_filter *cfilter;
8368 struct hlist_node *node;
8369
8370 hlist_for_each_entry_safe(cfilter, node,
8371 &pf->cloud_filter_list, cloud_node) {
8372 hlist_del(&cfilter->cloud_node);
8373 kfree(cfilter);
8374 }
8375 pf->num_cloud_filters = 0;
8376
8377 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8378 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8379 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8380 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8381 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8382 }
8383}
8384
8385
8386
8387
8388
8389
8390
8391
8392
8393
8394
8395int i40e_close(struct net_device *netdev)
8396{
8397 struct i40e_netdev_priv *np = netdev_priv(netdev);
8398 struct i40e_vsi *vsi = np->vsi;
8399
8400 i40e_vsi_close(vsi);
8401
8402 return 0;
8403}
8404
8405
8406
8407
8408
8409
8410
8411
8412
8413
8414
8415
8416void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
8417{
8418 u32 val;
8419
8420 WARN_ON(in_interrupt());
8421
8422
8423
8424 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
8425
8426
8427
8428
8429
8430
8431
8432
8433
8434 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
8435 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8436 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
8437 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
8438
8439 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
8440
8441
8442
8443
8444
8445 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
8446 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8447 val |= I40E_GLGEN_RTRIG_CORER_MASK;
8448 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
8449 i40e_flush(&pf->hw);
8450
8451 } else if (reset_flags & I40E_PF_RESET_FLAG) {
8452
8453
8454
8455
8456
8457
8458
8459
8460
8461 dev_dbg(&pf->pdev->dev, "PFR requested\n");
8462 i40e_handle_reset_warning(pf, lock_acquired);
8463
8464 dev_info(&pf->pdev->dev,
8465 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
8466 "FW LLDP is disabled\n" :
8467 "FW LLDP is enabled\n");
8468
8469 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
8470 int v;
8471
8472
8473 dev_info(&pf->pdev->dev,
8474 "VSI reinit requested\n");
8475 for (v = 0; v < pf->num_alloc_vsi; v++) {
8476 struct i40e_vsi *vsi = pf->vsi[v];
8477
8478 if (vsi != NULL &&
8479 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
8480 vsi->state))
8481 i40e_vsi_reinit_locked(pf->vsi[v]);
8482 }
8483 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
8484 int v;
8485
8486
8487 dev_info(&pf->pdev->dev, "VSI down requested\n");
8488 for (v = 0; v < pf->num_alloc_vsi; v++) {
8489 struct i40e_vsi *vsi = pf->vsi[v];
8490
8491 if (vsi != NULL &&
8492 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
8493 vsi->state)) {
8494 set_bit(__I40E_VSI_DOWN, vsi->state);
8495 i40e_down(vsi);
8496 }
8497 }
8498 } else {
8499 dev_info(&pf->pdev->dev,
8500 "bad reset request 0x%08x\n", reset_flags);
8501 }
8502}
8503
8504#ifdef CONFIG_I40E_DCB
8505
8506
8507
8508
8509
8510
8511bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
8512 struct i40e_dcbx_config *old_cfg,
8513 struct i40e_dcbx_config *new_cfg)
8514{
8515 bool need_reconfig = false;
8516
8517
8518 if (memcmp(&new_cfg->etscfg,
8519 &old_cfg->etscfg,
8520 sizeof(new_cfg->etscfg))) {
8521
8522 if (memcmp(&new_cfg->etscfg.prioritytable,
8523 &old_cfg->etscfg.prioritytable,
8524 sizeof(new_cfg->etscfg.prioritytable))) {
8525 need_reconfig = true;
8526 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
8527 }
8528
8529 if (memcmp(&new_cfg->etscfg.tcbwtable,
8530 &old_cfg->etscfg.tcbwtable,
8531 sizeof(new_cfg->etscfg.tcbwtable)))
8532 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
8533
8534 if (memcmp(&new_cfg->etscfg.tsatable,
8535 &old_cfg->etscfg.tsatable,
8536 sizeof(new_cfg->etscfg.tsatable)))
8537 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
8538 }
8539
8540
8541 if (memcmp(&new_cfg->pfc,
8542 &old_cfg->pfc,
8543 sizeof(new_cfg->pfc))) {
8544 need_reconfig = true;
8545 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
8546 }
8547
8548
8549 if (memcmp(&new_cfg->app,
8550 &old_cfg->app,
8551 sizeof(new_cfg->app))) {
8552 need_reconfig = true;
8553 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
8554 }
8555
8556 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
8557 return need_reconfig;
8558}
8559
8560
8561
8562
8563
8564
8565static int i40e_handle_lldp_event(struct i40e_pf *pf,
8566 struct i40e_arq_event_info *e)
8567{
8568 struct i40e_aqc_lldp_get_mib *mib =
8569 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
8570 struct i40e_hw *hw = &pf->hw;
8571 struct i40e_dcbx_config tmp_dcbx_cfg;
8572 bool need_reconfig = false;
8573 int ret = 0;
8574 u8 type;
8575
8576
8577 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
8578 return ret;
8579
8580
8581 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
8582 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
8583 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
8584 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
8585 return ret;
8586
8587
8588 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
8589 dev_dbg(&pf->pdev->dev,
8590 "LLDP event mib type %s\n", type ? "remote" : "local");
8591 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
8592
8593 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
8594 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
8595 &hw->remote_dcbx_config);
8596 goto exit;
8597 }
8598
8599
8600 tmp_dcbx_cfg = hw->local_dcbx_config;
8601
8602
8603 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
8604
8605 ret = i40e_get_dcb_config(&pf->hw);
8606 if (ret) {
8607 dev_info(&pf->pdev->dev,
8608 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
8609 i40e_stat_str(&pf->hw, ret),
8610 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8611 goto exit;
8612 }
8613
8614
8615 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
8616 sizeof(tmp_dcbx_cfg))) {
8617 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
8618 goto exit;
8619 }
8620
8621 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
8622 &hw->local_dcbx_config);
8623
8624 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
8625
8626 if (!need_reconfig)
8627 goto exit;
8628
8629
8630 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
8631 pf->flags |= I40E_FLAG_DCB_ENABLED;
8632 else
8633 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8634
8635 set_bit(__I40E_PORT_SUSPENDED, pf->state);
8636
8637 i40e_pf_quiesce_all_vsi(pf);
8638
8639
8640 i40e_dcb_reconfigure(pf);
8641
8642 ret = i40e_resume_port_tx(pf);
8643
8644 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
8645
8646 if (ret)
8647 goto exit;
8648
8649
8650 ret = i40e_pf_wait_queues_disabled(pf);
8651 if (ret) {
8652
8653 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8654 i40e_service_event_schedule(pf);
8655 } else {
8656 i40e_pf_unquiesce_all_vsi(pf);
8657 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
8658 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
8659 }
8660
8661exit:
8662 return ret;
8663}
8664#endif
8665
8666
8667
8668
8669
8670
8671
8672void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
8673{
8674 rtnl_lock();
8675 i40e_do_reset(pf, reset_flags, true);
8676 rtnl_unlock();
8677}
8678
8679
8680
8681
8682
8683
8684
8685
8686
8687static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
8688 struct i40e_arq_event_info *e)
8689{
8690 struct i40e_aqc_lan_overflow *data =
8691 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
8692 u32 queue = le32_to_cpu(data->prtdcb_rupto);
8693 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
8694 struct i40e_hw *hw = &pf->hw;
8695 struct i40e_vf *vf;
8696 u16 vf_id;
8697
8698 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
8699 queue, qtx_ctl);
8700
8701
8702 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
8703 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
8704 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
8705 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
8706 vf_id -= hw->func_caps.vf_base_id;
8707 vf = &pf->vf[vf_id];
8708 i40e_vc_notify_vf_reset(vf);
8709
8710 msleep(20);
8711 i40e_reset_vf(vf, false);
8712 }
8713}
8714
8715
8716
8717
8718
8719u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
8720{
8721 u32 val, fcnt_prog;
8722
8723 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8724 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
8725 return fcnt_prog;
8726}
8727
8728
8729
8730
8731
8732u32 i40e_get_current_fd_count(struct i40e_pf *pf)
8733{
8734 u32 val, fcnt_prog;
8735
8736 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8737 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
8738 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
8739 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
8740 return fcnt_prog;
8741}
8742
8743
8744
8745
8746
8747u32 i40e_get_global_fd_count(struct i40e_pf *pf)
8748{
8749 u32 val, fcnt_prog;
8750
8751 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
8752 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
8753 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
8754 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
8755 return fcnt_prog;
8756}
8757
8758
8759
8760
8761
8762static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
8763{
8764 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
8765 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
8766 (I40E_DEBUG_FD & pf->hw.debug_mask))
8767 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
8768}
8769
8770
8771
8772
8773
8774static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
8775{
8776 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
8777
8778
8779
8780
8781
8782 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8783 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8784 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8785
8786 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8787 (I40E_DEBUG_FD & pf->hw.debug_mask))
8788 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
8789 }
8790}
8791
8792
8793
8794
8795
8796
8797static void i40e_delete_invalid_filter(struct i40e_pf *pf,
8798 struct i40e_fdir_filter *filter)
8799{
8800
8801 pf->fdir_pf_active_filters--;
8802 pf->fd_inv = 0;
8803
8804 switch (filter->flow_type) {
8805 case TCP_V4_FLOW:
8806 pf->fd_tcp4_filter_cnt--;
8807 break;
8808 case UDP_V4_FLOW:
8809 pf->fd_udp4_filter_cnt--;
8810 break;
8811 case SCTP_V4_FLOW:
8812 pf->fd_sctp4_filter_cnt--;
8813 break;
8814 case IP_USER_FLOW:
8815 switch (filter->ip4_proto) {
8816 case IPPROTO_TCP:
8817 pf->fd_tcp4_filter_cnt--;
8818 break;
8819 case IPPROTO_UDP:
8820 pf->fd_udp4_filter_cnt--;
8821 break;
8822 case IPPROTO_SCTP:
8823 pf->fd_sctp4_filter_cnt--;
8824 break;
8825 case IPPROTO_IP:
8826 pf->fd_ip4_filter_cnt--;
8827 break;
8828 }
8829 break;
8830 }
8831
8832
8833 hlist_del(&filter->fdir_node);
8834 kfree(filter);
8835}
8836
8837
8838
8839
8840
8841void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
8842{
8843 struct i40e_fdir_filter *filter;
8844 u32 fcnt_prog, fcnt_avail;
8845 struct hlist_node *node;
8846
8847 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8848 return;
8849
8850
8851 fcnt_prog = i40e_get_global_fd_count(pf);
8852 fcnt_avail = pf->fdir_pf_filter_count;
8853 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
8854 (pf->fd_add_err == 0) ||
8855 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
8856 i40e_reenable_fdir_sb(pf);
8857
8858
8859
8860
8861
8862 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
8863 (pf->fd_tcp4_filter_cnt == 0))
8864 i40e_reenable_fdir_atr(pf);
8865
8866
8867 if (pf->fd_inv > 0) {
8868 hlist_for_each_entry_safe(filter, node,
8869 &pf->fdir_filter_list, fdir_node)
8870 if (filter->fd_id == pf->fd_inv)
8871 i40e_delete_invalid_filter(pf, filter);
8872 }
8873}
8874
8875#define I40E_MIN_FD_FLUSH_INTERVAL 10
8876#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
8877
8878
8879
8880
8881static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
8882{
8883 unsigned long min_flush_time;
8884 int flush_wait_retry = 50;
8885 bool disable_atr = false;
8886 int fd_room;
8887 int reg;
8888
8889 if (!time_after(jiffies, pf->fd_flush_timestamp +
8890 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
8891 return;
8892
8893
8894
8895
8896 min_flush_time = pf->fd_flush_timestamp +
8897 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
8898 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
8899
8900 if (!(time_after(jiffies, min_flush_time)) &&
8901 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
8902 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8903 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
8904 disable_atr = true;
8905 }
8906
8907 pf->fd_flush_timestamp = jiffies;
8908 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8909
8910 wr32(&pf->hw, I40E_PFQF_CTL_1,
8911 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
8912 i40e_flush(&pf->hw);
8913 pf->fd_flush_cnt++;
8914 pf->fd_add_err = 0;
8915 do {
8916
8917 usleep_range(5000, 6000);
8918 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
8919 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
8920 break;
8921 } while (flush_wait_retry--);
8922 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
8923 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
8924 } else {
8925
8926 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
8927 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
8928 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8929 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
8930 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8931 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
8932 }
8933}
8934
8935
8936
8937
8938
8939u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
8940{
8941 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
8942}
8943
8944
8945
8946
8947
8948
8949#define I40E_MAX_FD_PROGRAM_ERROR 256
8950
8951
8952
8953
8954
8955static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
8956{
8957
8958
8959 if (test_bit(__I40E_DOWN, pf->state))
8960 return;
8961
8962 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8963 i40e_fdir_flush_and_replay(pf);
8964
8965 i40e_fdir_check_and_reenable(pf);
8966
8967}
8968
8969
8970
8971
8972
8973
8974static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
8975{
8976 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
8977 return;
8978
8979 switch (vsi->type) {
8980 case I40E_VSI_MAIN:
8981 if (!vsi->netdev || !vsi->netdev_registered)
8982 break;
8983
8984 if (link_up) {
8985 netif_carrier_on(vsi->netdev);
8986 netif_tx_wake_all_queues(vsi->netdev);
8987 } else {
8988 netif_carrier_off(vsi->netdev);
8989 netif_tx_stop_all_queues(vsi->netdev);
8990 }
8991 break;
8992
8993 case I40E_VSI_SRIOV:
8994 case I40E_VSI_VMDQ2:
8995 case I40E_VSI_CTRL:
8996 case I40E_VSI_IWARP:
8997 case I40E_VSI_MIRROR:
8998 default:
8999
9000 break;
9001 }
9002}
9003
9004
9005
9006
9007
9008
9009static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
9010{
9011 struct i40e_pf *pf;
9012 int i;
9013
9014 if (!veb || !veb->pf)
9015 return;
9016 pf = veb->pf;
9017
9018
9019 for (i = 0; i < I40E_MAX_VEB; i++)
9020 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
9021 i40e_veb_link_event(pf->veb[i], link_up);
9022
9023
9024 for (i = 0; i < pf->num_alloc_vsi; i++)
9025 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
9026 i40e_vsi_link_event(pf->vsi[i], link_up);
9027}
9028
9029
9030
9031
9032
9033static void i40e_link_event(struct i40e_pf *pf)
9034{
9035 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9036 u8 new_link_speed, old_link_speed;
9037 i40e_status status;
9038 bool new_link, old_link;
9039
9040
9041 pf->hw.phy.get_link_info = true;
9042 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
9043 status = i40e_get_link_status(&pf->hw, &new_link);
9044
9045
9046 if (status == I40E_SUCCESS) {
9047 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9048 } else {
9049
9050
9051
9052 set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9053 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
9054 status);
9055 return;
9056 }
9057
9058 old_link_speed = pf->hw.phy.link_info_old.link_speed;
9059 new_link_speed = pf->hw.phy.link_info.link_speed;
9060
9061 if (new_link == old_link &&
9062 new_link_speed == old_link_speed &&
9063 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
9064 new_link == netif_carrier_ok(vsi->netdev)))
9065 return;
9066
9067 i40e_print_link_message(vsi, new_link);
9068
9069
9070
9071
9072 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
9073 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
9074 else
9075 i40e_vsi_link_event(vsi, new_link);
9076
9077 if (pf->vf)
9078 i40e_vc_notify_link_state(pf);
9079
9080 if (pf->flags & I40E_FLAG_PTP)
9081 i40e_ptp_set_increment(pf);
9082}
9083
9084
9085
9086
9087
9088static void i40e_watchdog_subtask(struct i40e_pf *pf)
9089{
9090 int i;
9091
9092
9093 if (test_bit(__I40E_DOWN, pf->state) ||
9094 test_bit(__I40E_CONFIG_BUSY, pf->state))
9095 return;
9096
9097
9098 if (time_before(jiffies, (pf->service_timer_previous +
9099 pf->service_timer_period)))
9100 return;
9101 pf->service_timer_previous = jiffies;
9102
9103 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
9104 test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
9105 i40e_link_event(pf);
9106
9107
9108
9109
9110 for (i = 0; i < pf->num_alloc_vsi; i++)
9111 if (pf->vsi[i] && pf->vsi[i]->netdev)
9112 i40e_update_stats(pf->vsi[i]);
9113
9114 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
9115
9116 for (i = 0; i < I40E_MAX_VEB; i++)
9117 if (pf->veb[i])
9118 i40e_update_veb_stats(pf->veb[i]);
9119 }
9120
9121 i40e_ptp_rx_hang(pf);
9122 i40e_ptp_tx_hang(pf);
9123}
9124
9125
9126
9127
9128
9129static void i40e_reset_subtask(struct i40e_pf *pf)
9130{
9131 u32 reset_flags = 0;
9132
9133 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
9134 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
9135 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
9136 }
9137 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
9138 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
9139 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9140 }
9141 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
9142 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
9143 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
9144 }
9145 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
9146 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
9147 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
9148 }
9149 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
9150 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
9151 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
9152 }
9153
9154
9155
9156
9157 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
9158 i40e_prep_for_reset(pf, false);
9159 i40e_reset(pf);
9160 i40e_rebuild(pf, false, false);
9161 }
9162
9163
9164 if (reset_flags &&
9165 !test_bit(__I40E_DOWN, pf->state) &&
9166 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
9167 i40e_do_reset(pf, reset_flags, false);
9168 }
9169}
9170
9171
9172
9173
9174
9175
9176static void i40e_handle_link_event(struct i40e_pf *pf,
9177 struct i40e_arq_event_info *e)
9178{
9179 struct i40e_aqc_get_link_status *status =
9180 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
9181
9182
9183
9184
9185
9186
9187
9188 i40e_link_event(pf);
9189
9190
9191 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
9192 dev_err(&pf->pdev->dev,
9193 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
9194 dev_err(&pf->pdev->dev,
9195 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9196 } else {
9197
9198
9199
9200 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
9201 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
9202 (!(status->link_info & I40E_AQ_LINK_UP)) &&
9203 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
9204 dev_err(&pf->pdev->dev,
9205 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
9206 dev_err(&pf->pdev->dev,
9207 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9208 }
9209 }
9210}
9211
9212
9213
9214
9215
9216static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
9217{
9218 struct i40e_arq_event_info event;
9219 struct i40e_hw *hw = &pf->hw;
9220 u16 pending, i = 0;
9221 i40e_status ret;
9222 u16 opcode;
9223 u32 oldval;
9224 u32 val;
9225
9226
9227 if (test_bit(__I40E_RESET_FAILED, pf->state))
9228 return;
9229
9230
9231 val = rd32(&pf->hw, pf->hw.aq.arq.len);
9232 oldval = val;
9233 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
9234 if (hw->debug_mask & I40E_DEBUG_AQ)
9235 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
9236 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
9237 }
9238 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
9239 if (hw->debug_mask & I40E_DEBUG_AQ)
9240 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
9241 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
9242 pf->arq_overflows++;
9243 }
9244 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
9245 if (hw->debug_mask & I40E_DEBUG_AQ)
9246 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
9247 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
9248 }
9249 if (oldval != val)
9250 wr32(&pf->hw, pf->hw.aq.arq.len, val);
9251
9252 val = rd32(&pf->hw, pf->hw.aq.asq.len);
9253 oldval = val;
9254 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
9255 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9256 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
9257 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
9258 }
9259 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
9260 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9261 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
9262 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
9263 }
9264 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
9265 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9266 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
9267 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
9268 }
9269 if (oldval != val)
9270 wr32(&pf->hw, pf->hw.aq.asq.len, val);
9271
9272 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
9273 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
9274 if (!event.msg_buf)
9275 return;
9276
9277 do {
9278 ret = i40e_clean_arq_element(hw, &event, &pending);
9279 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
9280 break;
9281 else if (ret) {
9282 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
9283 break;
9284 }
9285
9286 opcode = le16_to_cpu(event.desc.opcode);
9287 switch (opcode) {
9288
9289 case i40e_aqc_opc_get_link_status:
9290 i40e_handle_link_event(pf, &event);
9291 break;
9292 case i40e_aqc_opc_send_msg_to_pf:
9293 ret = i40e_vc_process_vf_msg(pf,
9294 le16_to_cpu(event.desc.retval),
9295 le32_to_cpu(event.desc.cookie_high),
9296 le32_to_cpu(event.desc.cookie_low),
9297 event.msg_buf,
9298 event.msg_len);
9299 break;
9300 case i40e_aqc_opc_lldp_update_mib:
9301 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
9302#ifdef CONFIG_I40E_DCB
9303 rtnl_lock();
9304 ret = i40e_handle_lldp_event(pf, &event);
9305 rtnl_unlock();
9306#endif
9307 break;
9308 case i40e_aqc_opc_event_lan_overflow:
9309 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
9310 i40e_handle_lan_overflow_event(pf, &event);
9311 break;
9312 case i40e_aqc_opc_send_msg_to_peer:
9313 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
9314 break;
9315 case i40e_aqc_opc_nvm_erase:
9316 case i40e_aqc_opc_nvm_update:
9317 case i40e_aqc_opc_oem_post_update:
9318 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
9319 "ARQ NVM operation 0x%04x completed\n",
9320 opcode);
9321 break;
9322 default:
9323 dev_info(&pf->pdev->dev,
9324 "ARQ: Unknown event 0x%04x ignored\n",
9325 opcode);
9326 break;
9327 }
9328 } while (i++ < pf->adminq_work_limit);
9329
9330 if (i < pf->adminq_work_limit)
9331 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
9332
9333
9334 val = rd32(hw, I40E_PFINT_ICR0_ENA);
9335 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
9336 wr32(hw, I40E_PFINT_ICR0_ENA, val);
9337 i40e_flush(hw);
9338
9339 kfree(event.msg_buf);
9340}
9341
9342
9343
9344
9345
9346static void i40e_verify_eeprom(struct i40e_pf *pf)
9347{
9348 int err;
9349
9350 err = i40e_diag_eeprom_test(&pf->hw);
9351 if (err) {
9352
9353 err = i40e_diag_eeprom_test(&pf->hw);
9354 if (err) {
9355 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
9356 err);
9357 set_bit(__I40E_BAD_EEPROM, pf->state);
9358 }
9359 }
9360
9361 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
9362 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
9363 clear_bit(__I40E_BAD_EEPROM, pf->state);
9364 }
9365}
9366
9367
9368
9369
9370
9371
9372
9373static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
9374{
9375 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9376 struct i40e_vsi_context ctxt;
9377 int ret;
9378
9379 ctxt.seid = pf->main_vsi_seid;
9380 ctxt.pf_num = pf->hw.pf_id;
9381 ctxt.vf_num = 0;
9382 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9383 if (ret) {
9384 dev_info(&pf->pdev->dev,
9385 "couldn't get PF vsi config, err %s aq_err %s\n",
9386 i40e_stat_str(&pf->hw, ret),
9387 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9388 return;
9389 }
9390 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9391 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9392 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9393
9394 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
9395 if (ret) {
9396 dev_info(&pf->pdev->dev,
9397 "update vsi switch failed, err %s aq_err %s\n",
9398 i40e_stat_str(&pf->hw, ret),
9399 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9400 }
9401}
9402
9403
9404
9405
9406
9407
9408
9409static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
9410{
9411 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9412 struct i40e_vsi_context ctxt;
9413 int ret;
9414
9415 ctxt.seid = pf->main_vsi_seid;
9416 ctxt.pf_num = pf->hw.pf_id;
9417 ctxt.vf_num = 0;
9418 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9419 if (ret) {
9420 dev_info(&pf->pdev->dev,
9421 "couldn't get PF vsi config, err %s aq_err %s\n",
9422 i40e_stat_str(&pf->hw, ret),
9423 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9424 return;
9425 }
9426 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9427 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9428 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9429
9430 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
9431 if (ret) {
9432 dev_info(&pf->pdev->dev,
9433 "update vsi switch failed, err %s aq_err %s\n",
9434 i40e_stat_str(&pf->hw, ret),
9435 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9436 }
9437}
9438
9439
9440
9441
9442
9443
9444
9445
9446
9447static void i40e_config_bridge_mode(struct i40e_veb *veb)
9448{
9449 struct i40e_pf *pf = veb->pf;
9450
9451 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
9452 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
9453 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
9454 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
9455 i40e_disable_pf_switch_lb(pf);
9456 else
9457 i40e_enable_pf_switch_lb(pf);
9458}
9459
9460
9461
9462
9463
9464
9465
9466
9467
9468
9469static int i40e_reconstitute_veb(struct i40e_veb *veb)
9470{
9471 struct i40e_vsi *ctl_vsi = NULL;
9472 struct i40e_pf *pf = veb->pf;
9473 int v, veb_idx;
9474 int ret;
9475
9476
9477 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
9478 if (pf->vsi[v] &&
9479 pf->vsi[v]->veb_idx == veb->idx &&
9480 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
9481 ctl_vsi = pf->vsi[v];
9482 break;
9483 }
9484 }
9485 if (!ctl_vsi) {
9486 dev_info(&pf->pdev->dev,
9487 "missing owner VSI for veb_idx %d\n", veb->idx);
9488 ret = -ENOENT;
9489 goto end_reconstitute;
9490 }
9491 if (ctl_vsi != pf->vsi[pf->lan_vsi])
9492 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
9493 ret = i40e_add_vsi(ctl_vsi);
9494 if (ret) {
9495 dev_info(&pf->pdev->dev,
9496 "rebuild of veb_idx %d owner VSI failed: %d\n",
9497 veb->idx, ret);
9498 goto end_reconstitute;
9499 }
9500 i40e_vsi_reset_stats(ctl_vsi);
9501
9502
9503 ret = i40e_add_veb(veb, ctl_vsi);
9504 if (ret)
9505 goto end_reconstitute;
9506
9507 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
9508 veb->bridge_mode = BRIDGE_MODE_VEB;
9509 else
9510 veb->bridge_mode = BRIDGE_MODE_VEPA;
9511 i40e_config_bridge_mode(veb);
9512
9513
9514 for (v = 0; v < pf->num_alloc_vsi; v++) {
9515 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
9516 continue;
9517
9518 if (pf->vsi[v]->veb_idx == veb->idx) {
9519 struct i40e_vsi *vsi = pf->vsi[v];
9520
9521 vsi->uplink_seid = veb->seid;
9522 ret = i40e_add_vsi(vsi);
9523 if (ret) {
9524 dev_info(&pf->pdev->dev,
9525 "rebuild of vsi_idx %d failed: %d\n",
9526 v, ret);
9527 goto end_reconstitute;
9528 }
9529 i40e_vsi_reset_stats(vsi);
9530 }
9531 }
9532
9533
9534 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
9535 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
9536 pf->veb[veb_idx]->uplink_seid = veb->seid;
9537 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
9538 if (ret)
9539 break;
9540 }
9541 }
9542
9543end_reconstitute:
9544 return ret;
9545}
9546
9547
9548
9549
9550
9551static int i40e_get_capabilities(struct i40e_pf *pf,
9552 enum i40e_admin_queue_opc list_type)
9553{
9554 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
9555 u16 data_size;
9556 int buf_len;
9557 int err;
9558
9559 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
9560 do {
9561 cap_buf = kzalloc(buf_len, GFP_KERNEL);
9562 if (!cap_buf)
9563 return -ENOMEM;
9564
9565
9566 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
9567 &data_size, list_type,
9568 NULL);
9569
9570 kfree(cap_buf);
9571
9572 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
9573
9574 buf_len = data_size;
9575 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
9576 dev_info(&pf->pdev->dev,
9577 "capability discovery failed, err %s aq_err %s\n",
9578 i40e_stat_str(&pf->hw, err),
9579 i40e_aq_str(&pf->hw,
9580 pf->hw.aq.asq_last_status));
9581 return -ENODEV;
9582 }
9583 } while (err);
9584
9585 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
9586 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9587 dev_info(&pf->pdev->dev,
9588 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
9589 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
9590 pf->hw.func_caps.num_msix_vectors,
9591 pf->hw.func_caps.num_msix_vectors_vf,
9592 pf->hw.func_caps.fd_filters_guaranteed,
9593 pf->hw.func_caps.fd_filters_best_effort,
9594 pf->hw.func_caps.num_tx_qp,
9595 pf->hw.func_caps.num_vsis);
9596 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
9597 dev_info(&pf->pdev->dev,
9598 "switch_mode=0x%04x, function_valid=0x%08x\n",
9599 pf->hw.dev_caps.switch_mode,
9600 pf->hw.dev_caps.valid_functions);
9601 dev_info(&pf->pdev->dev,
9602 "SR-IOV=%d, num_vfs for all function=%u\n",
9603 pf->hw.dev_caps.sr_iov_1_1,
9604 pf->hw.dev_caps.num_vfs);
9605 dev_info(&pf->pdev->dev,
9606 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
9607 pf->hw.dev_caps.num_vsis,
9608 pf->hw.dev_caps.num_rx_qp,
9609 pf->hw.dev_caps.num_tx_qp);
9610 }
9611 }
9612 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9613#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
9614 + pf->hw.func_caps.num_vfs)
9615 if (pf->hw.revision_id == 0 &&
9616 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
9617 dev_info(&pf->pdev->dev,
9618 "got num_vsis %d, setting num_vsis to %d\n",
9619 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
9620 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
9621 }
9622 }
9623 return 0;
9624}
9625
9626static int i40e_vsi_clear(struct i40e_vsi *vsi);
9627
9628
9629
9630
9631
9632static void i40e_fdir_sb_setup(struct i40e_pf *pf)
9633{
9634 struct i40e_vsi *vsi;
9635
9636
9637
9638
9639 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
9640 static const u32 hkey[] = {
9641 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
9642 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
9643 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
9644 0x95b3a76d};
9645 int i;
9646
9647 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
9648 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
9649 }
9650
9651 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
9652 return;
9653
9654
9655 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
9656
9657
9658 if (!vsi) {
9659 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
9660 pf->vsi[pf->lan_vsi]->seid, 0);
9661 if (!vsi) {
9662 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
9663 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
9664 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
9665 return;
9666 }
9667 }
9668
9669 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
9670}
9671
9672
9673
9674
9675
9676static void i40e_fdir_teardown(struct i40e_pf *pf)
9677{
9678 struct i40e_vsi *vsi;
9679
9680 i40e_fdir_filter_exit(pf);
9681 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
9682 if (vsi)
9683 i40e_vsi_release(vsi);
9684}
9685
9686
9687
9688
9689
9690
9691
9692
9693
9694static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
9695{
9696 struct i40e_cloud_filter *cfilter;
9697 struct i40e_pf *pf = vsi->back;
9698 struct hlist_node *node;
9699 i40e_status ret;
9700
9701
9702 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
9703 cloud_node) {
9704 if (cfilter->seid != seid)
9705 continue;
9706
9707 if (cfilter->dst_port)
9708 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
9709 true);
9710 else
9711 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
9712
9713 if (ret) {
9714 dev_dbg(&pf->pdev->dev,
9715 "Failed to rebuild cloud filter, err %s aq_err %s\n",
9716 i40e_stat_str(&pf->hw, ret),
9717 i40e_aq_str(&pf->hw,
9718 pf->hw.aq.asq_last_status));
9719 return ret;
9720 }
9721 }
9722 return 0;
9723}
9724
9725
9726
9727
9728
9729
9730
9731static int i40e_rebuild_channels(struct i40e_vsi *vsi)
9732{
9733 struct i40e_channel *ch, *ch_tmp;
9734 i40e_status ret;
9735
9736 if (list_empty(&vsi->ch_list))
9737 return 0;
9738
9739 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
9740 if (!ch->initialized)
9741 break;
9742
9743 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
9744 if (ret) {
9745 dev_info(&vsi->back->pdev->dev,
9746 "failed to rebuild channels using uplink_seid %u\n",
9747 vsi->uplink_seid);
9748 return ret;
9749 }
9750
9751 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
9752 if (ret) {
9753 dev_info(&vsi->back->pdev->dev,
9754 "failed to configure TX rings for channel %u\n",
9755 ch->seid);
9756 return ret;
9757 }
9758
9759 vsi->next_base_queue = vsi->next_base_queue +
9760 ch->num_queue_pairs;
9761 if (ch->max_tx_rate) {
9762 u64 credits = ch->max_tx_rate;
9763
9764 if (i40e_set_bw_limit(vsi, ch->seid,
9765 ch->max_tx_rate))
9766 return -EINVAL;
9767
9768 do_div(credits, I40E_BW_CREDIT_DIVISOR);
9769 dev_dbg(&vsi->back->pdev->dev,
9770 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
9771 ch->max_tx_rate,
9772 credits,
9773 ch->seid);
9774 }
9775 ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
9776 if (ret) {
9777 dev_dbg(&vsi->back->pdev->dev,
9778 "Failed to rebuild cloud filters for channel VSI %u\n",
9779 ch->seid);
9780 return ret;
9781 }
9782 }
9783 return 0;
9784}
9785
9786
9787
9788
9789
9790
9791
9792
9793
9794static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
9795{
9796 struct i40e_hw *hw = &pf->hw;
9797 i40e_status ret = 0;
9798 u32 v;
9799
9800 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
9801 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
9802 return;
9803 if (i40e_check_asq_alive(&pf->hw))
9804 i40e_vc_notify_reset(pf);
9805
9806 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
9807
9808
9809
9810 if (!lock_acquired)
9811 rtnl_lock();
9812 i40e_pf_quiesce_all_vsi(pf);
9813 if (!lock_acquired)
9814 rtnl_unlock();
9815
9816 for (v = 0; v < pf->num_alloc_vsi; v++) {
9817 if (pf->vsi[v])
9818 pf->vsi[v]->seid = 0;
9819 }
9820
9821 i40e_shutdown_adminq(&pf->hw);
9822
9823
9824 if (hw->hmc.hmc_obj) {
9825 ret = i40e_shutdown_lan_hmc(hw);
9826 if (ret)
9827 dev_warn(&pf->pdev->dev,
9828 "shutdown_lan_hmc failed: %d\n", ret);
9829 }
9830
9831
9832
9833
9834 i40e_ptp_save_hw_time(pf);
9835}
9836
9837
9838
9839
9840
9841static void i40e_send_version(struct i40e_pf *pf)
9842{
9843 struct i40e_driver_version dv;
9844
9845 dv.major_version = DRV_VERSION_MAJOR;
9846 dv.minor_version = DRV_VERSION_MINOR;
9847 dv.build_version = DRV_VERSION_BUILD;
9848 dv.subbuild_version = 0;
9849 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
9850 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
9851}
9852
9853
9854
9855
9856
9857static void i40e_get_oem_version(struct i40e_hw *hw)
9858{
9859 u16 block_offset = 0xffff;
9860 u16 block_length = 0;
9861 u16 capabilities = 0;
9862 u16 gen_snap = 0;
9863 u16 release = 0;
9864
9865#define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
9866#define I40E_NVM_OEM_LENGTH_OFFSET 0x00
9867#define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
9868#define I40E_NVM_OEM_GEN_OFFSET 0x02
9869#define I40E_NVM_OEM_RELEASE_OFFSET 0x03
9870#define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
9871#define I40E_NVM_OEM_LENGTH 3
9872
9873
9874 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
9875 if (block_offset == 0xffff)
9876 return;
9877
9878
9879 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
9880 &block_length);
9881 if (block_length < I40E_NVM_OEM_LENGTH)
9882 return;
9883
9884
9885 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
9886 &capabilities);
9887 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
9888 return;
9889
9890 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
9891 &gen_snap);
9892 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
9893 &release);
9894 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
9895 hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
9896}
9897
9898
9899
9900
9901
9902static int i40e_reset(struct i40e_pf *pf)
9903{
9904 struct i40e_hw *hw = &pf->hw;
9905 i40e_status ret;
9906
9907 ret = i40e_pf_reset(hw);
9908 if (ret) {
9909 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
9910 set_bit(__I40E_RESET_FAILED, pf->state);
9911 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
9912 } else {
9913 pf->pfr_count++;
9914 }
9915 return ret;
9916}
9917
9918
9919
9920
9921
9922
9923
9924
9925static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
9926{
9927 int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
9928 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9929 struct i40e_hw *hw = &pf->hw;
9930 u8 set_fc_aq_fail = 0;
9931 i40e_status ret;
9932 u32 val;
9933 int v;
9934
9935 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
9936 i40e_check_recovery_mode(pf)) {
9937 i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
9938 }
9939
9940 if (test_bit(__I40E_DOWN, pf->state) &&
9941 !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
9942 !old_recovery_mode_bit)
9943 goto clear_recovery;
9944 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
9945
9946
9947 ret = i40e_init_adminq(&pf->hw);
9948 if (ret) {
9949 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
9950 i40e_stat_str(&pf->hw, ret),
9951 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9952 goto clear_recovery;
9953 }
9954 i40e_get_oem_version(&pf->hw);
9955
9956 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
9957 ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
9958 hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
9959
9960
9961
9962
9963
9964 mdelay(300);
9965 }
9966
9967
9968 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
9969 i40e_verify_eeprom(pf);
9970
9971
9972
9973
9974
9975 if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
9976 old_recovery_mode_bit) {
9977 if (i40e_get_capabilities(pf,
9978 i40e_aqc_opc_list_func_capabilities))
9979 goto end_unlock;
9980
9981 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
9982
9983
9984
9985 if (i40e_setup_misc_vector_for_recovery_mode(pf))
9986 goto end_unlock;
9987 } else {
9988 if (!lock_acquired)
9989 rtnl_lock();
9990
9991
9992
9993
9994 free_irq(pf->pdev->irq, pf);
9995 i40e_clear_interrupt_scheme(pf);
9996 if (i40e_restore_interrupt_scheme(pf))
9997 goto end_unlock;
9998 }
9999
10000
10001 i40e_send_version(pf);
10002
10003
10004
10005
10006 goto end_unlock;
10007 }
10008
10009 i40e_clear_pxe_mode(hw);
10010 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
10011 if (ret)
10012 goto end_core_reset;
10013
10014 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10015 hw->func_caps.num_rx_qp, 0, 0);
10016 if (ret) {
10017 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
10018 goto end_core_reset;
10019 }
10020 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10021 if (ret) {
10022 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
10023 goto end_core_reset;
10024 }
10025
10026
10027 i40e_aq_set_dcb_parameters(hw, true, NULL);
10028
10029#ifdef CONFIG_I40E_DCB
10030 ret = i40e_init_pf_dcb(pf);
10031 if (ret) {
10032 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
10033 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10034
10035 }
10036#endif
10037
10038 if (!lock_acquired)
10039 rtnl_lock();
10040 ret = i40e_setup_pf_switch(pf, reinit);
10041 if (ret)
10042 goto end_unlock;
10043
10044
10045
10046
10047 ret = i40e_aq_set_phy_int_mask(&pf->hw,
10048 ~(I40E_AQ_EVENT_LINK_UPDOWN |
10049 I40E_AQ_EVENT_MEDIA_NA |
10050 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
10051 if (ret)
10052 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10053 i40e_stat_str(&pf->hw, ret),
10054 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10055
10056
10057 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
10058 if (ret)
10059 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
10060 i40e_stat_str(&pf->hw, ret),
10061 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10062
10063
10064
10065
10066
10067
10068
10069
10070 if (vsi->uplink_seid != pf->mac_seid) {
10071 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
10072
10073 for (v = 0; v < I40E_MAX_VEB; v++) {
10074 if (!pf->veb[v])
10075 continue;
10076
10077 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
10078 pf->veb[v]->uplink_seid == 0) {
10079 ret = i40e_reconstitute_veb(pf->veb[v]);
10080
10081 if (!ret)
10082 continue;
10083
10084
10085
10086
10087
10088
10089
10090 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
10091 dev_info(&pf->pdev->dev,
10092 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
10093 ret);
10094 vsi->uplink_seid = pf->mac_seid;
10095 break;
10096 } else if (pf->veb[v]->uplink_seid == 0) {
10097 dev_info(&pf->pdev->dev,
10098 "rebuild of orphan VEB failed: %d\n",
10099 ret);
10100 }
10101 }
10102 }
10103 }
10104
10105 if (vsi->uplink_seid == pf->mac_seid) {
10106 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
10107
10108 ret = i40e_add_vsi(vsi);
10109 if (ret) {
10110 dev_info(&pf->pdev->dev,
10111 "rebuild of Main VSI failed: %d\n", ret);
10112 goto end_unlock;
10113 }
10114 }
10115
10116 if (vsi->mqprio_qopt.max_rate[0]) {
10117 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
10118 u64 credits = 0;
10119
10120 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
10121 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
10122 if (ret)
10123 goto end_unlock;
10124
10125 credits = max_tx_rate;
10126 do_div(credits, I40E_BW_CREDIT_DIVISOR);
10127 dev_dbg(&vsi->back->pdev->dev,
10128 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10129 max_tx_rate,
10130 credits,
10131 vsi->seid);
10132 }
10133
10134 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
10135 if (ret)
10136 goto end_unlock;
10137
10138
10139
10140
10141 ret = i40e_rebuild_channels(vsi);
10142 if (ret)
10143 goto end_unlock;
10144
10145
10146
10147
10148
10149#define I40E_REG_MSS 0x000E64DC
10150#define I40E_REG_MSS_MIN_MASK 0x3FF0000
10151#define I40E_64BYTE_MSS 0x400000
10152 val = rd32(hw, I40E_REG_MSS);
10153 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
10154 val &= ~I40E_REG_MSS_MIN_MASK;
10155 val |= I40E_64BYTE_MSS;
10156 wr32(hw, I40E_REG_MSS, val);
10157 }
10158
10159 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
10160 msleep(75);
10161 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
10162 if (ret)
10163 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
10164 i40e_stat_str(&pf->hw, ret),
10165 i40e_aq_str(&pf->hw,
10166 pf->hw.aq.asq_last_status));
10167 }
10168
10169 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10170 ret = i40e_setup_misc_vector(pf);
10171
10172
10173
10174
10175
10176
10177
10178 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
10179 pf->main_vsi_seid);
10180
10181
10182 i40e_pf_unquiesce_all_vsi(pf);
10183
10184
10185 if (!lock_acquired)
10186 rtnl_unlock();
10187
10188
10189 ret = i40e_set_promiscuous(pf, pf->cur_promisc);
10190 if (ret)
10191 dev_warn(&pf->pdev->dev,
10192 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
10193 pf->cur_promisc ? "on" : "off",
10194 i40e_stat_str(&pf->hw, ret),
10195 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10196
10197 i40e_reset_all_vfs(pf, true);
10198
10199
10200 i40e_send_version(pf);
10201
10202
10203 goto end_core_reset;
10204
10205end_unlock:
10206 if (!lock_acquired)
10207 rtnl_unlock();
10208end_core_reset:
10209 clear_bit(__I40E_RESET_FAILED, pf->state);
10210clear_recovery:
10211 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
10212 clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
10213}
10214
10215
10216
10217
10218
10219
10220
10221
10222static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
10223 bool lock_acquired)
10224{
10225 int ret;
10226
10227
10228
10229
10230 ret = i40e_reset(pf);
10231 if (!ret)
10232 i40e_rebuild(pf, reinit, lock_acquired);
10233}
10234
10235
10236
10237
10238
10239
10240
10241
10242
10243
10244static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
10245{
10246 i40e_prep_for_reset(pf, lock_acquired);
10247 i40e_reset_and_rebuild(pf, false, lock_acquired);
10248}
10249
10250
10251
10252
10253
10254
10255
10256static void i40e_handle_mdd_event(struct i40e_pf *pf)
10257{
10258 struct i40e_hw *hw = &pf->hw;
10259 bool mdd_detected = false;
10260 struct i40e_vf *vf;
10261 u32 reg;
10262 int i;
10263
10264 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
10265 return;
10266
10267
10268 reg = rd32(hw, I40E_GL_MDET_TX);
10269 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
10270 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
10271 I40E_GL_MDET_TX_PF_NUM_SHIFT;
10272 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
10273 I40E_GL_MDET_TX_VF_NUM_SHIFT;
10274 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
10275 I40E_GL_MDET_TX_EVENT_SHIFT;
10276 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
10277 I40E_GL_MDET_TX_QUEUE_SHIFT) -
10278 pf->hw.func_caps.base_queue;
10279 if (netif_msg_tx_err(pf))
10280 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
10281 event, queue, pf_num, vf_num);
10282 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
10283 mdd_detected = true;
10284 }
10285 reg = rd32(hw, I40E_GL_MDET_RX);
10286 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
10287 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
10288 I40E_GL_MDET_RX_FUNCTION_SHIFT;
10289 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
10290 I40E_GL_MDET_RX_EVENT_SHIFT;
10291 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
10292 I40E_GL_MDET_RX_QUEUE_SHIFT) -
10293 pf->hw.func_caps.base_queue;
10294 if (netif_msg_rx_err(pf))
10295 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
10296 event, queue, func);
10297 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
10298 mdd_detected = true;
10299 }
10300
10301 if (mdd_detected) {
10302 reg = rd32(hw, I40E_PF_MDET_TX);
10303 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
10304 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
10305 dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
10306 }
10307 reg = rd32(hw, I40E_PF_MDET_RX);
10308 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
10309 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
10310 dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
10311 }
10312 }
10313
10314
10315 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
10316 vf = &(pf->vf[i]);
10317 reg = rd32(hw, I40E_VP_MDET_TX(i));
10318 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
10319 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
10320 vf->num_mdd_events++;
10321 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
10322 i);
10323 dev_info(&pf->pdev->dev,
10324 "Use PF Control I/F to re-enable the VF\n");
10325 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10326 }
10327
10328 reg = rd32(hw, I40E_VP_MDET_RX(i));
10329 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
10330 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
10331 vf->num_mdd_events++;
10332 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
10333 i);
10334 dev_info(&pf->pdev->dev,
10335 "Use PF Control I/F to re-enable the VF\n");
10336 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10337 }
10338 }
10339
10340
10341 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
10342 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
10343 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
10344 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
10345 i40e_flush(hw);
10346}
10347
10348static const char *i40e_tunnel_name(u8 type)
10349{
10350 switch (type) {
10351 case UDP_TUNNEL_TYPE_VXLAN:
10352 return "vxlan";
10353 case UDP_TUNNEL_TYPE_GENEVE:
10354 return "geneve";
10355 default:
10356 return "unknown";
10357 }
10358}
10359
10360
10361
10362
10363
10364static void i40e_sync_udp_filters(struct i40e_pf *pf)
10365{
10366 int i;
10367
10368
10369 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
10370 if (pf->udp_ports[i].port)
10371 pf->pending_udp_bitmap |= BIT_ULL(i);
10372 }
10373
10374 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
10375}
10376
10377
10378
10379
10380
10381static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
10382{
10383 struct i40e_hw *hw = &pf->hw;
10384 u8 filter_index, type;
10385 u16 port;
10386 int i;
10387
10388 if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
10389 return;
10390
10391
10392 rtnl_lock();
10393
10394 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
10395 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
10396 struct i40e_udp_port_config *udp_port;
10397 i40e_status ret = 0;
10398
10399 udp_port = &pf->udp_ports[i];
10400 pf->pending_udp_bitmap &= ~BIT_ULL(i);
10401
10402 port = READ_ONCE(udp_port->port);
10403 type = READ_ONCE(udp_port->type);
10404 filter_index = READ_ONCE(udp_port->filter_index);
10405
10406
10407 rtnl_unlock();
10408
10409 if (port)
10410 ret = i40e_aq_add_udp_tunnel(hw, port,
10411 type,
10412 &filter_index,
10413 NULL);
10414 else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
10415 ret = i40e_aq_del_udp_tunnel(hw, filter_index,
10416 NULL);
10417
10418
10419 rtnl_lock();
10420
10421 if (ret) {
10422 dev_info(&pf->pdev->dev,
10423 "%s %s port %d, index %d failed, err %s aq_err %s\n",
10424 i40e_tunnel_name(type),
10425 port ? "add" : "delete",
10426 port,
10427 filter_index,
10428 i40e_stat_str(&pf->hw, ret),
10429 i40e_aq_str(&pf->hw,
10430 pf->hw.aq.asq_last_status));
10431 if (port) {
10432
10433
10434
10435 udp_port->port = 0;
10436 pf->pending_udp_bitmap &= ~BIT_ULL(i);
10437 }
10438 } else if (port) {
10439
10440 udp_port->filter_index = filter_index;
10441 }
10442 }
10443 }
10444
10445 rtnl_unlock();
10446}
10447
10448
10449
10450
10451
10452static void i40e_service_task(struct work_struct *work)
10453{
10454 struct i40e_pf *pf = container_of(work,
10455 struct i40e_pf,
10456 service_task);
10457 unsigned long start_time = jiffies;
10458
10459
10460 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
10461 test_bit(__I40E_SUSPENDED, pf->state))
10462 return;
10463
10464 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
10465 return;
10466
10467 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
10468 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
10469 i40e_sync_filters_subtask(pf);
10470 i40e_reset_subtask(pf);
10471 i40e_handle_mdd_event(pf);
10472 i40e_vc_process_vflr_event(pf);
10473 i40e_watchdog_subtask(pf);
10474 i40e_fdir_reinit_subtask(pf);
10475 if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
10476
10477 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
10478 true);
10479 } else {
10480 i40e_client_subtask(pf);
10481 if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
10482 pf->state))
10483 i40e_notify_client_of_l2_param_changes(
10484 pf->vsi[pf->lan_vsi]);
10485 }
10486 i40e_sync_filters_subtask(pf);
10487 i40e_sync_udp_filters_subtask(pf);
10488 } else {
10489 i40e_reset_subtask(pf);
10490 }
10491
10492 i40e_clean_adminq_subtask(pf);
10493
10494
10495 smp_mb__before_atomic();
10496 clear_bit(__I40E_SERVICE_SCHED, pf->state);
10497
10498
10499
10500
10501
10502 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
10503 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
10504 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
10505 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
10506 i40e_service_event_schedule(pf);
10507}
10508
10509
10510
10511
10512
10513static void i40e_service_timer(struct timer_list *t)
10514{
10515 struct i40e_pf *pf = from_timer(pf, t, service_timer);
10516
10517 mod_timer(&pf->service_timer,
10518 round_jiffies(jiffies + pf->service_timer_period));
10519 i40e_service_event_schedule(pf);
10520}
10521
10522
10523
10524
10525
10526static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
10527{
10528 struct i40e_pf *pf = vsi->back;
10529
10530 switch (vsi->type) {
10531 case I40E_VSI_MAIN:
10532 vsi->alloc_queue_pairs = pf->num_lan_qps;
10533 if (!vsi->num_tx_desc)
10534 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10535 I40E_REQ_DESCRIPTOR_MULTIPLE);
10536 if (!vsi->num_rx_desc)
10537 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10538 I40E_REQ_DESCRIPTOR_MULTIPLE);
10539 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10540 vsi->num_q_vectors = pf->num_lan_msix;
10541 else
10542 vsi->num_q_vectors = 1;
10543
10544 break;
10545
10546 case I40E_VSI_FDIR:
10547 vsi->alloc_queue_pairs = 1;
10548 vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
10549 I40E_REQ_DESCRIPTOR_MULTIPLE);
10550 vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
10551 I40E_REQ_DESCRIPTOR_MULTIPLE);
10552 vsi->num_q_vectors = pf->num_fdsb_msix;
10553 break;
10554
10555 case I40E_VSI_VMDQ2:
10556 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
10557 if (!vsi->num_tx_desc)
10558 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10559 I40E_REQ_DESCRIPTOR_MULTIPLE);
10560 if (!vsi->num_rx_desc)
10561 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10562 I40E_REQ_DESCRIPTOR_MULTIPLE);
10563 vsi->num_q_vectors = pf->num_vmdq_msix;
10564 break;
10565
10566 case I40E_VSI_SRIOV:
10567 vsi->alloc_queue_pairs = pf->num_vf_qps;
10568 if (!vsi->num_tx_desc)
10569 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10570 I40E_REQ_DESCRIPTOR_MULTIPLE);
10571 if (!vsi->num_rx_desc)
10572 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10573 I40E_REQ_DESCRIPTOR_MULTIPLE);
10574 break;
10575
10576 default:
10577 WARN_ON(1);
10578 return -ENODATA;
10579 }
10580
10581 return 0;
10582}
10583
10584
10585
10586
10587
10588
10589
10590
10591
10592static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
10593{
10594 struct i40e_ring **next_rings;
10595 int size;
10596 int ret = 0;
10597
10598
10599 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
10600 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
10601 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
10602 if (!vsi->tx_rings)
10603 return -ENOMEM;
10604 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
10605 if (i40e_enabled_xdp_vsi(vsi)) {
10606 vsi->xdp_rings = next_rings;
10607 next_rings += vsi->alloc_queue_pairs;
10608 }
10609 vsi->rx_rings = next_rings;
10610
10611 if (alloc_qvectors) {
10612
10613 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
10614 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
10615 if (!vsi->q_vectors) {
10616 ret = -ENOMEM;
10617 goto err_vectors;
10618 }
10619 }
10620 return ret;
10621
10622err_vectors:
10623 kfree(vsi->tx_rings);
10624 return ret;
10625}
10626
10627
10628
10629
10630
10631
10632
10633
10634
10635static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
10636{
10637 int ret = -ENODEV;
10638 struct i40e_vsi *vsi;
10639 int vsi_idx;
10640 int i;
10641
10642
10643 mutex_lock(&pf->switch_mutex);
10644
10645
10646
10647
10648
10649
10650
10651 i = pf->next_vsi;
10652 while (i < pf->num_alloc_vsi && pf->vsi[i])
10653 i++;
10654 if (i >= pf->num_alloc_vsi) {
10655 i = 0;
10656 while (i < pf->next_vsi && pf->vsi[i])
10657 i++;
10658 }
10659
10660 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
10661 vsi_idx = i;
10662 } else {
10663 ret = -ENODEV;
10664 goto unlock_pf;
10665 }
10666 pf->next_vsi = ++i;
10667
10668 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
10669 if (!vsi) {
10670 ret = -ENOMEM;
10671 goto unlock_pf;
10672 }
10673 vsi->type = type;
10674 vsi->back = pf;
10675 set_bit(__I40E_VSI_DOWN, vsi->state);
10676 vsi->flags = 0;
10677 vsi->idx = vsi_idx;
10678 vsi->int_rate_limit = 0;
10679 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
10680 pf->rss_table_size : 64;
10681 vsi->netdev_registered = false;
10682 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
10683 hash_init(vsi->mac_filter_hash);
10684 vsi->irqs_ready = false;
10685
10686 if (type == I40E_VSI_MAIN) {
10687 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
10688 if (!vsi->af_xdp_zc_qps)
10689 goto err_rings;
10690 }
10691
10692 ret = i40e_set_num_rings_in_vsi(vsi);
10693 if (ret)
10694 goto err_rings;
10695
10696 ret = i40e_vsi_alloc_arrays(vsi, true);
10697 if (ret)
10698 goto err_rings;
10699
10700
10701 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
10702
10703
10704 spin_lock_init(&vsi->mac_filter_hash_lock);
10705 pf->vsi[vsi_idx] = vsi;
10706 ret = vsi_idx;
10707 goto unlock_pf;
10708
10709err_rings:
10710 bitmap_free(vsi->af_xdp_zc_qps);
10711 pf->next_vsi = i - 1;
10712 kfree(vsi);
10713unlock_pf:
10714 mutex_unlock(&pf->switch_mutex);
10715 return ret;
10716}
10717
10718
10719
10720
10721
10722
10723
10724
10725
10726static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
10727{
10728
10729 if (free_qvectors) {
10730 kfree(vsi->q_vectors);
10731 vsi->q_vectors = NULL;
10732 }
10733 kfree(vsi->tx_rings);
10734 vsi->tx_rings = NULL;
10735 vsi->rx_rings = NULL;
10736 vsi->xdp_rings = NULL;
10737}
10738
10739
10740
10741
10742
10743
10744static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
10745{
10746 if (!vsi)
10747 return;
10748
10749 kfree(vsi->rss_hkey_user);
10750 vsi->rss_hkey_user = NULL;
10751
10752 kfree(vsi->rss_lut_user);
10753 vsi->rss_lut_user = NULL;
10754}
10755
10756
10757
10758
10759
10760static int i40e_vsi_clear(struct i40e_vsi *vsi)
10761{
10762 struct i40e_pf *pf;
10763
10764 if (!vsi)
10765 return 0;
10766
10767 if (!vsi->back)
10768 goto free_vsi;
10769 pf = vsi->back;
10770
10771 mutex_lock(&pf->switch_mutex);
10772 if (!pf->vsi[vsi->idx]) {
10773 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
10774 vsi->idx, vsi->idx, vsi->type);
10775 goto unlock_vsi;
10776 }
10777
10778 if (pf->vsi[vsi->idx] != vsi) {
10779 dev_err(&pf->pdev->dev,
10780 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
10781 pf->vsi[vsi->idx]->idx,
10782 pf->vsi[vsi->idx]->type,
10783 vsi->idx, vsi->type);
10784 goto unlock_vsi;
10785 }
10786
10787
10788 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
10789 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
10790
10791 bitmap_free(vsi->af_xdp_zc_qps);
10792 i40e_vsi_free_arrays(vsi, true);
10793 i40e_clear_rss_config_user(vsi);
10794
10795 pf->vsi[vsi->idx] = NULL;
10796 if (vsi->idx < pf->next_vsi)
10797 pf->next_vsi = vsi->idx;
10798
10799unlock_vsi:
10800 mutex_unlock(&pf->switch_mutex);
10801free_vsi:
10802 kfree(vsi);
10803
10804 return 0;
10805}
10806
10807
10808
10809
10810
10811static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
10812{
10813 int i;
10814
10815 if (vsi->tx_rings && vsi->tx_rings[0]) {
10816 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10817 kfree_rcu(vsi->tx_rings[i], rcu);
10818 vsi->tx_rings[i] = NULL;
10819 vsi->rx_rings[i] = NULL;
10820 if (vsi->xdp_rings)
10821 vsi->xdp_rings[i] = NULL;
10822 }
10823 }
10824}
10825
10826
10827
10828
10829
10830static int i40e_alloc_rings(struct i40e_vsi *vsi)
10831{
10832 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
10833 struct i40e_pf *pf = vsi->back;
10834 struct i40e_ring *ring;
10835
10836
10837 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10838
10839 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
10840 if (!ring)
10841 goto err_out;
10842
10843 ring->queue_index = i;
10844 ring->reg_idx = vsi->base_queue + i;
10845 ring->ring_active = false;
10846 ring->vsi = vsi;
10847 ring->netdev = vsi->netdev;
10848 ring->dev = &pf->pdev->dev;
10849 ring->count = vsi->num_tx_desc;
10850 ring->size = 0;
10851 ring->dcb_tc = 0;
10852 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10853 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10854 ring->itr_setting = pf->tx_itr_default;
10855 vsi->tx_rings[i] = ring++;
10856
10857 if (!i40e_enabled_xdp_vsi(vsi))
10858 goto setup_rx;
10859
10860 ring->queue_index = vsi->alloc_queue_pairs + i;
10861 ring->reg_idx = vsi->base_queue + ring->queue_index;
10862 ring->ring_active = false;
10863 ring->vsi = vsi;
10864 ring->netdev = NULL;
10865 ring->dev = &pf->pdev->dev;
10866 ring->count = vsi->num_tx_desc;
10867 ring->size = 0;
10868 ring->dcb_tc = 0;
10869 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10870 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10871 set_ring_xdp(ring);
10872 ring->itr_setting = pf->tx_itr_default;
10873 vsi->xdp_rings[i] = ring++;
10874
10875setup_rx:
10876 ring->queue_index = i;
10877 ring->reg_idx = vsi->base_queue + i;
10878 ring->ring_active = false;
10879 ring->vsi = vsi;
10880 ring->netdev = vsi->netdev;
10881 ring->dev = &pf->pdev->dev;
10882 ring->count = vsi->num_rx_desc;
10883 ring->size = 0;
10884 ring->dcb_tc = 0;
10885 ring->itr_setting = pf->rx_itr_default;
10886 vsi->rx_rings[i] = ring;
10887 }
10888
10889 return 0;
10890
10891err_out:
10892 i40e_vsi_clear_rings(vsi);
10893 return -ENOMEM;
10894}
10895
10896
10897
10898
10899
10900
10901
10902
10903static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
10904{
10905 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
10906 I40E_MIN_MSIX, vectors);
10907 if (vectors < 0) {
10908 dev_info(&pf->pdev->dev,
10909 "MSI-X vector reservation failed: %d\n", vectors);
10910 vectors = 0;
10911 }
10912
10913 return vectors;
10914}
10915
10916
10917
10918
10919
10920
10921
10922
10923
10924static int i40e_init_msix(struct i40e_pf *pf)
10925{
10926 struct i40e_hw *hw = &pf->hw;
10927 int cpus, extra_vectors;
10928 int vectors_left;
10929 int v_budget, i;
10930 int v_actual;
10931 int iwarp_requested = 0;
10932
10933 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
10934 return -ENODEV;
10935
10936
10937
10938
10939
10940
10941
10942
10943
10944
10945
10946
10947
10948
10949
10950
10951 vectors_left = hw->func_caps.num_msix_vectors;
10952 v_budget = 0;
10953
10954
10955 if (vectors_left) {
10956 v_budget++;
10957 vectors_left--;
10958 }
10959
10960
10961
10962
10963
10964
10965
10966
10967 cpus = num_online_cpus();
10968 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
10969 vectors_left -= pf->num_lan_msix;
10970
10971
10972 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10973 if (vectors_left) {
10974 pf->num_fdsb_msix = 1;
10975 v_budget++;
10976 vectors_left--;
10977 } else {
10978 pf->num_fdsb_msix = 0;
10979 }
10980 }
10981
10982
10983 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10984 iwarp_requested = pf->num_iwarp_msix;
10985
10986 if (!vectors_left)
10987 pf->num_iwarp_msix = 0;
10988 else if (vectors_left < pf->num_iwarp_msix)
10989 pf->num_iwarp_msix = 1;
10990 v_budget += pf->num_iwarp_msix;
10991 vectors_left -= pf->num_iwarp_msix;
10992 }
10993
10994
10995 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
10996 if (!vectors_left) {
10997 pf->num_vmdq_msix = 0;
10998 pf->num_vmdq_qps = 0;
10999 } else {
11000 int vmdq_vecs_wanted =
11001 pf->num_vmdq_vsis * pf->num_vmdq_qps;
11002 int vmdq_vecs =
11003 min_t(int, vectors_left, vmdq_vecs_wanted);
11004
11005
11006
11007
11008
11009
11010
11011 if (vectors_left < vmdq_vecs_wanted) {
11012 pf->num_vmdq_qps = 1;
11013 vmdq_vecs_wanted = pf->num_vmdq_vsis;
11014 vmdq_vecs = min_t(int,
11015 vectors_left,
11016 vmdq_vecs_wanted);
11017 }
11018 pf->num_vmdq_msix = pf->num_vmdq_qps;
11019
11020 v_budget += vmdq_vecs;
11021 vectors_left -= vmdq_vecs;
11022 }
11023 }
11024
11025
11026
11027
11028
11029
11030
11031
11032
11033
11034 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
11035 pf->num_lan_msix += extra_vectors;
11036 vectors_left -= extra_vectors;
11037
11038 WARN(vectors_left < 0,
11039 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
11040
11041 v_budget += pf->num_lan_msix;
11042 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
11043 GFP_KERNEL);
11044 if (!pf->msix_entries)
11045 return -ENOMEM;
11046
11047 for (i = 0; i < v_budget; i++)
11048 pf->msix_entries[i].entry = i;
11049 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
11050
11051 if (v_actual < I40E_MIN_MSIX) {
11052 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
11053 kfree(pf->msix_entries);
11054 pf->msix_entries = NULL;
11055 pci_disable_msix(pf->pdev);
11056 return -ENODEV;
11057
11058 } else if (v_actual == I40E_MIN_MSIX) {
11059
11060 pf->num_vmdq_vsis = 0;
11061 pf->num_vmdq_qps = 0;
11062 pf->num_lan_qps = 1;
11063 pf->num_lan_msix = 1;
11064
11065 } else if (v_actual != v_budget) {
11066
11067
11068
11069
11070
11071 int vec;
11072
11073 dev_info(&pf->pdev->dev,
11074 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
11075 v_actual, v_budget);
11076
11077 vec = v_actual - 1;
11078
11079
11080 pf->num_vmdq_msix = 1;
11081 pf->num_vmdq_vsis = 1;
11082 pf->num_vmdq_qps = 1;
11083
11084
11085 switch (vec) {
11086 case 2:
11087 pf->num_lan_msix = 1;
11088 break;
11089 case 3:
11090 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11091 pf->num_lan_msix = 1;
11092 pf->num_iwarp_msix = 1;
11093 } else {
11094 pf->num_lan_msix = 2;
11095 }
11096 break;
11097 default:
11098 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11099 pf->num_iwarp_msix = min_t(int, (vec / 3),
11100 iwarp_requested);
11101 pf->num_vmdq_vsis = min_t(int, (vec / 3),
11102 I40E_DEFAULT_NUM_VMDQ_VSI);
11103 } else {
11104 pf->num_vmdq_vsis = min_t(int, (vec / 2),
11105 I40E_DEFAULT_NUM_VMDQ_VSI);
11106 }
11107 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11108 pf->num_fdsb_msix = 1;
11109 vec--;
11110 }
11111 pf->num_lan_msix = min_t(int,
11112 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
11113 pf->num_lan_msix);
11114 pf->num_lan_qps = pf->num_lan_msix;
11115 break;
11116 }
11117 }
11118
11119 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
11120 (pf->num_fdsb_msix == 0)) {
11121 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
11122 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11123 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11124 }
11125 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
11126 (pf->num_vmdq_msix == 0)) {
11127 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
11128 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
11129 }
11130
11131 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
11132 (pf->num_iwarp_msix == 0)) {
11133 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
11134 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11135 }
11136 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
11137 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
11138 pf->num_lan_msix,
11139 pf->num_vmdq_msix * pf->num_vmdq_vsis,
11140 pf->num_fdsb_msix,
11141 pf->num_iwarp_msix);
11142
11143 return v_actual;
11144}
11145
11146
11147
11148
11149
11150
11151
11152
11153
11154static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
11155{
11156 struct i40e_q_vector *q_vector;
11157
11158
11159 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
11160 if (!q_vector)
11161 return -ENOMEM;
11162
11163 q_vector->vsi = vsi;
11164 q_vector->v_idx = v_idx;
11165 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
11166
11167 if (vsi->netdev)
11168 netif_napi_add(vsi->netdev, &q_vector->napi,
11169 i40e_napi_poll, NAPI_POLL_WEIGHT);
11170
11171
11172 vsi->q_vectors[v_idx] = q_vector;
11173
11174 return 0;
11175}
11176
11177
11178
11179
11180
11181
11182
11183
11184static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
11185{
11186 struct i40e_pf *pf = vsi->back;
11187 int err, v_idx, num_q_vectors, current_cpu;
11188
11189
11190 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
11191 num_q_vectors = vsi->num_q_vectors;
11192 else if (vsi == pf->vsi[pf->lan_vsi])
11193 num_q_vectors = 1;
11194 else
11195 return -EINVAL;
11196
11197 current_cpu = cpumask_first(cpu_online_mask);
11198
11199 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
11200 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
11201 if (err)
11202 goto err_out;
11203 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
11204 if (unlikely(current_cpu >= nr_cpu_ids))
11205 current_cpu = cpumask_first(cpu_online_mask);
11206 }
11207
11208 return 0;
11209
11210err_out:
11211 while (v_idx--)
11212 i40e_free_q_vector(vsi, v_idx);
11213
11214 return err;
11215}
11216
11217
11218
11219
11220
11221static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
11222{
11223 int vectors = 0;
11224 ssize_t size;
11225
11226 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11227 vectors = i40e_init_msix(pf);
11228 if (vectors < 0) {
11229 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
11230 I40E_FLAG_IWARP_ENABLED |
11231 I40E_FLAG_RSS_ENABLED |
11232 I40E_FLAG_DCB_CAPABLE |
11233 I40E_FLAG_DCB_ENABLED |
11234 I40E_FLAG_SRIOV_ENABLED |
11235 I40E_FLAG_FD_SB_ENABLED |
11236 I40E_FLAG_FD_ATR_ENABLED |
11237 I40E_FLAG_VMDQ_ENABLED);
11238 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11239
11240
11241 i40e_determine_queue_usage(pf);
11242 }
11243 }
11244
11245 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11246 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
11247 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
11248 vectors = pci_enable_msi(pf->pdev);
11249 if (vectors < 0) {
11250 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
11251 vectors);
11252 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
11253 }
11254 vectors = 1;
11255 }
11256
11257 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
11258 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
11259
11260
11261 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
11262 pf->irq_pile = kzalloc(size, GFP_KERNEL);
11263 if (!pf->irq_pile)
11264 return -ENOMEM;
11265
11266 pf->irq_pile->num_entries = vectors;
11267 pf->irq_pile->search_hint = 0;
11268
11269
11270 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
11271
11272 return 0;
11273}
11274
11275
11276
11277
11278
11279
11280
11281
11282
11283static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
11284{
11285 int err, i;
11286
11287
11288
11289
11290
11291 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
11292
11293 err = i40e_init_interrupt_scheme(pf);
11294 if (err)
11295 return err;
11296
11297
11298
11299
11300 for (i = 0; i < pf->num_alloc_vsi; i++) {
11301 if (pf->vsi[i]) {
11302 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
11303 if (err)
11304 goto err_unwind;
11305 i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
11306 }
11307 }
11308
11309 err = i40e_setup_misc_vector(pf);
11310 if (err)
11311 goto err_unwind;
11312
11313 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
11314 i40e_client_update_msix_info(pf);
11315
11316 return 0;
11317
11318err_unwind:
11319 while (i--) {
11320 if (pf->vsi[i])
11321 i40e_vsi_free_q_vectors(pf->vsi[i]);
11322 }
11323
11324 return err;
11325}
11326
11327
11328
11329
11330
11331
11332
11333
11334
11335
11336
11337static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
11338{
11339 int err;
11340
11341 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11342 err = i40e_setup_misc_vector(pf);
11343
11344 if (err) {
11345 dev_info(&pf->pdev->dev,
11346 "MSI-X misc vector request failed, error %d\n",
11347 err);
11348 return err;
11349 }
11350 } else {
11351 u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
11352
11353 err = request_irq(pf->pdev->irq, i40e_intr, flags,
11354 pf->int_name, pf);
11355
11356 if (err) {
11357 dev_info(&pf->pdev->dev,
11358 "MSI/legacy misc vector request failed, error %d\n",
11359 err);
11360 return err;
11361 }
11362 i40e_enable_misc_int_causes(pf);
11363 i40e_irq_dynamic_enable_icr0(pf);
11364 }
11365
11366 return 0;
11367}
11368
11369
11370
11371
11372
11373
11374
11375
11376
11377static int i40e_setup_misc_vector(struct i40e_pf *pf)
11378{
11379 struct i40e_hw *hw = &pf->hw;
11380 int err = 0;
11381
11382
11383 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
11384 err = request_irq(pf->msix_entries[0].vector,
11385 i40e_intr, 0, pf->int_name, pf);
11386 if (err) {
11387 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
11388 dev_info(&pf->pdev->dev,
11389 "request_irq for %s failed: %d\n",
11390 pf->int_name, err);
11391 return -EFAULT;
11392 }
11393 }
11394
11395 i40e_enable_misc_int_causes(pf);
11396
11397
11398 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
11399 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
11400
11401 i40e_flush(hw);
11402
11403 i40e_irq_dynamic_enable_icr0(pf);
11404
11405 return err;
11406}
11407
11408
11409
11410
11411
11412
11413
11414
11415
11416
11417static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
11418 u8 *lut, u16 lut_size)
11419{
11420 struct i40e_pf *pf = vsi->back;
11421 struct i40e_hw *hw = &pf->hw;
11422 int ret = 0;
11423
11424 if (seed) {
11425 ret = i40e_aq_get_rss_key(hw, vsi->id,
11426 (struct i40e_aqc_get_set_rss_key_data *)seed);
11427 if (ret) {
11428 dev_info(&pf->pdev->dev,
11429 "Cannot get RSS key, err %s aq_err %s\n",
11430 i40e_stat_str(&pf->hw, ret),
11431 i40e_aq_str(&pf->hw,
11432 pf->hw.aq.asq_last_status));
11433 return ret;
11434 }
11435 }
11436
11437 if (lut) {
11438 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
11439
11440 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
11441 if (ret) {
11442 dev_info(&pf->pdev->dev,
11443 "Cannot get RSS lut, err %s aq_err %s\n",
11444 i40e_stat_str(&pf->hw, ret),
11445 i40e_aq_str(&pf->hw,
11446 pf->hw.aq.asq_last_status));
11447 return ret;
11448 }
11449 }
11450
11451 return ret;
11452}
11453
11454
11455
11456
11457
11458
11459
11460
11461
11462
11463static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
11464 const u8 *lut, u16 lut_size)
11465{
11466 struct i40e_pf *pf = vsi->back;
11467 struct i40e_hw *hw = &pf->hw;
11468 u16 vf_id = vsi->vf_id;
11469 u8 i;
11470
11471
11472 if (seed) {
11473 u32 *seed_dw = (u32 *)seed;
11474
11475 if (vsi->type == I40E_VSI_MAIN) {
11476 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
11477 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
11478 } else if (vsi->type == I40E_VSI_SRIOV) {
11479 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
11480 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
11481 } else {
11482 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
11483 }
11484 }
11485
11486 if (lut) {
11487 u32 *lut_dw = (u32 *)lut;
11488
11489 if (vsi->type == I40E_VSI_MAIN) {
11490 if (lut_size != I40E_HLUT_ARRAY_SIZE)
11491 return -EINVAL;
11492 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11493 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
11494 } else if (vsi->type == I40E_VSI_SRIOV) {
11495 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
11496 return -EINVAL;
11497 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
11498 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
11499 } else {
11500 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
11501 }
11502 }
11503 i40e_flush(hw);
11504
11505 return 0;
11506}
11507
11508
11509
11510
11511
11512
11513
11514
11515
11516
11517static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
11518 u8 *lut, u16 lut_size)
11519{
11520 struct i40e_pf *pf = vsi->back;
11521 struct i40e_hw *hw = &pf->hw;
11522 u16 i;
11523
11524 if (seed) {
11525 u32 *seed_dw = (u32 *)seed;
11526
11527 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
11528 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
11529 }
11530 if (lut) {
11531 u32 *lut_dw = (u32 *)lut;
11532
11533 if (lut_size != I40E_HLUT_ARRAY_SIZE)
11534 return -EINVAL;
11535 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11536 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
11537 }
11538
11539 return 0;
11540}
11541
11542
11543
11544
11545
11546
11547
11548
11549
11550
11551int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
11552{
11553 struct i40e_pf *pf = vsi->back;
11554
11555 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
11556 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
11557 else
11558 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
11559}
11560
11561
11562
11563
11564
11565
11566
11567
11568
11569
11570int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
11571{
11572 struct i40e_pf *pf = vsi->back;
11573
11574 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
11575 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
11576 else
11577 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
11578}
11579
11580
11581
11582
11583
11584
11585
11586
11587void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
11588 u16 rss_table_size, u16 rss_size)
11589{
11590 u16 i;
11591
11592 for (i = 0; i < rss_table_size; i++)
11593 lut[i] = i % rss_size;
11594}
11595
11596
11597
11598
11599
11600static int i40e_pf_config_rss(struct i40e_pf *pf)
11601{
11602 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11603 u8 seed[I40E_HKEY_ARRAY_SIZE];
11604 u8 *lut;
11605 struct i40e_hw *hw = &pf->hw;
11606 u32 reg_val;
11607 u64 hena;
11608 int ret;
11609
11610
11611 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
11612 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
11613 hena |= i40e_pf_get_default_rss_hena(pf);
11614
11615 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
11616 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
11617
11618
11619 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
11620 reg_val = (pf->rss_table_size == 512) ?
11621 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
11622 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
11623 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
11624
11625
11626 if (!vsi->rss_size) {
11627 u16 qcount;
11628
11629
11630
11631
11632
11633 qcount = vsi->num_queue_pairs /
11634 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
11635 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11636 }
11637 if (!vsi->rss_size)
11638 return -EINVAL;
11639
11640 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
11641 if (!lut)
11642 return -ENOMEM;
11643
11644
11645 if (vsi->rss_lut_user)
11646 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
11647 else
11648 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
11649
11650
11651
11652
11653 if (vsi->rss_hkey_user)
11654 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
11655 else
11656 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
11657 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
11658 kfree(lut);
11659
11660 return ret;
11661}
11662
11663
11664
11665
11666
11667
11668
11669
11670
11671
11672int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
11673{
11674 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11675 int new_rss_size;
11676
11677 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
11678 return 0;
11679
11680 queue_count = min_t(int, queue_count, num_online_cpus());
11681 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
11682
11683 if (queue_count != vsi->num_queue_pairs) {
11684 u16 qcount;
11685
11686 vsi->req_queue_pairs = queue_count;
11687 i40e_prep_for_reset(pf, true);
11688
11689 pf->alloc_rss_size = new_rss_size;
11690
11691 i40e_reset_and_rebuild(pf, true, true);
11692
11693
11694
11695
11696 if (queue_count < vsi->rss_size) {
11697 i40e_clear_rss_config_user(vsi);
11698 dev_dbg(&pf->pdev->dev,
11699 "discard user configured hash keys and lut\n");
11700 }
11701
11702
11703 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
11704 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11705
11706 i40e_pf_config_rss(pf);
11707 }
11708 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
11709 vsi->req_queue_pairs, pf->rss_size_max);
11710 return pf->alloc_rss_size;
11711}
11712
11713
11714
11715
11716
11717i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
11718{
11719 i40e_status status;
11720 bool min_valid, max_valid;
11721 u32 max_bw, min_bw;
11722
11723 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
11724 &min_valid, &max_valid);
11725
11726 if (!status) {
11727 if (min_valid)
11728 pf->min_bw = min_bw;
11729 if (max_valid)
11730 pf->max_bw = max_bw;
11731 }
11732
11733 return status;
11734}
11735
11736
11737
11738
11739
11740i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
11741{
11742 struct i40e_aqc_configure_partition_bw_data bw_data;
11743 i40e_status status;
11744
11745
11746 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
11747 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
11748 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
11749
11750
11751 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
11752
11753 return status;
11754}
11755
11756
11757
11758
11759
11760i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
11761{
11762
11763 enum i40e_admin_queue_err last_aq_status;
11764 i40e_status ret;
11765 u16 nvm_word;
11766
11767 if (pf->hw.partition_id != 1) {
11768 dev_info(&pf->pdev->dev,
11769 "Commit BW only works on partition 1! This is partition %d",
11770 pf->hw.partition_id);
11771 ret = I40E_NOT_SUPPORTED;
11772 goto bw_commit_out;
11773 }
11774
11775
11776 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
11777 last_aq_status = pf->hw.aq.asq_last_status;
11778 if (ret) {
11779 dev_info(&pf->pdev->dev,
11780 "Cannot acquire NVM for read access, err %s aq_err %s\n",
11781 i40e_stat_str(&pf->hw, ret),
11782 i40e_aq_str(&pf->hw, last_aq_status));
11783 goto bw_commit_out;
11784 }
11785
11786
11787 ret = i40e_aq_read_nvm(&pf->hw,
11788 I40E_SR_NVM_CONTROL_WORD,
11789 0x10, sizeof(nvm_word), &nvm_word,
11790 false, NULL);
11791
11792
11793
11794 last_aq_status = pf->hw.aq.asq_last_status;
11795 i40e_release_nvm(&pf->hw);
11796 if (ret) {
11797 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
11798 i40e_stat_str(&pf->hw, ret),
11799 i40e_aq_str(&pf->hw, last_aq_status));
11800 goto bw_commit_out;
11801 }
11802
11803
11804 msleep(50);
11805
11806
11807 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
11808 last_aq_status = pf->hw.aq.asq_last_status;
11809 if (ret) {
11810 dev_info(&pf->pdev->dev,
11811 "Cannot acquire NVM for write access, err %s aq_err %s\n",
11812 i40e_stat_str(&pf->hw, ret),
11813 i40e_aq_str(&pf->hw, last_aq_status));
11814 goto bw_commit_out;
11815 }
11816
11817
11818
11819
11820 ret = i40e_aq_update_nvm(&pf->hw,
11821 I40E_SR_NVM_CONTROL_WORD,
11822 0x10, sizeof(nvm_word),
11823 &nvm_word, true, 0, NULL);
11824
11825
11826
11827 last_aq_status = pf->hw.aq.asq_last_status;
11828 i40e_release_nvm(&pf->hw);
11829 if (ret)
11830 dev_info(&pf->pdev->dev,
11831 "BW settings NOT SAVED, err %s aq_err %s\n",
11832 i40e_stat_str(&pf->hw, ret),
11833 i40e_aq_str(&pf->hw, last_aq_status));
11834bw_commit_out:
11835
11836 return ret;
11837}
11838
11839
11840
11841
11842
11843
11844
11845
11846
11847static int i40e_sw_init(struct i40e_pf *pf)
11848{
11849 int err = 0;
11850 int size;
11851
11852
11853 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
11854 I40E_FLAG_MSI_ENABLED |
11855 I40E_FLAG_MSIX_ENABLED;
11856
11857
11858 pf->rx_itr_default = I40E_ITR_RX_DEF;
11859 pf->tx_itr_default = I40E_ITR_TX_DEF;
11860
11861
11862
11863
11864 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
11865 pf->alloc_rss_size = 1;
11866 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
11867 pf->rss_size_max = min_t(int, pf->rss_size_max,
11868 pf->hw.func_caps.num_tx_qp);
11869 if (pf->hw.func_caps.rss) {
11870 pf->flags |= I40E_FLAG_RSS_ENABLED;
11871 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
11872 num_online_cpus());
11873 }
11874
11875
11876 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
11877 pf->flags |= I40E_FLAG_MFP_ENABLED;
11878 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
11879 if (i40e_get_partition_bw_setting(pf)) {
11880 dev_warn(&pf->pdev->dev,
11881 "Could not get partition bw settings\n");
11882 } else {
11883 dev_info(&pf->pdev->dev,
11884 "Partition BW Min = %8.8x, Max = %8.8x\n",
11885 pf->min_bw, pf->max_bw);
11886
11887
11888 i40e_set_partition_bw_setting(pf);
11889 }
11890 }
11891
11892 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
11893 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
11894 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
11895 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
11896 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
11897 pf->hw.num_partitions > 1)
11898 dev_info(&pf->pdev->dev,
11899 "Flow Director Sideband mode Disabled in MFP mode\n");
11900 else
11901 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11902 pf->fdir_pf_filter_count =
11903 pf->hw.func_caps.fd_filters_guaranteed;
11904 pf->hw.fdir_shared_filter_count =
11905 pf->hw.func_caps.fd_filters_best_effort;
11906 }
11907
11908 if (pf->hw.mac.type == I40E_MAC_X722) {
11909 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
11910 I40E_HW_128_QP_RSS_CAPABLE |
11911 I40E_HW_ATR_EVICT_CAPABLE |
11912 I40E_HW_WB_ON_ITR_CAPABLE |
11913 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
11914 I40E_HW_NO_PCI_LINK_CHECK |
11915 I40E_HW_USE_SET_LLDP_MIB |
11916 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
11917 I40E_HW_PTP_L4_CAPABLE |
11918 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
11919 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
11920
11921#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
11922 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
11923 I40E_FDEVICT_PCTYPE_DEFAULT) {
11924 dev_warn(&pf->pdev->dev,
11925 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
11926 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
11927 }
11928 } else if ((pf->hw.aq.api_maj_ver > 1) ||
11929 ((pf->hw.aq.api_maj_ver == 1) &&
11930 (pf->hw.aq.api_min_ver > 4))) {
11931
11932 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
11933 }
11934
11935
11936 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
11937 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
11938
11939 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11940 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
11941 (pf->hw.aq.fw_maj_ver < 4))) {
11942 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
11943
11944 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
11945 }
11946
11947
11948 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11949 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
11950 (pf->hw.aq.fw_maj_ver < 4)))
11951 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
11952
11953
11954 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11955 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
11956 (pf->hw.aq.fw_maj_ver >= 5)))
11957 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
11958
11959
11960 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11961 pf->hw.aq.fw_maj_ver >= 6)
11962 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
11963
11964 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
11965 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
11966 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
11967 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
11968 }
11969
11970 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
11971 pf->flags |= I40E_FLAG_IWARP_ENABLED;
11972
11973 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
11974 }
11975
11976
11977
11978
11979
11980 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11981 pf->hw.func_caps.npar_enable &&
11982 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
11983 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
11984
11985#ifdef CONFIG_PCI_IOV
11986 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
11987 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
11988 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
11989 pf->num_req_vfs = min_t(int,
11990 pf->hw.func_caps.num_vfs,
11991 I40E_MAX_VF_COUNT);
11992 }
11993#endif
11994 pf->eeprom_version = 0xDEAD;
11995 pf->lan_veb = I40E_NO_VEB;
11996 pf->lan_vsi = I40E_NO_VSI;
11997
11998
11999 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
12000
12001
12002 size = sizeof(struct i40e_lump_tracking)
12003 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
12004 pf->qp_pile = kzalloc(size, GFP_KERNEL);
12005 if (!pf->qp_pile) {
12006 err = -ENOMEM;
12007 goto sw_init_done;
12008 }
12009 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
12010 pf->qp_pile->search_hint = 0;
12011
12012 pf->tx_timeout_recovery_level = 1;
12013
12014 mutex_init(&pf->switch_mutex);
12015
12016sw_init_done:
12017 return err;
12018}
12019
12020
12021
12022
12023
12024
12025
12026
12027bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
12028{
12029 bool need_reset = false;
12030
12031
12032
12033
12034 if (features & NETIF_F_NTUPLE) {
12035
12036 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
12037 need_reset = true;
12038
12039
12040
12041 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
12042 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12043 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
12044 }
12045 } else {
12046
12047 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
12048 need_reset = true;
12049 i40e_fdir_filter_exit(pf);
12050 }
12051 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
12052 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
12053 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
12054
12055
12056 pf->fd_add_err = 0;
12057 pf->fd_atr_cnt = 0;
12058
12059 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
12060 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
12061 (I40E_DEBUG_FD & pf->hw.debug_mask))
12062 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
12063 }
12064 return need_reset;
12065}
12066
12067
12068
12069
12070
12071static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
12072{
12073 struct i40e_pf *pf = vsi->back;
12074 struct i40e_hw *hw = &pf->hw;
12075 u16 vf_id = vsi->vf_id;
12076 u8 i;
12077
12078 if (vsi->type == I40E_VSI_MAIN) {
12079 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12080 wr32(hw, I40E_PFQF_HLUT(i), 0);
12081 } else if (vsi->type == I40E_VSI_SRIOV) {
12082 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12083 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
12084 } else {
12085 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12086 }
12087}
12088
12089
12090
12091
12092
12093
12094
12095static int i40e_set_features(struct net_device *netdev,
12096 netdev_features_t features)
12097{
12098 struct i40e_netdev_priv *np = netdev_priv(netdev);
12099 struct i40e_vsi *vsi = np->vsi;
12100 struct i40e_pf *pf = vsi->back;
12101 bool need_reset;
12102
12103 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
12104 i40e_pf_config_rss(pf);
12105 else if (!(features & NETIF_F_RXHASH) &&
12106 netdev->features & NETIF_F_RXHASH)
12107 i40e_clear_rss_lut(vsi);
12108
12109 if (features & NETIF_F_HW_VLAN_CTAG_RX)
12110 i40e_vlan_stripping_enable(vsi);
12111 else
12112 i40e_vlan_stripping_disable(vsi);
12113
12114 if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
12115 dev_err(&pf->pdev->dev,
12116 "Offloaded tc filters active, can't turn hw_tc_offload off");
12117 return -EINVAL;
12118 }
12119
12120 if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
12121 i40e_del_all_macvlans(vsi);
12122
12123 need_reset = i40e_set_ntuple(pf, features);
12124
12125 if (need_reset)
12126 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12127
12128 return 0;
12129}
12130
12131
12132
12133
12134
12135
12136
12137
12138static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
12139{
12140 u8 i;
12141
12142 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
12143
12144
12145
12146 if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
12147 continue;
12148 if (pf->udp_ports[i].port == port)
12149 return i;
12150 }
12151
12152 return i;
12153}
12154
12155
12156
12157
12158
12159
12160static void i40e_udp_tunnel_add(struct net_device *netdev,
12161 struct udp_tunnel_info *ti)
12162{
12163 struct i40e_netdev_priv *np = netdev_priv(netdev);
12164 struct i40e_vsi *vsi = np->vsi;
12165 struct i40e_pf *pf = vsi->back;
12166 u16 port = ntohs(ti->port);
12167 u8 next_idx;
12168 u8 idx;
12169
12170 idx = i40e_get_udp_port_idx(pf, port);
12171
12172
12173 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
12174 netdev_info(netdev, "port %d already offloaded\n", port);
12175 return;
12176 }
12177
12178
12179 next_idx = i40e_get_udp_port_idx(pf, 0);
12180
12181 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
12182 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
12183 port);
12184 return;
12185 }
12186
12187 switch (ti->type) {
12188 case UDP_TUNNEL_TYPE_VXLAN:
12189 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
12190 break;
12191 case UDP_TUNNEL_TYPE_GENEVE:
12192 if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
12193 return;
12194 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
12195 break;
12196 default:
12197 return;
12198 }
12199
12200
12201 pf->udp_ports[next_idx].port = port;
12202 pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
12203 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
12204 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
12205}
12206
12207
12208
12209
12210
12211
12212static void i40e_udp_tunnel_del(struct net_device *netdev,
12213 struct udp_tunnel_info *ti)
12214{
12215 struct i40e_netdev_priv *np = netdev_priv(netdev);
12216 struct i40e_vsi *vsi = np->vsi;
12217 struct i40e_pf *pf = vsi->back;
12218 u16 port = ntohs(ti->port);
12219 u8 idx;
12220
12221 idx = i40e_get_udp_port_idx(pf, port);
12222
12223
12224 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
12225 goto not_found;
12226
12227 switch (ti->type) {
12228 case UDP_TUNNEL_TYPE_VXLAN:
12229 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
12230 goto not_found;
12231 break;
12232 case UDP_TUNNEL_TYPE_GENEVE:
12233 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
12234 goto not_found;
12235 break;
12236 default:
12237 goto not_found;
12238 }
12239
12240
12241
12242
12243 pf->udp_ports[idx].port = 0;
12244
12245
12246
12247
12248
12249 pf->pending_udp_bitmap ^= BIT_ULL(idx);
12250 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
12251
12252 return;
12253not_found:
12254 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
12255 port);
12256}
12257
12258static int i40e_get_phys_port_id(struct net_device *netdev,
12259 struct netdev_phys_item_id *ppid)
12260{
12261 struct i40e_netdev_priv *np = netdev_priv(netdev);
12262 struct i40e_pf *pf = np->vsi->back;
12263 struct i40e_hw *hw = &pf->hw;
12264
12265 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
12266 return -EOPNOTSUPP;
12267
12268 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
12269 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
12270
12271 return 0;
12272}
12273
12274
12275
12276
12277
12278
12279
12280
12281
12282
12283static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
12284 struct net_device *dev,
12285 const unsigned char *addr, u16 vid,
12286 u16 flags,
12287 struct netlink_ext_ack *extack)
12288{
12289 struct i40e_netdev_priv *np = netdev_priv(dev);
12290 struct i40e_pf *pf = np->vsi->back;
12291 int err = 0;
12292
12293 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
12294 return -EOPNOTSUPP;
12295
12296 if (vid) {
12297 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
12298 return -EINVAL;
12299 }
12300
12301
12302
12303
12304 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
12305 netdev_info(dev, "FDB only supports static addresses\n");
12306 return -EINVAL;
12307 }
12308
12309 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
12310 err = dev_uc_add_excl(dev, addr);
12311 else if (is_multicast_ether_addr(addr))
12312 err = dev_mc_add_excl(dev, addr);
12313 else
12314 err = -EINVAL;
12315
12316
12317 if (err == -EEXIST && !(flags & NLM_F_EXCL))
12318 err = 0;
12319
12320 return err;
12321}
12322
12323
12324
12325
12326
12327
12328
12329
12330
12331
12332
12333
12334
12335
12336
12337
12338
12339static int i40e_ndo_bridge_setlink(struct net_device *dev,
12340 struct nlmsghdr *nlh,
12341 u16 flags,
12342 struct netlink_ext_ack *extack)
12343{
12344 struct i40e_netdev_priv *np = netdev_priv(dev);
12345 struct i40e_vsi *vsi = np->vsi;
12346 struct i40e_pf *pf = vsi->back;
12347 struct i40e_veb *veb = NULL;
12348 struct nlattr *attr, *br_spec;
12349 int i, rem;
12350
12351
12352 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12353 return -EOPNOTSUPP;
12354
12355
12356 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12357 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12358 veb = pf->veb[i];
12359 }
12360
12361 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12362
12363 nla_for_each_nested(attr, br_spec, rem) {
12364 __u16 mode;
12365
12366 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12367 continue;
12368
12369 mode = nla_get_u16(attr);
12370 if ((mode != BRIDGE_MODE_VEPA) &&
12371 (mode != BRIDGE_MODE_VEB))
12372 return -EINVAL;
12373
12374
12375 if (!veb) {
12376 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
12377 vsi->tc_config.enabled_tc);
12378 if (veb) {
12379 veb->bridge_mode = mode;
12380 i40e_config_bridge_mode(veb);
12381 } else {
12382
12383 return -ENOENT;
12384 }
12385 break;
12386 } else if (mode != veb->bridge_mode) {
12387
12388 veb->bridge_mode = mode;
12389
12390 if (mode == BRIDGE_MODE_VEB)
12391 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
12392 else
12393 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
12394 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12395 break;
12396 }
12397 }
12398
12399 return 0;
12400}
12401
12402
12403
12404
12405
12406
12407
12408
12409
12410
12411
12412
12413
12414static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12415 struct net_device *dev,
12416 u32 __always_unused filter_mask,
12417 int nlflags)
12418{
12419 struct i40e_netdev_priv *np = netdev_priv(dev);
12420 struct i40e_vsi *vsi = np->vsi;
12421 struct i40e_pf *pf = vsi->back;
12422 struct i40e_veb *veb = NULL;
12423 int i;
12424
12425
12426 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12427 return -EOPNOTSUPP;
12428
12429
12430 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12431 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12432 veb = pf->veb[i];
12433 }
12434
12435 if (!veb)
12436 return 0;
12437
12438 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
12439 0, 0, nlflags, filter_mask, NULL);
12440}
12441
12442
12443
12444
12445
12446
12447
12448static netdev_features_t i40e_features_check(struct sk_buff *skb,
12449 struct net_device *dev,
12450 netdev_features_t features)
12451{
12452 size_t len;
12453
12454
12455
12456
12457
12458 if (skb->ip_summed != CHECKSUM_PARTIAL)
12459 return features;
12460
12461
12462
12463
12464 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
12465 features &= ~NETIF_F_GSO_MASK;
12466
12467
12468 len = skb_network_header(skb) - skb->data;
12469 if (len & ~(63 * 2))
12470 goto out_err;
12471
12472
12473 len = skb_transport_header(skb) - skb_network_header(skb);
12474 if (len & ~(127 * 4))
12475 goto out_err;
12476
12477 if (skb->encapsulation) {
12478
12479 len = skb_inner_network_header(skb) - skb_transport_header(skb);
12480 if (len & ~(127 * 2))
12481 goto out_err;
12482
12483
12484 len = skb_inner_transport_header(skb) -
12485 skb_inner_network_header(skb);
12486 if (len & ~(127 * 4))
12487 goto out_err;
12488 }
12489
12490
12491
12492
12493
12494
12495 return features;
12496out_err:
12497 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
12498}
12499
12500
12501
12502
12503
12504
12505static int i40e_xdp_setup(struct i40e_vsi *vsi,
12506 struct bpf_prog *prog)
12507{
12508 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
12509 struct i40e_pf *pf = vsi->back;
12510 struct bpf_prog *old_prog;
12511 bool need_reset;
12512 int i;
12513
12514
12515 if (frame_size > vsi->rx_buf_len)
12516 return -EINVAL;
12517
12518 if (!i40e_enabled_xdp_vsi(vsi) && !prog)
12519 return 0;
12520
12521
12522 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
12523
12524 if (need_reset)
12525 i40e_prep_for_reset(pf, true);
12526
12527 old_prog = xchg(&vsi->xdp_prog, prog);
12528
12529 if (need_reset)
12530 i40e_reset_and_rebuild(pf, true, true);
12531
12532 for (i = 0; i < vsi->num_queue_pairs; i++)
12533 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
12534
12535 if (old_prog)
12536 bpf_prog_put(old_prog);
12537
12538
12539
12540
12541 if (need_reset && prog)
12542 for (i = 0; i < vsi->num_queue_pairs; i++)
12543 if (vsi->xdp_rings[i]->xsk_umem)
12544 (void)i40e_xsk_wakeup(vsi->netdev, i,
12545 XDP_WAKEUP_RX);
12546
12547 return 0;
12548}
12549
12550
12551
12552
12553
12554
12555
12556static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
12557{
12558 struct i40e_pf *pf = vsi->back;
12559 int timeout = 50;
12560
12561 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
12562 timeout--;
12563 if (!timeout)
12564 return -EBUSY;
12565 usleep_range(1000, 2000);
12566 }
12567
12568 return 0;
12569}
12570
12571
12572
12573
12574
12575static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
12576{
12577 struct i40e_pf *pf = vsi->back;
12578
12579 clear_bit(__I40E_CONFIG_BUSY, pf->state);
12580}
12581
12582
12583
12584
12585
12586
12587static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
12588{
12589 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
12590 sizeof(vsi->rx_rings[queue_pair]->rx_stats));
12591 memset(&vsi->tx_rings[queue_pair]->stats, 0,
12592 sizeof(vsi->tx_rings[queue_pair]->stats));
12593 if (i40e_enabled_xdp_vsi(vsi)) {
12594 memset(&vsi->xdp_rings[queue_pair]->stats, 0,
12595 sizeof(vsi->xdp_rings[queue_pair]->stats));
12596 }
12597}
12598
12599
12600
12601
12602
12603
12604static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
12605{
12606 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
12607 if (i40e_enabled_xdp_vsi(vsi)) {
12608
12609
12610
12611 synchronize_rcu();
12612 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
12613 }
12614 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
12615}
12616
12617
12618
12619
12620
12621
12622
12623static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
12624 bool enable)
12625{
12626 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12627 struct i40e_q_vector *q_vector = rxr->q_vector;
12628
12629 if (!vsi->netdev)
12630 return;
12631
12632
12633 if (q_vector->rx.ring || q_vector->tx.ring) {
12634 if (enable)
12635 napi_enable(&q_vector->napi);
12636 else
12637 napi_disable(&q_vector->napi);
12638 }
12639}
12640
12641
12642
12643
12644
12645
12646
12647
12648
12649static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
12650 bool enable)
12651{
12652 struct i40e_pf *pf = vsi->back;
12653 int pf_q, ret = 0;
12654
12655 pf_q = vsi->base_queue + queue_pair;
12656 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
12657 false , enable);
12658 if (ret) {
12659 dev_info(&pf->pdev->dev,
12660 "VSI seid %d Tx ring %d %sable timeout\n",
12661 vsi->seid, pf_q, (enable ? "en" : "dis"));
12662 return ret;
12663 }
12664
12665 i40e_control_rx_q(pf, pf_q, enable);
12666 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
12667 if (ret) {
12668 dev_info(&pf->pdev->dev,
12669 "VSI seid %d Rx ring %d %sable timeout\n",
12670 vsi->seid, pf_q, (enable ? "en" : "dis"));
12671 return ret;
12672 }
12673
12674
12675
12676
12677 if (!enable)
12678 mdelay(50);
12679
12680 if (!i40e_enabled_xdp_vsi(vsi))
12681 return ret;
12682
12683 ret = i40e_control_wait_tx_q(vsi->seid, pf,
12684 pf_q + vsi->alloc_queue_pairs,
12685 true , enable);
12686 if (ret) {
12687 dev_info(&pf->pdev->dev,
12688 "VSI seid %d XDP Tx ring %d %sable timeout\n",
12689 vsi->seid, pf_q, (enable ? "en" : "dis"));
12690 }
12691
12692 return ret;
12693}
12694
12695
12696
12697
12698
12699
12700static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
12701{
12702 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12703 struct i40e_pf *pf = vsi->back;
12704 struct i40e_hw *hw = &pf->hw;
12705
12706
12707 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
12708 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
12709 else
12710 i40e_irq_dynamic_enable_icr0(pf);
12711
12712 i40e_flush(hw);
12713}
12714
12715
12716
12717
12718
12719
12720static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
12721{
12722 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12723 struct i40e_pf *pf = vsi->back;
12724 struct i40e_hw *hw = &pf->hw;
12725
12726
12727
12728
12729
12730
12731
12732 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
12733 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
12734
12735 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
12736 i40e_flush(hw);
12737 synchronize_irq(pf->msix_entries[intpf].vector);
12738 } else {
12739
12740 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
12741 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
12742 i40e_flush(hw);
12743 synchronize_irq(pf->pdev->irq);
12744 }
12745}
12746
12747
12748
12749
12750
12751
12752
12753
12754int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
12755{
12756 int err;
12757
12758 err = i40e_enter_busy_conf(vsi);
12759 if (err)
12760 return err;
12761
12762 i40e_queue_pair_disable_irq(vsi, queue_pair);
12763 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false );
12764 i40e_queue_pair_toggle_napi(vsi, queue_pair, false );
12765 i40e_queue_pair_clean_rings(vsi, queue_pair);
12766 i40e_queue_pair_reset_stats(vsi, queue_pair);
12767
12768 return err;
12769}
12770
12771
12772
12773
12774
12775
12776
12777
12778int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
12779{
12780 int err;
12781
12782 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
12783 if (err)
12784 return err;
12785
12786 if (i40e_enabled_xdp_vsi(vsi)) {
12787 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
12788 if (err)
12789 return err;
12790 }
12791
12792 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
12793 if (err)
12794 return err;
12795
12796 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true );
12797 i40e_queue_pair_toggle_napi(vsi, queue_pair, true );
12798 i40e_queue_pair_enable_irq(vsi, queue_pair);
12799
12800 i40e_exit_busy_conf(vsi);
12801
12802 return err;
12803}
12804
12805
12806
12807
12808
12809
12810static int i40e_xdp(struct net_device *dev,
12811 struct netdev_bpf *xdp)
12812{
12813 struct i40e_netdev_priv *np = netdev_priv(dev);
12814 struct i40e_vsi *vsi = np->vsi;
12815
12816 if (vsi->type != I40E_VSI_MAIN)
12817 return -EINVAL;
12818
12819 switch (xdp->command) {
12820 case XDP_SETUP_PROG:
12821 return i40e_xdp_setup(vsi, xdp->prog);
12822 case XDP_QUERY_PROG:
12823 xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
12824 return 0;
12825 case XDP_SETUP_XSK_UMEM:
12826 return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
12827 xdp->xsk.queue_id);
12828 default:
12829 return -EINVAL;
12830 }
12831}
12832
12833static const struct net_device_ops i40e_netdev_ops = {
12834 .ndo_open = i40e_open,
12835 .ndo_stop = i40e_close,
12836 .ndo_start_xmit = i40e_lan_xmit_frame,
12837 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
12838 .ndo_set_rx_mode = i40e_set_rx_mode,
12839 .ndo_validate_addr = eth_validate_addr,
12840 .ndo_set_mac_address = i40e_set_mac,
12841 .ndo_change_mtu = i40e_change_mtu,
12842 .ndo_do_ioctl = i40e_ioctl,
12843 .ndo_tx_timeout = i40e_tx_timeout,
12844 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
12845 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
12846#ifdef CONFIG_NET_POLL_CONTROLLER
12847 .ndo_poll_controller = i40e_netpoll,
12848#endif
12849 .ndo_setup_tc = __i40e_setup_tc,
12850 .ndo_set_features = i40e_set_features,
12851 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
12852 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
12853 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
12854 .ndo_get_vf_config = i40e_ndo_get_vf_config,
12855 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
12856 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
12857 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
12858 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
12859 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
12860 .ndo_get_phys_port_id = i40e_get_phys_port_id,
12861 .ndo_fdb_add = i40e_ndo_fdb_add,
12862 .ndo_features_check = i40e_features_check,
12863 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
12864 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
12865 .ndo_bpf = i40e_xdp,
12866 .ndo_xdp_xmit = i40e_xdp_xmit,
12867 .ndo_xsk_wakeup = i40e_xsk_wakeup,
12868 .ndo_dfwd_add_station = i40e_fwd_add,
12869 .ndo_dfwd_del_station = i40e_fwd_del,
12870};
12871
12872
12873
12874
12875
12876
12877
12878static int i40e_config_netdev(struct i40e_vsi *vsi)
12879{
12880 struct i40e_pf *pf = vsi->back;
12881 struct i40e_hw *hw = &pf->hw;
12882 struct i40e_netdev_priv *np;
12883 struct net_device *netdev;
12884 u8 broadcast[ETH_ALEN];
12885 u8 mac_addr[ETH_ALEN];
12886 int etherdev_size;
12887 netdev_features_t hw_enc_features;
12888 netdev_features_t hw_features;
12889
12890 etherdev_size = sizeof(struct i40e_netdev_priv);
12891 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
12892 if (!netdev)
12893 return -ENOMEM;
12894
12895 vsi->netdev = netdev;
12896 np = netdev_priv(netdev);
12897 np->vsi = vsi;
12898
12899 hw_enc_features = NETIF_F_SG |
12900 NETIF_F_IP_CSUM |
12901 NETIF_F_IPV6_CSUM |
12902 NETIF_F_HIGHDMA |
12903 NETIF_F_SOFT_FEATURES |
12904 NETIF_F_TSO |
12905 NETIF_F_TSO_ECN |
12906 NETIF_F_TSO6 |
12907 NETIF_F_GSO_GRE |
12908 NETIF_F_GSO_GRE_CSUM |
12909 NETIF_F_GSO_PARTIAL |
12910 NETIF_F_GSO_IPXIP4 |
12911 NETIF_F_GSO_IPXIP6 |
12912 NETIF_F_GSO_UDP_TUNNEL |
12913 NETIF_F_GSO_UDP_TUNNEL_CSUM |
12914 NETIF_F_SCTP_CRC |
12915 NETIF_F_RXHASH |
12916 NETIF_F_RXCSUM |
12917 0;
12918
12919 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
12920 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
12921
12922 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
12923
12924 netdev->hw_enc_features |= hw_enc_features;
12925
12926
12927 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
12928
12929
12930 netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
12931
12932 hw_features = hw_enc_features |
12933 NETIF_F_HW_VLAN_CTAG_TX |
12934 NETIF_F_HW_VLAN_CTAG_RX;
12935
12936 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
12937 hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
12938
12939 netdev->hw_features |= hw_features;
12940
12941 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
12942 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
12943
12944 if (vsi->type == I40E_VSI_MAIN) {
12945 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
12946 ether_addr_copy(mac_addr, hw->mac.perm_addr);
12947
12948
12949
12950
12951
12952
12953
12954
12955
12956
12957 i40e_rm_default_mac_filter(vsi, mac_addr);
12958 spin_lock_bh(&vsi->mac_filter_hash_lock);
12959 i40e_add_mac_filter(vsi, mac_addr);
12960 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12961 } else {
12962
12963
12964
12965
12966
12967 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
12968 IFNAMSIZ - 4,
12969 pf->vsi[pf->lan_vsi]->netdev->name);
12970 eth_random_addr(mac_addr);
12971
12972 spin_lock_bh(&vsi->mac_filter_hash_lock);
12973 i40e_add_mac_filter(vsi, mac_addr);
12974 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12975 }
12976
12977
12978
12979
12980
12981
12982
12983
12984
12985
12986
12987
12988
12989
12990 eth_broadcast_addr(broadcast);
12991 spin_lock_bh(&vsi->mac_filter_hash_lock);
12992 i40e_add_mac_filter(vsi, broadcast);
12993 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12994
12995 ether_addr_copy(netdev->dev_addr, mac_addr);
12996 ether_addr_copy(netdev->perm_addr, mac_addr);
12997
12998
12999 netdev->neigh_priv_len = sizeof(u32) * 4;
13000
13001 netdev->priv_flags |= IFF_UNICAST_FLT;
13002 netdev->priv_flags |= IFF_SUPP_NOFCS;
13003
13004 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
13005
13006 netdev->netdev_ops = &i40e_netdev_ops;
13007 netdev->watchdog_timeo = 5 * HZ;
13008 i40e_set_ethtool_ops(netdev);
13009
13010
13011 netdev->min_mtu = ETH_MIN_MTU;
13012 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
13013
13014 return 0;
13015}
13016
13017
13018
13019
13020
13021
13022
13023static void i40e_vsi_delete(struct i40e_vsi *vsi)
13024{
13025
13026 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
13027 return;
13028
13029 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
13030}
13031
13032
13033
13034
13035
13036
13037
13038int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
13039{
13040 struct i40e_veb *veb;
13041 struct i40e_pf *pf = vsi->back;
13042
13043
13044 if (vsi->veb_idx >= I40E_MAX_VEB)
13045 return 1;
13046
13047 veb = pf->veb[vsi->veb_idx];
13048 if (!veb) {
13049 dev_info(&pf->pdev->dev,
13050 "There is no veb associated with the bridge\n");
13051 return -ENOENT;
13052 }
13053
13054
13055 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
13056 return 0;
13057 } else {
13058
13059 return 1;
13060 }
13061
13062
13063 return 0;
13064}
13065
13066
13067
13068
13069
13070
13071
13072
13073static int i40e_add_vsi(struct i40e_vsi *vsi)
13074{
13075 int ret = -ENODEV;
13076 struct i40e_pf *pf = vsi->back;
13077 struct i40e_hw *hw = &pf->hw;
13078 struct i40e_vsi_context ctxt;
13079 struct i40e_mac_filter *f;
13080 struct hlist_node *h;
13081 int bkt;
13082
13083 u8 enabled_tc = 0x1;
13084 int f_count = 0;
13085
13086 memset(&ctxt, 0, sizeof(ctxt));
13087 switch (vsi->type) {
13088 case I40E_VSI_MAIN:
13089
13090
13091
13092
13093
13094 ctxt.seid = pf->main_vsi_seid;
13095 ctxt.pf_num = pf->hw.pf_id;
13096 ctxt.vf_num = 0;
13097 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
13098 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13099 if (ret) {
13100 dev_info(&pf->pdev->dev,
13101 "couldn't get PF vsi config, err %s aq_err %s\n",
13102 i40e_stat_str(&pf->hw, ret),
13103 i40e_aq_str(&pf->hw,
13104 pf->hw.aq.asq_last_status));
13105 return -ENOENT;
13106 }
13107 vsi->info = ctxt.info;
13108 vsi->info.valid_sections = 0;
13109
13110 vsi->seid = ctxt.seid;
13111 vsi->id = ctxt.vsi_number;
13112
13113 enabled_tc = i40e_pf_get_tc_map(pf);
13114
13115
13116
13117
13118
13119 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
13120 memset(&ctxt, 0, sizeof(ctxt));
13121 ctxt.seid = pf->main_vsi_seid;
13122 ctxt.pf_num = pf->hw.pf_id;
13123 ctxt.vf_num = 0;
13124 ctxt.info.valid_sections |=
13125 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13126 ctxt.info.switch_id =
13127 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
13128 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13129 if (ret) {
13130 dev_info(&pf->pdev->dev,
13131 "update vsi failed, err %s aq_err %s\n",
13132 i40e_stat_str(&pf->hw, ret),
13133 i40e_aq_str(&pf->hw,
13134 pf->hw.aq.asq_last_status));
13135 ret = -ENOENT;
13136 goto err;
13137 }
13138 }
13139
13140
13141 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
13142 !(pf->hw.func_caps.iscsi)) {
13143 memset(&ctxt, 0, sizeof(ctxt));
13144 ctxt.seid = pf->main_vsi_seid;
13145 ctxt.pf_num = pf->hw.pf_id;
13146 ctxt.vf_num = 0;
13147 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
13148 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13149 if (ret) {
13150 dev_info(&pf->pdev->dev,
13151 "update vsi failed, err %s aq_err %s\n",
13152 i40e_stat_str(&pf->hw, ret),
13153 i40e_aq_str(&pf->hw,
13154 pf->hw.aq.asq_last_status));
13155 ret = -ENOENT;
13156 goto err;
13157 }
13158
13159 i40e_vsi_update_queue_map(vsi, &ctxt);
13160 vsi->info.valid_sections = 0;
13161 } else {
13162
13163
13164
13165
13166
13167
13168 ret = i40e_vsi_config_tc(vsi, enabled_tc);
13169 if (ret) {
13170
13171
13172
13173 dev_info(&pf->pdev->dev,
13174 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
13175 enabled_tc,
13176 i40e_stat_str(&pf->hw, ret),
13177 i40e_aq_str(&pf->hw,
13178 pf->hw.aq.asq_last_status));
13179 }
13180 }
13181 break;
13182
13183 case I40E_VSI_FDIR:
13184 ctxt.pf_num = hw->pf_id;
13185 ctxt.vf_num = 0;
13186 ctxt.uplink_seid = vsi->uplink_seid;
13187 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13188 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13189 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
13190 (i40e_is_vsi_uplink_mode_veb(vsi))) {
13191 ctxt.info.valid_sections |=
13192 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13193 ctxt.info.switch_id =
13194 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13195 }
13196 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13197 break;
13198
13199 case I40E_VSI_VMDQ2:
13200 ctxt.pf_num = hw->pf_id;
13201 ctxt.vf_num = 0;
13202 ctxt.uplink_seid = vsi->uplink_seid;
13203 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13204 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
13205
13206
13207
13208
13209 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13210 ctxt.info.valid_sections |=
13211 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13212 ctxt.info.switch_id =
13213 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13214 }
13215
13216
13217 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13218 break;
13219
13220 case I40E_VSI_SRIOV:
13221 ctxt.pf_num = hw->pf_id;
13222 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
13223 ctxt.uplink_seid = vsi->uplink_seid;
13224 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13225 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
13226
13227
13228
13229
13230 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13231 ctxt.info.valid_sections |=
13232 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13233 ctxt.info.switch_id =
13234 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13235 }
13236
13237 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
13238 ctxt.info.valid_sections |=
13239 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
13240 ctxt.info.queueing_opt_flags |=
13241 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
13242 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
13243 }
13244
13245 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
13246 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
13247 if (pf->vf[vsi->vf_id].spoofchk) {
13248 ctxt.info.valid_sections |=
13249 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
13250 ctxt.info.sec_flags |=
13251 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
13252 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
13253 }
13254
13255 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13256 break;
13257
13258 case I40E_VSI_IWARP:
13259
13260 break;
13261
13262 default:
13263 return -ENODEV;
13264 }
13265
13266 if (vsi->type != I40E_VSI_MAIN) {
13267 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
13268 if (ret) {
13269 dev_info(&vsi->back->pdev->dev,
13270 "add vsi failed, err %s aq_err %s\n",
13271 i40e_stat_str(&pf->hw, ret),
13272 i40e_aq_str(&pf->hw,
13273 pf->hw.aq.asq_last_status));
13274 ret = -ENOENT;
13275 goto err;
13276 }
13277 vsi->info = ctxt.info;
13278 vsi->info.valid_sections = 0;
13279 vsi->seid = ctxt.seid;
13280 vsi->id = ctxt.vsi_number;
13281 }
13282
13283 vsi->active_filters = 0;
13284 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
13285 spin_lock_bh(&vsi->mac_filter_hash_lock);
13286
13287 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
13288 f->state = I40E_FILTER_NEW;
13289 f_count++;
13290 }
13291 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13292
13293 if (f_count) {
13294 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
13295 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
13296 }
13297
13298
13299 ret = i40e_vsi_get_bw_info(vsi);
13300 if (ret) {
13301 dev_info(&pf->pdev->dev,
13302 "couldn't get vsi bw info, err %s aq_err %s\n",
13303 i40e_stat_str(&pf->hw, ret),
13304 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13305
13306 ret = 0;
13307 }
13308
13309err:
13310 return ret;
13311}
13312
13313
13314
13315
13316
13317
13318
13319int i40e_vsi_release(struct i40e_vsi *vsi)
13320{
13321 struct i40e_mac_filter *f;
13322 struct hlist_node *h;
13323 struct i40e_veb *veb = NULL;
13324 struct i40e_pf *pf;
13325 u16 uplink_seid;
13326 int i, n, bkt;
13327
13328 pf = vsi->back;
13329
13330
13331 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
13332 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
13333 vsi->seid, vsi->uplink_seid);
13334 return -ENODEV;
13335 }
13336 if (vsi == pf->vsi[pf->lan_vsi] &&
13337 !test_bit(__I40E_DOWN, pf->state)) {
13338 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
13339 return -ENODEV;
13340 }
13341
13342 uplink_seid = vsi->uplink_seid;
13343 if (vsi->type != I40E_VSI_SRIOV) {
13344 if (vsi->netdev_registered) {
13345 vsi->netdev_registered = false;
13346 if (vsi->netdev) {
13347
13348 unregister_netdev(vsi->netdev);
13349 }
13350 } else {
13351 i40e_vsi_close(vsi);
13352 }
13353 i40e_vsi_disable_irq(vsi);
13354 }
13355
13356 spin_lock_bh(&vsi->mac_filter_hash_lock);
13357
13358
13359 if (vsi->netdev) {
13360 __dev_uc_unsync(vsi->netdev, NULL);
13361 __dev_mc_unsync(vsi->netdev, NULL);
13362 }
13363
13364
13365 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
13366 __i40e_del_filter(vsi, f);
13367
13368 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13369
13370 i40e_sync_vsi_filters(vsi);
13371
13372 i40e_vsi_delete(vsi);
13373 i40e_vsi_free_q_vectors(vsi);
13374 if (vsi->netdev) {
13375 free_netdev(vsi->netdev);
13376 vsi->netdev = NULL;
13377 }
13378 i40e_vsi_clear_rings(vsi);
13379 i40e_vsi_clear(vsi);
13380
13381
13382
13383
13384
13385
13386
13387
13388
13389 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
13390 if (pf->vsi[i] &&
13391 pf->vsi[i]->uplink_seid == uplink_seid &&
13392 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13393 n++;
13394 }
13395 }
13396 for (i = 0; i < I40E_MAX_VEB; i++) {
13397 if (!pf->veb[i])
13398 continue;
13399 if (pf->veb[i]->uplink_seid == uplink_seid)
13400 n++;
13401 if (pf->veb[i]->seid == uplink_seid)
13402 veb = pf->veb[i];
13403 }
13404 if (n == 0 && veb && veb->uplink_seid != 0)
13405 i40e_veb_release(veb);
13406
13407 return 0;
13408}
13409
13410
13411
13412
13413
13414
13415
13416
13417
13418
13419
13420static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
13421{
13422 int ret = -ENOENT;
13423 struct i40e_pf *pf = vsi->back;
13424
13425 if (vsi->q_vectors[0]) {
13426 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
13427 vsi->seid);
13428 return -EEXIST;
13429 }
13430
13431 if (vsi->base_vector) {
13432 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
13433 vsi->seid, vsi->base_vector);
13434 return -EEXIST;
13435 }
13436
13437 ret = i40e_vsi_alloc_q_vectors(vsi);
13438 if (ret) {
13439 dev_info(&pf->pdev->dev,
13440 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
13441 vsi->num_q_vectors, vsi->seid, ret);
13442 vsi->num_q_vectors = 0;
13443 goto vector_setup_out;
13444 }
13445
13446
13447
13448
13449 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
13450 return ret;
13451 if (vsi->num_q_vectors)
13452 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
13453 vsi->num_q_vectors, vsi->idx);
13454 if (vsi->base_vector < 0) {
13455 dev_info(&pf->pdev->dev,
13456 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
13457 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
13458 i40e_vsi_free_q_vectors(vsi);
13459 ret = -ENOENT;
13460 goto vector_setup_out;
13461 }
13462
13463vector_setup_out:
13464 return ret;
13465}
13466
13467
13468
13469
13470
13471
13472
13473
13474
13475
13476static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
13477{
13478 u16 alloc_queue_pairs;
13479 struct i40e_pf *pf;
13480 u8 enabled_tc;
13481 int ret;
13482
13483 if (!vsi)
13484 return NULL;
13485
13486 pf = vsi->back;
13487
13488 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
13489 i40e_vsi_clear_rings(vsi);
13490
13491 i40e_vsi_free_arrays(vsi, false);
13492 i40e_set_num_rings_in_vsi(vsi);
13493 ret = i40e_vsi_alloc_arrays(vsi, false);
13494 if (ret)
13495 goto err_vsi;
13496
13497 alloc_queue_pairs = vsi->alloc_queue_pairs *
13498 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
13499
13500 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
13501 if (ret < 0) {
13502 dev_info(&pf->pdev->dev,
13503 "failed to get tracking for %d queues for VSI %d err %d\n",
13504 alloc_queue_pairs, vsi->seid, ret);
13505 goto err_vsi;
13506 }
13507 vsi->base_queue = ret;
13508
13509
13510
13511
13512 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
13513 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
13514 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
13515 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
13516 if (vsi->type == I40E_VSI_MAIN)
13517 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
13518
13519
13520 ret = i40e_alloc_rings(vsi);
13521 if (ret)
13522 goto err_rings;
13523
13524
13525 i40e_vsi_map_rings_to_vectors(vsi);
13526 return vsi;
13527
13528err_rings:
13529 i40e_vsi_free_q_vectors(vsi);
13530 if (vsi->netdev_registered) {
13531 vsi->netdev_registered = false;
13532 unregister_netdev(vsi->netdev);
13533 free_netdev(vsi->netdev);
13534 vsi->netdev = NULL;
13535 }
13536 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
13537err_vsi:
13538 i40e_vsi_clear(vsi);
13539 return NULL;
13540}
13541
13542
13543
13544
13545
13546
13547
13548
13549
13550
13551
13552
13553
13554
13555struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
13556 u16 uplink_seid, u32 param1)
13557{
13558 struct i40e_vsi *vsi = NULL;
13559 struct i40e_veb *veb = NULL;
13560 u16 alloc_queue_pairs;
13561 int ret, i;
13562 int v_idx;
13563
13564
13565
13566
13567
13568
13569
13570
13571
13572
13573
13574
13575
13576
13577 for (i = 0; i < I40E_MAX_VEB; i++) {
13578 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
13579 veb = pf->veb[i];
13580 break;
13581 }
13582 }
13583
13584 if (!veb && uplink_seid != pf->mac_seid) {
13585
13586 for (i = 0; i < pf->num_alloc_vsi; i++) {
13587 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
13588 vsi = pf->vsi[i];
13589 break;
13590 }
13591 }
13592 if (!vsi) {
13593 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
13594 uplink_seid);
13595 return NULL;
13596 }
13597
13598 if (vsi->uplink_seid == pf->mac_seid)
13599 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
13600 vsi->tc_config.enabled_tc);
13601 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
13602 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
13603 vsi->tc_config.enabled_tc);
13604 if (veb) {
13605 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
13606 dev_info(&vsi->back->pdev->dev,
13607 "New VSI creation error, uplink seid of LAN VSI expected.\n");
13608 return NULL;
13609 }
13610
13611
13612
13613
13614 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
13615 veb->bridge_mode = BRIDGE_MODE_VEPA;
13616 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
13617 }
13618 i40e_config_bridge_mode(veb);
13619 }
13620 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
13621 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
13622 veb = pf->veb[i];
13623 }
13624 if (!veb) {
13625 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
13626 return NULL;
13627 }
13628
13629 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
13630 uplink_seid = veb->seid;
13631 }
13632
13633
13634 v_idx = i40e_vsi_mem_alloc(pf, type);
13635 if (v_idx < 0)
13636 goto err_alloc;
13637 vsi = pf->vsi[v_idx];
13638 if (!vsi)
13639 goto err_alloc;
13640 vsi->type = type;
13641 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
13642
13643 if (type == I40E_VSI_MAIN)
13644 pf->lan_vsi = v_idx;
13645 else if (type == I40E_VSI_SRIOV)
13646 vsi->vf_id = param1;
13647
13648 alloc_queue_pairs = vsi->alloc_queue_pairs *
13649 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
13650
13651 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
13652 if (ret < 0) {
13653 dev_info(&pf->pdev->dev,
13654 "failed to get tracking for %d queues for VSI %d err=%d\n",
13655 alloc_queue_pairs, vsi->seid, ret);
13656 goto err_vsi;
13657 }
13658 vsi->base_queue = ret;
13659
13660
13661 vsi->uplink_seid = uplink_seid;
13662 ret = i40e_add_vsi(vsi);
13663 if (ret)
13664 goto err_vsi;
13665
13666 switch (vsi->type) {
13667
13668 case I40E_VSI_MAIN:
13669 case I40E_VSI_VMDQ2:
13670 ret = i40e_config_netdev(vsi);
13671 if (ret)
13672 goto err_netdev;
13673 ret = register_netdev(vsi->netdev);
13674 if (ret)
13675 goto err_netdev;
13676 vsi->netdev_registered = true;
13677 netif_carrier_off(vsi->netdev);
13678#ifdef CONFIG_I40E_DCB
13679
13680 i40e_dcbnl_setup(vsi);
13681#endif
13682
13683
13684 case I40E_VSI_FDIR:
13685
13686 ret = i40e_vsi_setup_vectors(vsi);
13687 if (ret)
13688 goto err_msix;
13689
13690 ret = i40e_alloc_rings(vsi);
13691 if (ret)
13692 goto err_rings;
13693
13694
13695 i40e_vsi_map_rings_to_vectors(vsi);
13696
13697 i40e_vsi_reset_stats(vsi);
13698 break;
13699
13700 default:
13701
13702 break;
13703 }
13704
13705 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
13706 (vsi->type == I40E_VSI_VMDQ2)) {
13707 ret = i40e_vsi_config_rss(vsi);
13708 }
13709 return vsi;
13710
13711err_rings:
13712 i40e_vsi_free_q_vectors(vsi);
13713err_msix:
13714 if (vsi->netdev_registered) {
13715 vsi->netdev_registered = false;
13716 unregister_netdev(vsi->netdev);
13717 free_netdev(vsi->netdev);
13718 vsi->netdev = NULL;
13719 }
13720err_netdev:
13721 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
13722err_vsi:
13723 i40e_vsi_clear(vsi);
13724err_alloc:
13725 return NULL;
13726}
13727
13728
13729
13730
13731
13732
13733
13734static int i40e_veb_get_bw_info(struct i40e_veb *veb)
13735{
13736 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
13737 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
13738 struct i40e_pf *pf = veb->pf;
13739 struct i40e_hw *hw = &pf->hw;
13740 u32 tc_bw_max;
13741 int ret = 0;
13742 int i;
13743
13744 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
13745 &bw_data, NULL);
13746 if (ret) {
13747 dev_info(&pf->pdev->dev,
13748 "query veb bw config failed, err %s aq_err %s\n",
13749 i40e_stat_str(&pf->hw, ret),
13750 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
13751 goto out;
13752 }
13753
13754 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
13755 &ets_data, NULL);
13756 if (ret) {
13757 dev_info(&pf->pdev->dev,
13758 "query veb bw ets config failed, err %s aq_err %s\n",
13759 i40e_stat_str(&pf->hw, ret),
13760 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
13761 goto out;
13762 }
13763
13764 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
13765 veb->bw_max_quanta = ets_data.tc_bw_max;
13766 veb->is_abs_credits = bw_data.absolute_credits_enable;
13767 veb->enabled_tc = ets_data.tc_valid_bits;
13768 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
13769 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
13770 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
13771 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
13772 veb->bw_tc_limit_credits[i] =
13773 le16_to_cpu(bw_data.tc_bw_limits[i]);
13774 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
13775 }
13776
13777out:
13778 return ret;
13779}
13780
13781
13782
13783
13784
13785
13786
13787
13788static int i40e_veb_mem_alloc(struct i40e_pf *pf)
13789{
13790 int ret = -ENOENT;
13791 struct i40e_veb *veb;
13792 int i;
13793
13794
13795 mutex_lock(&pf->switch_mutex);
13796
13797
13798
13799
13800
13801
13802
13803 i = 0;
13804 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
13805 i++;
13806 if (i >= I40E_MAX_VEB) {
13807 ret = -ENOMEM;
13808 goto err_alloc_veb;
13809 }
13810
13811 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
13812 if (!veb) {
13813 ret = -ENOMEM;
13814 goto err_alloc_veb;
13815 }
13816 veb->pf = pf;
13817 veb->idx = i;
13818 veb->enabled_tc = 1;
13819
13820 pf->veb[i] = veb;
13821 ret = i;
13822err_alloc_veb:
13823 mutex_unlock(&pf->switch_mutex);
13824 return ret;
13825}
13826
13827
13828
13829
13830
13831
13832
13833
13834static void i40e_switch_branch_release(struct i40e_veb *branch)
13835{
13836 struct i40e_pf *pf = branch->pf;
13837 u16 branch_seid = branch->seid;
13838 u16 veb_idx = branch->idx;
13839 int i;
13840
13841
13842 for (i = 0; i < I40E_MAX_VEB; i++) {
13843 if (!pf->veb[i])
13844 continue;
13845 if (pf->veb[i]->uplink_seid == branch->seid)
13846 i40e_switch_branch_release(pf->veb[i]);
13847 }
13848
13849
13850
13851
13852
13853
13854 for (i = 0; i < pf->num_alloc_vsi; i++) {
13855 if (!pf->vsi[i])
13856 continue;
13857 if (pf->vsi[i]->uplink_seid == branch_seid &&
13858 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13859 i40e_vsi_release(pf->vsi[i]);
13860 }
13861 }
13862
13863
13864
13865
13866
13867
13868 if (pf->veb[veb_idx])
13869 i40e_veb_release(pf->veb[veb_idx]);
13870}
13871
13872
13873
13874
13875
13876static void i40e_veb_clear(struct i40e_veb *veb)
13877{
13878 if (!veb)
13879 return;
13880
13881 if (veb->pf) {
13882 struct i40e_pf *pf = veb->pf;
13883
13884 mutex_lock(&pf->switch_mutex);
13885 if (pf->veb[veb->idx] == veb)
13886 pf->veb[veb->idx] = NULL;
13887 mutex_unlock(&pf->switch_mutex);
13888 }
13889
13890 kfree(veb);
13891}
13892
13893
13894
13895
13896
13897void i40e_veb_release(struct i40e_veb *veb)
13898{
13899 struct i40e_vsi *vsi = NULL;
13900 struct i40e_pf *pf;
13901 int i, n = 0;
13902
13903 pf = veb->pf;
13904
13905
13906 for (i = 0; i < pf->num_alloc_vsi; i++) {
13907 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
13908 n++;
13909 vsi = pf->vsi[i];
13910 }
13911 }
13912 if (n != 1) {
13913 dev_info(&pf->pdev->dev,
13914 "can't remove VEB %d with %d VSIs left\n",
13915 veb->seid, n);
13916 return;
13917 }
13918
13919
13920 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
13921 if (veb->uplink_seid) {
13922 vsi->uplink_seid = veb->uplink_seid;
13923 if (veb->uplink_seid == pf->mac_seid)
13924 vsi->veb_idx = I40E_NO_VEB;
13925 else
13926 vsi->veb_idx = veb->veb_idx;
13927 } else {
13928
13929 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
13930 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
13931 }
13932
13933 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
13934 i40e_veb_clear(veb);
13935}
13936
13937
13938
13939
13940
13941
13942static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
13943{
13944 struct i40e_pf *pf = veb->pf;
13945 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
13946 int ret;
13947
13948 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
13949 veb->enabled_tc, false,
13950 &veb->seid, enable_stats, NULL);
13951
13952
13953 if (ret) {
13954 dev_info(&pf->pdev->dev,
13955 "couldn't add VEB, err %s aq_err %s\n",
13956 i40e_stat_str(&pf->hw, ret),
13957 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13958 return -EPERM;
13959 }
13960
13961
13962 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
13963 &veb->stats_idx, NULL, NULL, NULL);
13964 if (ret) {
13965 dev_info(&pf->pdev->dev,
13966 "couldn't get VEB statistics idx, err %s aq_err %s\n",
13967 i40e_stat_str(&pf->hw, ret),
13968 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13969 return -EPERM;
13970 }
13971 ret = i40e_veb_get_bw_info(veb);
13972 if (ret) {
13973 dev_info(&pf->pdev->dev,
13974 "couldn't get VEB bw info, err %s aq_err %s\n",
13975 i40e_stat_str(&pf->hw, ret),
13976 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13977 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
13978 return -ENOENT;
13979 }
13980
13981 vsi->uplink_seid = veb->seid;
13982 vsi->veb_idx = veb->idx;
13983 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
13984
13985 return 0;
13986}
13987
13988
13989
13990
13991
13992
13993
13994
13995
13996
13997
13998
13999
14000
14001
14002
14003
14004struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
14005 u16 uplink_seid, u16 vsi_seid,
14006 u8 enabled_tc)
14007{
14008 struct i40e_veb *veb, *uplink_veb = NULL;
14009 int vsi_idx, veb_idx;
14010 int ret;
14011
14012
14013 if ((uplink_seid == 0 || vsi_seid == 0) &&
14014 (uplink_seid + vsi_seid != 0)) {
14015 dev_info(&pf->pdev->dev,
14016 "one, not both seid's are 0: uplink=%d vsi=%d\n",
14017 uplink_seid, vsi_seid);
14018 return NULL;
14019 }
14020
14021
14022 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
14023 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
14024 break;
14025 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
14026 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
14027 vsi_seid);
14028 return NULL;
14029 }
14030
14031 if (uplink_seid && uplink_seid != pf->mac_seid) {
14032 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
14033 if (pf->veb[veb_idx] &&
14034 pf->veb[veb_idx]->seid == uplink_seid) {
14035 uplink_veb = pf->veb[veb_idx];
14036 break;
14037 }
14038 }
14039 if (!uplink_veb) {
14040 dev_info(&pf->pdev->dev,
14041 "uplink seid %d not found\n", uplink_seid);
14042 return NULL;
14043 }
14044 }
14045
14046
14047 veb_idx = i40e_veb_mem_alloc(pf);
14048 if (veb_idx < 0)
14049 goto err_alloc;
14050 veb = pf->veb[veb_idx];
14051 veb->flags = flags;
14052 veb->uplink_seid = uplink_seid;
14053 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
14054 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
14055
14056
14057 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
14058 if (ret)
14059 goto err_veb;
14060 if (vsi_idx == pf->lan_vsi)
14061 pf->lan_veb = veb->idx;
14062
14063 return veb;
14064
14065err_veb:
14066 i40e_veb_clear(veb);
14067err_alloc:
14068 return NULL;
14069}
14070
14071
14072
14073
14074
14075
14076
14077
14078
14079
14080static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
14081 struct i40e_aqc_switch_config_element_resp *ele,
14082 u16 num_reported, bool printconfig)
14083{
14084 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
14085 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
14086 u8 element_type = ele->element_type;
14087 u16 seid = le16_to_cpu(ele->seid);
14088
14089 if (printconfig)
14090 dev_info(&pf->pdev->dev,
14091 "type=%d seid=%d uplink=%d downlink=%d\n",
14092 element_type, seid, uplink_seid, downlink_seid);
14093
14094 switch (element_type) {
14095 case I40E_SWITCH_ELEMENT_TYPE_MAC:
14096 pf->mac_seid = seid;
14097 break;
14098 case I40E_SWITCH_ELEMENT_TYPE_VEB:
14099
14100 if (uplink_seid != pf->mac_seid)
14101 break;
14102 if (pf->lan_veb >= I40E_MAX_VEB) {
14103 int v;
14104
14105
14106 for (v = 0; v < I40E_MAX_VEB; v++) {
14107 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
14108 pf->lan_veb = v;
14109 break;
14110 }
14111 }
14112 if (pf->lan_veb >= I40E_MAX_VEB) {
14113 v = i40e_veb_mem_alloc(pf);
14114 if (v < 0)
14115 break;
14116 pf->lan_veb = v;
14117 }
14118 }
14119 if (pf->lan_veb >= I40E_MAX_VEB)
14120 break;
14121
14122 pf->veb[pf->lan_veb]->seid = seid;
14123 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
14124 pf->veb[pf->lan_veb]->pf = pf;
14125 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
14126 break;
14127 case I40E_SWITCH_ELEMENT_TYPE_VSI:
14128 if (num_reported != 1)
14129 break;
14130
14131
14132
14133 pf->mac_seid = uplink_seid;
14134 pf->pf_seid = downlink_seid;
14135 pf->main_vsi_seid = seid;
14136 if (printconfig)
14137 dev_info(&pf->pdev->dev,
14138 "pf_seid=%d main_vsi_seid=%d\n",
14139 pf->pf_seid, pf->main_vsi_seid);
14140 break;
14141 case I40E_SWITCH_ELEMENT_TYPE_PF:
14142 case I40E_SWITCH_ELEMENT_TYPE_VF:
14143 case I40E_SWITCH_ELEMENT_TYPE_EMP:
14144 case I40E_SWITCH_ELEMENT_TYPE_BMC:
14145 case I40E_SWITCH_ELEMENT_TYPE_PE:
14146 case I40E_SWITCH_ELEMENT_TYPE_PA:
14147
14148 break;
14149 default:
14150 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
14151 element_type, seid);
14152 break;
14153 }
14154}
14155
14156
14157
14158
14159
14160
14161
14162
14163
14164int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
14165{
14166 struct i40e_aqc_get_switch_config_resp *sw_config;
14167 u16 next_seid = 0;
14168 int ret = 0;
14169 u8 *aq_buf;
14170 int i;
14171
14172 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
14173 if (!aq_buf)
14174 return -ENOMEM;
14175
14176 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
14177 do {
14178 u16 num_reported, num_total;
14179
14180 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
14181 I40E_AQ_LARGE_BUF,
14182 &next_seid, NULL);
14183 if (ret) {
14184 dev_info(&pf->pdev->dev,
14185 "get switch config failed err %s aq_err %s\n",
14186 i40e_stat_str(&pf->hw, ret),
14187 i40e_aq_str(&pf->hw,
14188 pf->hw.aq.asq_last_status));
14189 kfree(aq_buf);
14190 return -ENOENT;
14191 }
14192
14193 num_reported = le16_to_cpu(sw_config->header.num_reported);
14194 num_total = le16_to_cpu(sw_config->header.num_total);
14195
14196 if (printconfig)
14197 dev_info(&pf->pdev->dev,
14198 "header: %d reported %d total\n",
14199 num_reported, num_total);
14200
14201 for (i = 0; i < num_reported; i++) {
14202 struct i40e_aqc_switch_config_element_resp *ele =
14203 &sw_config->element[i];
14204
14205 i40e_setup_pf_switch_element(pf, ele, num_reported,
14206 printconfig);
14207 }
14208 } while (next_seid != 0);
14209
14210 kfree(aq_buf);
14211 return ret;
14212}
14213
14214
14215
14216
14217
14218
14219
14220
14221static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
14222{
14223 u16 flags = 0;
14224 int ret;
14225
14226
14227 ret = i40e_fetch_switch_configuration(pf, false);
14228 if (ret) {
14229 dev_info(&pf->pdev->dev,
14230 "couldn't fetch switch config, err %s aq_err %s\n",
14231 i40e_stat_str(&pf->hw, ret),
14232 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14233 return ret;
14234 }
14235 i40e_pf_reset_stats(pf);
14236
14237
14238
14239
14240
14241
14242
14243 if ((pf->hw.pf_id == 0) &&
14244 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
14245 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14246 pf->last_sw_conf_flags = flags;
14247 }
14248
14249 if (pf->hw.pf_id == 0) {
14250 u16 valid_flags;
14251
14252 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14253 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
14254 NULL);
14255 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
14256 dev_info(&pf->pdev->dev,
14257 "couldn't set switch config bits, err %s aq_err %s\n",
14258 i40e_stat_str(&pf->hw, ret),
14259 i40e_aq_str(&pf->hw,
14260 pf->hw.aq.asq_last_status));
14261
14262 }
14263 pf->last_sw_conf_valid_flags = valid_flags;
14264 }
14265
14266
14267 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
14268 struct i40e_vsi *vsi = NULL;
14269 u16 uplink_seid;
14270
14271
14272
14273
14274 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
14275 uplink_seid = pf->veb[pf->lan_veb]->seid;
14276 else
14277 uplink_seid = pf->mac_seid;
14278 if (pf->lan_vsi == I40E_NO_VSI)
14279 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
14280 else if (reinit)
14281 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
14282 if (!vsi) {
14283 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
14284 i40e_cloud_filter_exit(pf);
14285 i40e_fdir_teardown(pf);
14286 return -EAGAIN;
14287 }
14288 } else {
14289
14290 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
14291
14292 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
14293 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
14294 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
14295 }
14296 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
14297
14298 i40e_fdir_sb_setup(pf);
14299
14300
14301 ret = i40e_setup_pf_filter_control(pf);
14302 if (ret) {
14303 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
14304 ret);
14305
14306 }
14307
14308
14309
14310
14311 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
14312 i40e_pf_config_rss(pf);
14313
14314
14315 i40e_link_event(pf);
14316
14317
14318 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
14319 I40E_AQ_AN_COMPLETED) ? true : false);
14320
14321 i40e_ptp_init(pf);
14322
14323
14324 i40e_sync_udp_filters(pf);
14325
14326 return ret;
14327}
14328
14329
14330
14331
14332
14333static void i40e_determine_queue_usage(struct i40e_pf *pf)
14334{
14335 int queues_left;
14336 int q_max;
14337
14338 pf->num_lan_qps = 0;
14339
14340
14341
14342
14343
14344 queues_left = pf->hw.func_caps.num_tx_qp;
14345
14346 if ((queues_left == 1) ||
14347 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
14348
14349 queues_left = 0;
14350 pf->alloc_rss_size = pf->num_lan_qps = 1;
14351
14352
14353 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
14354 I40E_FLAG_IWARP_ENABLED |
14355 I40E_FLAG_FD_SB_ENABLED |
14356 I40E_FLAG_FD_ATR_ENABLED |
14357 I40E_FLAG_DCB_CAPABLE |
14358 I40E_FLAG_DCB_ENABLED |
14359 I40E_FLAG_SRIOV_ENABLED |
14360 I40E_FLAG_VMDQ_ENABLED);
14361 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14362 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
14363 I40E_FLAG_FD_SB_ENABLED |
14364 I40E_FLAG_FD_ATR_ENABLED |
14365 I40E_FLAG_DCB_CAPABLE))) {
14366
14367 pf->alloc_rss_size = pf->num_lan_qps = 1;
14368 queues_left -= pf->num_lan_qps;
14369
14370 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
14371 I40E_FLAG_IWARP_ENABLED |
14372 I40E_FLAG_FD_SB_ENABLED |
14373 I40E_FLAG_FD_ATR_ENABLED |
14374 I40E_FLAG_DCB_ENABLED |
14375 I40E_FLAG_VMDQ_ENABLED);
14376 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14377 } else {
14378
14379 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
14380 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
14381 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
14382 I40E_FLAG_DCB_ENABLED);
14383 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
14384 }
14385
14386
14387 q_max = max_t(int, pf->rss_size_max, num_online_cpus());
14388 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
14389 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
14390 pf->num_lan_qps = q_max;
14391
14392 queues_left -= pf->num_lan_qps;
14393 }
14394
14395 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
14396 if (queues_left > 1) {
14397 queues_left -= 1;
14398 } else {
14399 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
14400 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14401 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
14402 }
14403 }
14404
14405 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
14406 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
14407 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
14408 (queues_left / pf->num_vf_qps));
14409 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
14410 }
14411
14412 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
14413 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
14414 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
14415 (queues_left / pf->num_vmdq_qps));
14416 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
14417 }
14418
14419 pf->queues_left = queues_left;
14420 dev_dbg(&pf->pdev->dev,
14421 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
14422 pf->hw.func_caps.num_tx_qp,
14423 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
14424 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
14425 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
14426 queues_left);
14427}
14428
14429
14430
14431
14432
14433
14434
14435
14436
14437
14438
14439
14440static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
14441{
14442 struct i40e_filter_control_settings *settings = &pf->filter_settings;
14443
14444 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
14445
14446
14447 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
14448 settings->enable_fdir = true;
14449
14450
14451 settings->enable_ethtype = true;
14452 settings->enable_macvlan = true;
14453
14454 if (i40e_set_filter_control(&pf->hw, settings))
14455 return -ENOENT;
14456
14457 return 0;
14458}
14459
14460#define INFO_STRING_LEN 255
14461#define REMAIN(__x) (INFO_STRING_LEN - (__x))
14462static void i40e_print_features(struct i40e_pf *pf)
14463{
14464 struct i40e_hw *hw = &pf->hw;
14465 char *buf;
14466 int i;
14467
14468 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
14469 if (!buf)
14470 return;
14471
14472 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
14473#ifdef CONFIG_PCI_IOV
14474 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
14475#endif
14476 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
14477 pf->hw.func_caps.num_vsis,
14478 pf->vsi[pf->lan_vsi]->num_queue_pairs);
14479 if (pf->flags & I40E_FLAG_RSS_ENABLED)
14480 i += snprintf(&buf[i], REMAIN(i), " RSS");
14481 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
14482 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
14483 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
14484 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
14485 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
14486 }
14487 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
14488 i += snprintf(&buf[i], REMAIN(i), " DCB");
14489 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
14490 i += snprintf(&buf[i], REMAIN(i), " Geneve");
14491 if (pf->flags & I40E_FLAG_PTP)
14492 i += snprintf(&buf[i], REMAIN(i), " PTP");
14493 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
14494 i += snprintf(&buf[i], REMAIN(i), " VEB");
14495 else
14496 i += snprintf(&buf[i], REMAIN(i), " VEPA");
14497
14498 dev_info(&pf->pdev->dev, "%s\n", buf);
14499 kfree(buf);
14500 WARN_ON(i > INFO_STRING_LEN);
14501}
14502
14503
14504
14505
14506
14507
14508
14509
14510
14511
14512
14513static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
14514{
14515 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
14516 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
14517}
14518
14519
14520
14521
14522
14523
14524void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
14525{
14526 if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
14527 *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
14528 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
14529 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
14530 *flags |= I40E_FLAG_RS_FEC;
14531 *flags &= ~I40E_FLAG_BASE_R_FEC;
14532 }
14533 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
14534 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
14535 *flags |= I40E_FLAG_BASE_R_FEC;
14536 *flags &= ~I40E_FLAG_RS_FEC;
14537 }
14538 if (fec_cfg == 0)
14539 *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
14540}
14541
14542
14543
14544
14545
14546
14547
14548
14549
14550
14551static bool i40e_check_recovery_mode(struct i40e_pf *pf)
14552{
14553 u32 val = rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
14554 bool is_recovery_mode = false;
14555
14556 if (pf->hw.mac.type == I40E_MAC_XL710)
14557 is_recovery_mode =
14558 val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
14559 val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
14560 val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK ||
14561 val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK;
14562 if (pf->hw.mac.type == I40E_MAC_X722)
14563 is_recovery_mode =
14564 val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
14565 val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK;
14566 if (is_recovery_mode) {
14567 dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
14568 dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
14569 set_bit(__I40E_RECOVERY_MODE, pf->state);
14570
14571 return true;
14572 }
14573 if (test_and_clear_bit(__I40E_RECOVERY_MODE, pf->state))
14574 dev_info(&pf->pdev->dev, "Reinitializing in normal mode with full functionality.\n");
14575
14576 return false;
14577}
14578
14579
14580
14581
14582
14583
14584
14585
14586
14587
14588
14589
14590
14591
14592
14593
14594
14595
14596
14597
14598
14599
14600static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
14601{
14602 const unsigned short MAX_CNT = 1000;
14603 const unsigned short MSECS = 10;
14604 struct i40e_hw *hw = &pf->hw;
14605 i40e_status ret;
14606 int cnt;
14607
14608 for (cnt = 0; cnt < MAX_CNT; ++cnt) {
14609 ret = i40e_pf_reset(hw);
14610 if (!ret)
14611 break;
14612 msleep(MSECS);
14613 }
14614
14615 if (cnt == MAX_CNT) {
14616 dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
14617 return ret;
14618 }
14619
14620 pf->pfr_count++;
14621 return ret;
14622}
14623
14624
14625
14626
14627
14628
14629
14630
14631
14632
14633
14634static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
14635{
14636 struct i40e_vsi *vsi;
14637 int err;
14638 int v_idx;
14639
14640 pci_save_state(pf->pdev);
14641
14642
14643 timer_setup(&pf->service_timer, i40e_service_timer, 0);
14644 pf->service_timer_period = HZ;
14645
14646 INIT_WORK(&pf->service_task, i40e_service_task);
14647 clear_bit(__I40E_SERVICE_SCHED, pf->state);
14648
14649 err = i40e_init_interrupt_scheme(pf);
14650 if (err)
14651 goto err_switch_setup;
14652
14653
14654
14655
14656
14657
14658 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
14659 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
14660 else
14661 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
14662
14663
14664 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
14665 GFP_KERNEL);
14666 if (!pf->vsi) {
14667 err = -ENOMEM;
14668 goto err_switch_setup;
14669 }
14670
14671
14672
14673
14674 v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
14675 if (v_idx < 0)
14676 goto err_switch_setup;
14677 pf->lan_vsi = v_idx;
14678 vsi = pf->vsi[v_idx];
14679 if (!vsi)
14680 goto err_switch_setup;
14681 vsi->alloc_queue_pairs = 1;
14682 err = i40e_config_netdev(vsi);
14683 if (err)
14684 goto err_switch_setup;
14685 err = register_netdev(vsi->netdev);
14686 if (err)
14687 goto err_switch_setup;
14688 vsi->netdev_registered = true;
14689 i40e_dbg_pf_init(pf);
14690
14691 err = i40e_setup_misc_vector_for_recovery_mode(pf);
14692 if (err)
14693 goto err_switch_setup;
14694
14695
14696 i40e_send_version(pf);
14697
14698
14699 mod_timer(&pf->service_timer,
14700 round_jiffies(jiffies + pf->service_timer_period));
14701
14702 return 0;
14703
14704err_switch_setup:
14705 i40e_reset_interrupt_capability(pf);
14706 del_timer_sync(&pf->service_timer);
14707 i40e_shutdown_adminq(hw);
14708 iounmap(hw->hw_addr);
14709 pci_disable_pcie_error_reporting(pf->pdev);
14710 pci_release_mem_regions(pf->pdev);
14711 pci_disable_device(pf->pdev);
14712 kfree(pf);
14713
14714 return err;
14715}
14716
14717
14718
14719
14720
14721
14722
14723
14724
14725
14726
14727
14728static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
14729{
14730 struct i40e_aq_get_phy_abilities_resp abilities;
14731 struct i40e_pf *pf;
14732 struct i40e_hw *hw;
14733 static u16 pfs_found;
14734 u16 wol_nvm_bits;
14735 u16 link_status;
14736 int err;
14737 u32 val;
14738 u32 i;
14739 u8 set_fc_aq_fail;
14740
14741 err = pci_enable_device_mem(pdev);
14742 if (err)
14743 return err;
14744
14745
14746 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
14747 if (err) {
14748 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
14749 if (err) {
14750 dev_err(&pdev->dev,
14751 "DMA configuration failed: 0x%x\n", err);
14752 goto err_dma;
14753 }
14754 }
14755
14756
14757 err = pci_request_mem_regions(pdev, i40e_driver_name);
14758 if (err) {
14759 dev_info(&pdev->dev,
14760 "pci_request_selected_regions failed %d\n", err);
14761 goto err_pci_reg;
14762 }
14763
14764 pci_enable_pcie_error_reporting(pdev);
14765 pci_set_master(pdev);
14766
14767
14768
14769
14770
14771
14772 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
14773 if (!pf) {
14774 err = -ENOMEM;
14775 goto err_pf_alloc;
14776 }
14777 pf->next_vsi = 0;
14778 pf->pdev = pdev;
14779 set_bit(__I40E_DOWN, pf->state);
14780
14781 hw = &pf->hw;
14782 hw->back = pf;
14783
14784 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
14785 I40E_MAX_CSR_SPACE);
14786
14787
14788
14789
14790
14791 if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
14792 dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
14793 pf->ioremap_len);
14794 err = -ENOMEM;
14795 goto err_ioremap;
14796 }
14797 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
14798 if (!hw->hw_addr) {
14799 err = -EIO;
14800 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
14801 (unsigned int)pci_resource_start(pdev, 0),
14802 pf->ioremap_len, err);
14803 goto err_ioremap;
14804 }
14805 hw->vendor_id = pdev->vendor;
14806 hw->device_id = pdev->device;
14807 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
14808 hw->subsystem_vendor_id = pdev->subsystem_vendor;
14809 hw->subsystem_device_id = pdev->subsystem_device;
14810 hw->bus.device = PCI_SLOT(pdev->devfn);
14811 hw->bus.func = PCI_FUNC(pdev->devfn);
14812 hw->bus.bus_id = pdev->bus->number;
14813 pf->instance = pfs_found;
14814
14815
14816
14817
14818 hw->switch_tag = 0xffff;
14819 hw->first_tag = ETH_P_8021AD;
14820 hw->second_tag = ETH_P_8021Q;
14821
14822 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
14823 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
14824 INIT_LIST_HEAD(&pf->ddp_old_prof);
14825
14826
14827
14828
14829 mutex_init(&hw->aq.asq_mutex);
14830 mutex_init(&hw->aq.arq_mutex);
14831
14832 pf->msg_enable = netif_msg_init(debug,
14833 NETIF_MSG_DRV |
14834 NETIF_MSG_PROBE |
14835 NETIF_MSG_LINK);
14836 if (debug < -1)
14837 pf->hw.debug_mask = debug;
14838
14839
14840 if (hw->revision_id == 0 &&
14841 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
14842 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
14843 i40e_flush(hw);
14844 msleep(200);
14845 pf->corer_count++;
14846
14847 i40e_clear_pxe_mode(hw);
14848 }
14849
14850
14851 i40e_clear_hw(hw);
14852
14853 err = i40e_set_mac_type(hw);
14854 if (err) {
14855 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
14856 err);
14857 goto err_pf_reset;
14858 }
14859
14860 err = i40e_pf_loop_reset(pf);
14861 if (err) {
14862 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
14863 goto err_pf_reset;
14864 }
14865
14866 i40e_check_recovery_mode(pf);
14867
14868 hw->aq.num_arq_entries = I40E_AQ_LEN;
14869 hw->aq.num_asq_entries = I40E_AQ_LEN;
14870 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
14871 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
14872 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
14873
14874 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
14875 "%s-%s:misc",
14876 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
14877
14878 err = i40e_init_shared_code(hw);
14879 if (err) {
14880 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
14881 err);
14882 goto err_pf_reset;
14883 }
14884
14885
14886 pf->hw.fc.requested_mode = I40E_FC_NONE;
14887
14888 err = i40e_init_adminq(hw);
14889 if (err) {
14890 if (err == I40E_ERR_FIRMWARE_API_VERSION)
14891 dev_info(&pdev->dev,
14892 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
14893 hw->aq.api_maj_ver,
14894 hw->aq.api_min_ver,
14895 I40E_FW_API_VERSION_MAJOR,
14896 I40E_FW_MINOR_VERSION(hw));
14897 else
14898 dev_info(&pdev->dev,
14899 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
14900
14901 goto err_pf_reset;
14902 }
14903 i40e_get_oem_version(hw);
14904
14905
14906 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
14907 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
14908 hw->aq.api_maj_ver, hw->aq.api_min_ver,
14909 i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
14910 hw->subsystem_vendor_id, hw->subsystem_device_id);
14911
14912 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
14913 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
14914 dev_info(&pdev->dev,
14915 "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
14916 hw->aq.api_maj_ver,
14917 hw->aq.api_min_ver,
14918 I40E_FW_API_VERSION_MAJOR,
14919 I40E_FW_MINOR_VERSION(hw));
14920 else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
14921 dev_info(&pdev->dev,
14922 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
14923 hw->aq.api_maj_ver,
14924 hw->aq.api_min_ver,
14925 I40E_FW_API_VERSION_MAJOR,
14926 I40E_FW_MINOR_VERSION(hw));
14927
14928 i40e_verify_eeprom(pf);
14929
14930
14931 if (hw->revision_id < 1)
14932 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
14933
14934 i40e_clear_pxe_mode(hw);
14935
14936 err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
14937 if (err)
14938 goto err_adminq_setup;
14939
14940 err = i40e_sw_init(pf);
14941 if (err) {
14942 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
14943 goto err_sw_init;
14944 }
14945
14946 if (test_bit(__I40E_RECOVERY_MODE, pf->state))
14947 return i40e_init_recovery_mode(pf, hw);
14948
14949 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
14950 hw->func_caps.num_rx_qp, 0, 0);
14951 if (err) {
14952 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
14953 goto err_init_lan_hmc;
14954 }
14955
14956 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
14957 if (err) {
14958 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
14959 err = -ENOENT;
14960 goto err_configure_lan_hmc;
14961 }
14962
14963
14964
14965
14966
14967 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
14968 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
14969 i40e_aq_stop_lldp(hw, true, false, NULL);
14970 }
14971
14972
14973 i40e_get_platform_mac_addr(pdev, pf);
14974
14975 if (!is_valid_ether_addr(hw->mac.addr)) {
14976 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
14977 err = -EIO;
14978 goto err_mac_addr;
14979 }
14980 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
14981 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
14982 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
14983 if (is_valid_ether_addr(hw->mac.port_addr))
14984 pf->hw_features |= I40E_HW_PORT_ID_VALID;
14985
14986 pci_set_drvdata(pdev, pf);
14987 pci_save_state(pdev);
14988
14989 dev_info(&pdev->dev,
14990 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
14991 "FW LLDP is disabled\n" :
14992 "FW LLDP is enabled\n");
14993
14994
14995 i40e_aq_set_dcb_parameters(hw, true, NULL);
14996
14997#ifdef CONFIG_I40E_DCB
14998 err = i40e_init_pf_dcb(pf);
14999 if (err) {
15000 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
15001 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
15002
15003 }
15004#endif
15005
15006
15007 timer_setup(&pf->service_timer, i40e_service_timer, 0);
15008 pf->service_timer_period = HZ;
15009
15010 INIT_WORK(&pf->service_task, i40e_service_task);
15011 clear_bit(__I40E_SERVICE_SCHED, pf->state);
15012
15013
15014 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
15015 if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1)
15016 pf->wol_en = false;
15017 else
15018 pf->wol_en = true;
15019 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
15020
15021
15022 i40e_determine_queue_usage(pf);
15023 err = i40e_init_interrupt_scheme(pf);
15024 if (err)
15025 goto err_switch_setup;
15026
15027
15028
15029
15030
15031
15032 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
15033 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
15034 else
15035 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15036
15037
15038 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
15039 GFP_KERNEL);
15040 if (!pf->vsi) {
15041 err = -ENOMEM;
15042 goto err_switch_setup;
15043 }
15044
15045#ifdef CONFIG_PCI_IOV
15046
15047 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15048 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
15049 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15050 if (pci_num_vf(pdev))
15051 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
15052 }
15053#endif
15054 err = i40e_setup_pf_switch(pf, false);
15055 if (err) {
15056 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
15057 goto err_vsis;
15058 }
15059 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
15060
15061
15062 err = i40e_set_fc(hw, &set_fc_aq_fail, true);
15063 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
15064 dev_dbg(&pf->pdev->dev,
15065 "Set fc with err %s aq_err %s on get_phy_cap\n",
15066 i40e_stat_str(hw, err),
15067 i40e_aq_str(hw, hw->aq.asq_last_status));
15068 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
15069 dev_dbg(&pf->pdev->dev,
15070 "Set fc with err %s aq_err %s on set_phy_config\n",
15071 i40e_stat_str(hw, err),
15072 i40e_aq_str(hw, hw->aq.asq_last_status));
15073 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
15074 dev_dbg(&pf->pdev->dev,
15075 "Set fc with err %s aq_err %s on get_link_info\n",
15076 i40e_stat_str(hw, err),
15077 i40e_aq_str(hw, hw->aq.asq_last_status));
15078
15079
15080 for (i = 0; i < pf->num_alloc_vsi; i++) {
15081 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
15082 i40e_vsi_open(pf->vsi[i]);
15083 break;
15084 }
15085 }
15086
15087
15088
15089
15090 err = i40e_aq_set_phy_int_mask(&pf->hw,
15091 ~(I40E_AQ_EVENT_LINK_UPDOWN |
15092 I40E_AQ_EVENT_MEDIA_NA |
15093 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
15094 if (err)
15095 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
15096 i40e_stat_str(&pf->hw, err),
15097 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15098
15099
15100
15101
15102
15103 val = rd32(hw, I40E_REG_MSS);
15104 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
15105 val &= ~I40E_REG_MSS_MIN_MASK;
15106 val |= I40E_64BYTE_MSS;
15107 wr32(hw, I40E_REG_MSS, val);
15108 }
15109
15110 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
15111 msleep(75);
15112 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
15113 if (err)
15114 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
15115 i40e_stat_str(&pf->hw, err),
15116 i40e_aq_str(&pf->hw,
15117 pf->hw.aq.asq_last_status));
15118 }
15119
15120
15121
15122
15123 clear_bit(__I40E_DOWN, pf->state);
15124
15125
15126
15127
15128
15129
15130 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
15131 err = i40e_setup_misc_vector(pf);
15132 if (err) {
15133 dev_info(&pdev->dev,
15134 "setup of misc vector failed: %d\n", err);
15135 goto err_vsis;
15136 }
15137 }
15138
15139#ifdef CONFIG_PCI_IOV
15140
15141 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15142 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
15143 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15144
15145 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
15146 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
15147 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
15148 i40e_flush(hw);
15149
15150 if (pci_num_vf(pdev)) {
15151 dev_info(&pdev->dev,
15152 "Active VFs found, allocating resources.\n");
15153 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
15154 if (err)
15155 dev_info(&pdev->dev,
15156 "Error %d allocating resources for existing VFs\n",
15157 err);
15158 }
15159 }
15160#endif
15161
15162 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15163 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
15164 pf->num_iwarp_msix,
15165 I40E_IWARP_IRQ_PILE_ID);
15166 if (pf->iwarp_base_vector < 0) {
15167 dev_info(&pdev->dev,
15168 "failed to get tracking for %d vectors for IWARP err=%d\n",
15169 pf->num_iwarp_msix, pf->iwarp_base_vector);
15170 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
15171 }
15172 }
15173
15174 i40e_dbg_pf_init(pf);
15175
15176
15177 i40e_send_version(pf);
15178
15179
15180 mod_timer(&pf->service_timer,
15181 round_jiffies(jiffies + pf->service_timer_period));
15182
15183
15184 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15185 err = i40e_lan_add_device(pf);
15186 if (err)
15187 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
15188 err);
15189 }
15190
15191#define PCI_SPEED_SIZE 8
15192#define PCI_WIDTH_SIZE 8
15193
15194
15195
15196
15197 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
15198 char speed[PCI_SPEED_SIZE] = "Unknown";
15199 char width[PCI_WIDTH_SIZE] = "Unknown";
15200
15201
15202
15203
15204 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
15205 &link_status);
15206
15207 i40e_set_pci_config_data(hw, link_status);
15208
15209 switch (hw->bus.speed) {
15210 case i40e_bus_speed_8000:
15211 strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
15212 case i40e_bus_speed_5000:
15213 strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
15214 case i40e_bus_speed_2500:
15215 strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
15216 default:
15217 break;
15218 }
15219 switch (hw->bus.width) {
15220 case i40e_bus_width_pcie_x8:
15221 strlcpy(width, "8", PCI_WIDTH_SIZE); break;
15222 case i40e_bus_width_pcie_x4:
15223 strlcpy(width, "4", PCI_WIDTH_SIZE); break;
15224 case i40e_bus_width_pcie_x2:
15225 strlcpy(width, "2", PCI_WIDTH_SIZE); break;
15226 case i40e_bus_width_pcie_x1:
15227 strlcpy(width, "1", PCI_WIDTH_SIZE); break;
15228 default:
15229 break;
15230 }
15231
15232 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
15233 speed, width);
15234
15235 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
15236 hw->bus.speed < i40e_bus_speed_8000) {
15237 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
15238 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
15239 }
15240 }
15241
15242
15243 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
15244 if (err)
15245 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
15246 i40e_stat_str(&pf->hw, err),
15247 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15248 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
15249
15250
15251 i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
15252
15253
15254 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
15255 if (err)
15256 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
15257 i40e_stat_str(&pf->hw, err),
15258 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15259
15260
15261
15262
15263
15264
15265
15266 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
15267 pf->main_vsi_seid);
15268
15269 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
15270 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
15271 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
15272 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
15273 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
15274
15275 i40e_print_features(pf);
15276
15277 return 0;
15278
15279
15280err_vsis:
15281 set_bit(__I40E_DOWN, pf->state);
15282 i40e_clear_interrupt_scheme(pf);
15283 kfree(pf->vsi);
15284err_switch_setup:
15285 i40e_reset_interrupt_capability(pf);
15286 del_timer_sync(&pf->service_timer);
15287err_mac_addr:
15288err_configure_lan_hmc:
15289 (void)i40e_shutdown_lan_hmc(hw);
15290err_init_lan_hmc:
15291 kfree(pf->qp_pile);
15292err_sw_init:
15293err_adminq_setup:
15294err_pf_reset:
15295 iounmap(hw->hw_addr);
15296err_ioremap:
15297 kfree(pf);
15298err_pf_alloc:
15299 pci_disable_pcie_error_reporting(pdev);
15300 pci_release_mem_regions(pdev);
15301err_pci_reg:
15302err_dma:
15303 pci_disable_device(pdev);
15304 return err;
15305}
15306
15307
15308
15309
15310
15311
15312
15313
15314
15315
15316static void i40e_remove(struct pci_dev *pdev)
15317{
15318 struct i40e_pf *pf = pci_get_drvdata(pdev);
15319 struct i40e_hw *hw = &pf->hw;
15320 i40e_status ret_code;
15321 int i;
15322
15323 i40e_dbg_pf_exit(pf);
15324
15325 i40e_ptp_stop(pf);
15326
15327
15328 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
15329 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
15330
15331
15332 set_bit(__I40E_SUSPENDED, pf->state);
15333 set_bit(__I40E_DOWN, pf->state);
15334 if (pf->service_timer.function)
15335 del_timer_sync(&pf->service_timer);
15336 if (pf->service_task.func)
15337 cancel_work_sync(&pf->service_task);
15338
15339 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
15340 struct i40e_vsi *vsi = pf->vsi[0];
15341
15342
15343
15344
15345
15346 unregister_netdev(vsi->netdev);
15347 free_netdev(vsi->netdev);
15348
15349 goto unmap;
15350 }
15351
15352
15353
15354
15355 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
15356
15357 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
15358 i40e_free_vfs(pf);
15359 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
15360 }
15361
15362 i40e_fdir_teardown(pf);
15363
15364
15365
15366
15367 for (i = 0; i < I40E_MAX_VEB; i++) {
15368 if (!pf->veb[i])
15369 continue;
15370
15371 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
15372 pf->veb[i]->uplink_seid == 0)
15373 i40e_switch_branch_release(pf->veb[i]);
15374 }
15375
15376
15377
15378
15379 if (pf->vsi[pf->lan_vsi])
15380 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
15381
15382 i40e_cloud_filter_exit(pf);
15383
15384
15385 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15386 ret_code = i40e_lan_del_device(pf);
15387 if (ret_code)
15388 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
15389 ret_code);
15390 }
15391
15392
15393 if (hw->hmc.hmc_obj) {
15394 ret_code = i40e_shutdown_lan_hmc(hw);
15395 if (ret_code)
15396 dev_warn(&pdev->dev,
15397 "Failed to destroy the HMC resources: %d\n",
15398 ret_code);
15399 }
15400
15401unmap:
15402
15403 if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
15404 !(pf->flags & I40E_FLAG_MSIX_ENABLED))
15405 free_irq(pf->pdev->irq, pf);
15406
15407
15408 i40e_shutdown_adminq(hw);
15409
15410
15411 mutex_destroy(&hw->aq.arq_mutex);
15412 mutex_destroy(&hw->aq.asq_mutex);
15413
15414
15415 rtnl_lock();
15416 i40e_clear_interrupt_scheme(pf);
15417 for (i = 0; i < pf->num_alloc_vsi; i++) {
15418 if (pf->vsi[i]) {
15419 if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
15420 i40e_vsi_clear_rings(pf->vsi[i]);
15421 i40e_vsi_clear(pf->vsi[i]);
15422 pf->vsi[i] = NULL;
15423 }
15424 }
15425 rtnl_unlock();
15426
15427 for (i = 0; i < I40E_MAX_VEB; i++) {
15428 kfree(pf->veb[i]);
15429 pf->veb[i] = NULL;
15430 }
15431
15432 kfree(pf->qp_pile);
15433 kfree(pf->vsi);
15434
15435 iounmap(hw->hw_addr);
15436 kfree(pf);
15437 pci_release_mem_regions(pdev);
15438
15439 pci_disable_pcie_error_reporting(pdev);
15440 pci_disable_device(pdev);
15441}
15442
15443
15444
15445
15446
15447
15448
15449
15450
15451
15452static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
15453 enum pci_channel_state error)
15454{
15455 struct i40e_pf *pf = pci_get_drvdata(pdev);
15456
15457 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
15458
15459 if (!pf) {
15460 dev_info(&pdev->dev,
15461 "Cannot recover - error happened during device probe\n");
15462 return PCI_ERS_RESULT_DISCONNECT;
15463 }
15464
15465
15466 if (!test_bit(__I40E_SUSPENDED, pf->state))
15467 i40e_prep_for_reset(pf, false);
15468
15469
15470 return PCI_ERS_RESULT_NEED_RESET;
15471}
15472
15473
15474
15475
15476
15477
15478
15479
15480
15481
15482static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
15483{
15484 struct i40e_pf *pf = pci_get_drvdata(pdev);
15485 pci_ers_result_t result;
15486 u32 reg;
15487
15488 dev_dbg(&pdev->dev, "%s\n", __func__);
15489 if (pci_enable_device_mem(pdev)) {
15490 dev_info(&pdev->dev,
15491 "Cannot re-enable PCI device after reset.\n");
15492 result = PCI_ERS_RESULT_DISCONNECT;
15493 } else {
15494 pci_set_master(pdev);
15495 pci_restore_state(pdev);
15496 pci_save_state(pdev);
15497 pci_wake_from_d3(pdev, false);
15498
15499 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
15500 if (reg == 0)
15501 result = PCI_ERS_RESULT_RECOVERED;
15502 else
15503 result = PCI_ERS_RESULT_DISCONNECT;
15504 }
15505
15506 return result;
15507}
15508
15509
15510
15511
15512
15513static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
15514{
15515 struct i40e_pf *pf = pci_get_drvdata(pdev);
15516
15517 i40e_prep_for_reset(pf, false);
15518}
15519
15520
15521
15522
15523
15524static void i40e_pci_error_reset_done(struct pci_dev *pdev)
15525{
15526 struct i40e_pf *pf = pci_get_drvdata(pdev);
15527
15528 i40e_reset_and_rebuild(pf, false, false);
15529}
15530
15531
15532
15533
15534
15535
15536
15537
15538static void i40e_pci_error_resume(struct pci_dev *pdev)
15539{
15540 struct i40e_pf *pf = pci_get_drvdata(pdev);
15541
15542 dev_dbg(&pdev->dev, "%s\n", __func__);
15543 if (test_bit(__I40E_SUSPENDED, pf->state))
15544 return;
15545
15546 i40e_handle_reset_warning(pf, false);
15547}
15548
15549
15550
15551
15552
15553
15554static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
15555{
15556 struct i40e_hw *hw = &pf->hw;
15557 i40e_status ret;
15558 u8 mac_addr[6];
15559 u16 flags = 0;
15560
15561
15562 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
15563 ether_addr_copy(mac_addr,
15564 pf->vsi[pf->lan_vsi]->netdev->dev_addr);
15565 } else {
15566 dev_err(&pf->pdev->dev,
15567 "Failed to retrieve MAC address; using default\n");
15568 ether_addr_copy(mac_addr, hw->mac.addr);
15569 }
15570
15571
15572
15573
15574
15575 flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
15576
15577 if (hw->func_caps.flex10_enable && hw->partition_id != 1)
15578 flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
15579
15580 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
15581 if (ret) {
15582 dev_err(&pf->pdev->dev,
15583 "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
15584 return;
15585 }
15586
15587 flags = I40E_AQC_MC_MAG_EN
15588 | I40E_AQC_WOL_PRESERVE_ON_PFR
15589 | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
15590 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
15591 if (ret)
15592 dev_err(&pf->pdev->dev,
15593 "Failed to enable Multicast Magic Packet wake up\n");
15594}
15595
15596
15597
15598
15599
15600static void i40e_shutdown(struct pci_dev *pdev)
15601{
15602 struct i40e_pf *pf = pci_get_drvdata(pdev);
15603 struct i40e_hw *hw = &pf->hw;
15604
15605 set_bit(__I40E_SUSPENDED, pf->state);
15606 set_bit(__I40E_DOWN, pf->state);
15607
15608 del_timer_sync(&pf->service_timer);
15609 cancel_work_sync(&pf->service_task);
15610 i40e_cloud_filter_exit(pf);
15611 i40e_fdir_teardown(pf);
15612
15613
15614
15615
15616 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
15617
15618 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
15619 i40e_enable_mc_magic_wake(pf);
15620
15621 i40e_prep_for_reset(pf, false);
15622
15623 wr32(hw, I40E_PFPM_APM,
15624 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
15625 wr32(hw, I40E_PFPM_WUFC,
15626 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
15627
15628
15629 if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
15630 !(pf->flags & I40E_FLAG_MSIX_ENABLED))
15631 free_irq(pf->pdev->irq, pf);
15632
15633
15634
15635
15636
15637 rtnl_lock();
15638 i40e_clear_interrupt_scheme(pf);
15639 rtnl_unlock();
15640
15641 if (system_state == SYSTEM_POWER_OFF) {
15642 pci_wake_from_d3(pdev, pf->wol_en);
15643 pci_set_power_state(pdev, PCI_D3hot);
15644 }
15645}
15646
15647
15648
15649
15650
15651static int __maybe_unused i40e_suspend(struct device *dev)
15652{
15653 struct i40e_pf *pf = dev_get_drvdata(dev);
15654 struct i40e_hw *hw = &pf->hw;
15655
15656
15657 if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
15658 return 0;
15659
15660 set_bit(__I40E_DOWN, pf->state);
15661
15662
15663 del_timer_sync(&pf->service_timer);
15664 cancel_work_sync(&pf->service_task);
15665
15666
15667
15668
15669 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
15670
15671 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
15672 i40e_enable_mc_magic_wake(pf);
15673
15674
15675
15676
15677
15678 rtnl_lock();
15679
15680 i40e_prep_for_reset(pf, true);
15681
15682 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
15683 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
15684
15685
15686
15687
15688
15689
15690 i40e_clear_interrupt_scheme(pf);
15691
15692 rtnl_unlock();
15693
15694 return 0;
15695}
15696
15697
15698
15699
15700
15701static int __maybe_unused i40e_resume(struct device *dev)
15702{
15703 struct i40e_pf *pf = dev_get_drvdata(dev);
15704 int err;
15705
15706
15707 if (!test_bit(__I40E_SUSPENDED, pf->state))
15708 return 0;
15709
15710
15711
15712
15713 rtnl_lock();
15714
15715
15716
15717
15718 err = i40e_restore_interrupt_scheme(pf);
15719 if (err) {
15720 dev_err(dev, "Cannot restore interrupt scheme: %d\n",
15721 err);
15722 }
15723
15724 clear_bit(__I40E_DOWN, pf->state);
15725 i40e_reset_and_rebuild(pf, false, true);
15726
15727 rtnl_unlock();
15728
15729
15730 clear_bit(__I40E_SUSPENDED, pf->state);
15731
15732
15733 mod_timer(&pf->service_timer,
15734 round_jiffies(jiffies + pf->service_timer_period));
15735
15736 return 0;
15737}
15738
15739static const struct pci_error_handlers i40e_err_handler = {
15740 .error_detected = i40e_pci_error_detected,
15741 .slot_reset = i40e_pci_error_slot_reset,
15742 .reset_prepare = i40e_pci_error_reset_prepare,
15743 .reset_done = i40e_pci_error_reset_done,
15744 .resume = i40e_pci_error_resume,
15745};
15746
15747static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
15748
15749static struct pci_driver i40e_driver = {
15750 .name = i40e_driver_name,
15751 .id_table = i40e_pci_tbl,
15752 .probe = i40e_probe,
15753 .remove = i40e_remove,
15754 .driver = {
15755 .pm = &i40e_pm_ops,
15756 },
15757 .shutdown = i40e_shutdown,
15758 .err_handler = &i40e_err_handler,
15759 .sriov_configure = i40e_pci_sriov_configure,
15760};
15761
15762
15763
15764
15765
15766
15767
15768static int __init i40e_init_module(void)
15769{
15770 pr_info("%s: %s - version %s\n", i40e_driver_name,
15771 i40e_driver_string, i40e_driver_version_str);
15772 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
15773
15774
15775
15776
15777
15778
15779
15780
15781 i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
15782 if (!i40e_wq) {
15783 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
15784 return -ENOMEM;
15785 }
15786
15787 i40e_dbg_init();
15788 return pci_register_driver(&i40e_driver);
15789}
15790module_init(i40e_init_module);
15791
15792
15793
15794
15795
15796
15797
15798static void __exit i40e_exit_module(void)
15799{
15800 pci_unregister_driver(&i40e_driver);
15801 destroy_workqueue(i40e_wq);
15802 i40e_dbg_exit();
15803}
15804module_exit(i40e_exit_module);
15805