#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>

#include "i40e.h"
#include "i40e_diag.h"
#include "i40e_xsk.h"
#include <net/udp_tunnel.h>
#include <net/xdp_sock.h>

#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
	"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 8
#define DRV_VERSION_BUILD 10
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." \
	__stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type);

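/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */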
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40e_wq;
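/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/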
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}
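/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/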
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}
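/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/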
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}
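/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/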
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}
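/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search is a simple linear scan starting at search_hint; most
 * requests are for lumps of the same size, so fragmentation and search
 * time stay small.
 **/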
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%s needed=%d id=0x%04x\n",
			 pile ? "<valid>" : "<null>", needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
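/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/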
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
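/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/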
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}
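/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/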
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}
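/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/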
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	/* don't kick off another recovery if one is already pending */
	if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
		return;

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						 tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
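/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/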
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}
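/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to read stats from
 * @stats: the stats struct to update
 **/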
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes += bytes;
}
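/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev
 * @netdev: network interface device structure
 * @stats: data storage structure
 *
 * Gathers the per-ring Tx/Rx counters and the VSI-level counters that
 * are refreshed by the service task.
 **/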
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	struct i40e_ring *ring;
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		ring = READ_ONCE(vsi->tx_rings[i]);
		if (!ring)
			continue;
		i40e_get_netdev_stats_struct_tx(ring, stats);

		if (i40e_enabled_xdp_vsi(vsi)) {
			ring++;
			i40e_get_netdev_stats_struct_tx(ring, stats);
		}

		ring++;
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_dropped = vsi_stats->rx_dropped;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;
}
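/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/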
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}
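/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/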
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}
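/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they will not be
 * zeroed when the driver starts.  We save the first value read as an
 * offset to be subtracted from the raw values, so the reported stats
 * count from zero, and handle the 48-bit roll-over in the process.
 **/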
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
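/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/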
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}
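/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/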
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
	u32 new_data = rd32(hw, reg);

	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
	*stat += new_data;
}
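/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/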
609void i40e_update_eth_stats(struct i40e_vsi *vsi)
610{
611 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
612 struct i40e_pf *pf = vsi->back;
613 struct i40e_hw *hw = &pf->hw;
614 struct i40e_eth_stats *oes;
615 struct i40e_eth_stats *es;
616
617 es = &vsi->eth_stats;
618 oes = &vsi->eth_stats_offsets;
619
620
621 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
622 vsi->stat_offsets_loaded,
623 &oes->tx_errors, &es->tx_errors);
624 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
625 vsi->stat_offsets_loaded,
626 &oes->rx_discards, &es->rx_discards);
627 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
628 vsi->stat_offsets_loaded,
629 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
630 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
631 vsi->stat_offsets_loaded,
632 &oes->tx_errors, &es->tx_errors);
633
634 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
635 I40E_GLV_GORCL(stat_idx),
636 vsi->stat_offsets_loaded,
637 &oes->rx_bytes, &es->rx_bytes);
638 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
639 I40E_GLV_UPRCL(stat_idx),
640 vsi->stat_offsets_loaded,
641 &oes->rx_unicast, &es->rx_unicast);
642 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
643 I40E_GLV_MPRCL(stat_idx),
644 vsi->stat_offsets_loaded,
645 &oes->rx_multicast, &es->rx_multicast);
646 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
647 I40E_GLV_BPRCL(stat_idx),
648 vsi->stat_offsets_loaded,
649 &oes->rx_broadcast, &es->rx_broadcast);
650
651 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
652 I40E_GLV_GOTCL(stat_idx),
653 vsi->stat_offsets_loaded,
654 &oes->tx_bytes, &es->tx_bytes);
655 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
656 I40E_GLV_UPTCL(stat_idx),
657 vsi->stat_offsets_loaded,
658 &oes->tx_unicast, &es->tx_unicast);
659 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
660 I40E_GLV_MPTCL(stat_idx),
661 vsi->stat_offsets_loaded,
662 &oes->tx_multicast, &es->tx_multicast);
663 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
664 I40E_GLV_BPTCL(stat_idx),
665 vsi->stat_offsets_loaded,
666 &oes->tx_broadcast, &es->tx_broadcast);
667 vsi->stat_offsets_loaded = true;
668}
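/**
 * i40e_update_veb_stats - Update Switch component statistics (VEB)
 * @veb: the VEB being updated
 **/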
674static void i40e_update_veb_stats(struct i40e_veb *veb)
675{
676 struct i40e_pf *pf = veb->pf;
677 struct i40e_hw *hw = &pf->hw;
678 struct i40e_eth_stats *oes;
679 struct i40e_eth_stats *es;
680 struct i40e_veb_tc_stats *veb_oes;
681 struct i40e_veb_tc_stats *veb_es;
682 int i, idx = 0;
683
684 idx = veb->stats_idx;
685 es = &veb->stats;
686 oes = &veb->stats_offsets;
687 veb_es = &veb->tc_stats;
688 veb_oes = &veb->tc_stats_offsets;
689
690
691 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
692 veb->stat_offsets_loaded,
693 &oes->tx_discards, &es->tx_discards);
694 if (hw->revision_id > 0)
695 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
696 veb->stat_offsets_loaded,
697 &oes->rx_unknown_protocol,
698 &es->rx_unknown_protocol);
699 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
700 veb->stat_offsets_loaded,
701 &oes->rx_bytes, &es->rx_bytes);
702 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
703 veb->stat_offsets_loaded,
704 &oes->rx_unicast, &es->rx_unicast);
705 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
706 veb->stat_offsets_loaded,
707 &oes->rx_multicast, &es->rx_multicast);
708 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
709 veb->stat_offsets_loaded,
710 &oes->rx_broadcast, &es->rx_broadcast);
711
712 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
713 veb->stat_offsets_loaded,
714 &oes->tx_bytes, &es->tx_bytes);
715 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
716 veb->stat_offsets_loaded,
717 &oes->tx_unicast, &es->tx_unicast);
718 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
719 veb->stat_offsets_loaded,
720 &oes->tx_multicast, &es->tx_multicast);
721 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
722 veb->stat_offsets_loaded,
723 &oes->tx_broadcast, &es->tx_broadcast);
724 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
725 i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
726 I40E_GLVEBTC_RPCL(i, idx),
727 veb->stat_offsets_loaded,
728 &veb_oes->tc_rx_packets[i],
729 &veb_es->tc_rx_packets[i]);
730 i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
731 I40E_GLVEBTC_RBCL(i, idx),
732 veb->stat_offsets_loaded,
733 &veb_oes->tc_rx_bytes[i],
734 &veb_es->tc_rx_bytes[i]);
735 i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
736 I40E_GLVEBTC_TPCL(i, idx),
737 veb->stat_offsets_loaded,
738 &veb_oes->tc_tx_packets[i],
739 &veb_es->tc_tx_packets[i]);
740 i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
741 I40E_GLVEBTC_TBCL(i, idx),
742 veb->stat_offsets_loaded,
743 &veb_oes->tc_tx_bytes[i],
744 &veb_es->tc_tx_bytes[i]);
745 }
746 veb->stat_offsets_loaded = true;
747}
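/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * Aggregates the per-ring software counters and the VSI's ethernet
 * register counters into the netdev-visible rtnl_link_stats64.
 **/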
759static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
760{
761 struct i40e_pf *pf = vsi->back;
762 struct rtnl_link_stats64 *ons;
763 struct rtnl_link_stats64 *ns;
764 struct i40e_eth_stats *oes;
765 struct i40e_eth_stats *es;
766 u32 tx_restart, tx_busy;
767 struct i40e_ring *p;
768 u32 rx_page, rx_buf;
769 u64 bytes, packets;
770 unsigned int start;
771 u64 tx_linearize;
772 u64 tx_force_wb;
773 u64 rx_p, rx_b;
774 u64 tx_p, tx_b;
775 u16 q;
776
777 if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
778 test_bit(__I40E_CONFIG_BUSY, pf->state))
779 return;
780
781 ns = i40e_get_vsi_stats_struct(vsi);
782 ons = &vsi->net_stats_offsets;
783 es = &vsi->eth_stats;
784 oes = &vsi->eth_stats_offsets;
785
786
787
788
789 rx_b = rx_p = 0;
790 tx_b = tx_p = 0;
791 tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
792 rx_page = 0;
793 rx_buf = 0;
794 rcu_read_lock();
795 for (q = 0; q < vsi->num_queue_pairs; q++) {
796
797 p = READ_ONCE(vsi->tx_rings[q]);
798
799 do {
800 start = u64_stats_fetch_begin_irq(&p->syncp);
801 packets = p->stats.packets;
802 bytes = p->stats.bytes;
803 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
804 tx_b += bytes;
805 tx_p += packets;
806 tx_restart += p->tx_stats.restart_queue;
807 tx_busy += p->tx_stats.tx_busy;
808 tx_linearize += p->tx_stats.tx_linearize;
809 tx_force_wb += p->tx_stats.tx_force_wb;
810
811
812 p = &p[1];
813 do {
814 start = u64_stats_fetch_begin_irq(&p->syncp);
815 packets = p->stats.packets;
816 bytes = p->stats.bytes;
817 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
818 rx_b += bytes;
819 rx_p += packets;
820 rx_buf += p->rx_stats.alloc_buff_failed;
821 rx_page += p->rx_stats.alloc_page_failed;
822 }
823 rcu_read_unlock();
824 vsi->tx_restart = tx_restart;
825 vsi->tx_busy = tx_busy;
826 vsi->tx_linearize = tx_linearize;
827 vsi->tx_force_wb = tx_force_wb;
828 vsi->rx_page_failed = rx_page;
829 vsi->rx_buf_failed = rx_buf;
830
831 ns->rx_packets = rx_p;
832 ns->rx_bytes = rx_b;
833 ns->tx_packets = tx_p;
834 ns->tx_bytes = tx_b;
835
836
837 i40e_update_eth_stats(vsi);
838 ons->tx_errors = oes->tx_errors;
839 ns->tx_errors = es->tx_errors;
840 ons->multicast = oes->rx_multicast;
841 ns->multicast = es->rx_multicast;
842 ons->rx_dropped = oes->rx_discards;
843 ns->rx_dropped = es->rx_discards;
844 ons->tx_dropped = oes->tx_discards;
845 ns->tx_dropped = es->tx_discards;
846
847
848 if (vsi == pf->vsi[pf->lan_vsi]) {
849 ns->rx_crc_errors = pf->stats.crc_errors;
850 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
851 ns->rx_length_errors = pf->stats.rx_length_errors;
852 }
853}
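/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/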
859static void i40e_update_pf_stats(struct i40e_pf *pf)
860{
861 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
862 struct i40e_hw_port_stats *nsd = &pf->stats;
863 struct i40e_hw *hw = &pf->hw;
864 u32 val;
865 int i;
866
867 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
868 I40E_GLPRT_GORCL(hw->port),
869 pf->stat_offsets_loaded,
870 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
871 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
872 I40E_GLPRT_GOTCL(hw->port),
873 pf->stat_offsets_loaded,
874 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
875 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
876 pf->stat_offsets_loaded,
877 &osd->eth.rx_discards,
878 &nsd->eth.rx_discards);
879 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
880 I40E_GLPRT_UPRCL(hw->port),
881 pf->stat_offsets_loaded,
882 &osd->eth.rx_unicast,
883 &nsd->eth.rx_unicast);
884 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
885 I40E_GLPRT_MPRCL(hw->port),
886 pf->stat_offsets_loaded,
887 &osd->eth.rx_multicast,
888 &nsd->eth.rx_multicast);
889 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
890 I40E_GLPRT_BPRCL(hw->port),
891 pf->stat_offsets_loaded,
892 &osd->eth.rx_broadcast,
893 &nsd->eth.rx_broadcast);
894 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
895 I40E_GLPRT_UPTCL(hw->port),
896 pf->stat_offsets_loaded,
897 &osd->eth.tx_unicast,
898 &nsd->eth.tx_unicast);
899 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
900 I40E_GLPRT_MPTCL(hw->port),
901 pf->stat_offsets_loaded,
902 &osd->eth.tx_multicast,
903 &nsd->eth.tx_multicast);
904 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
905 I40E_GLPRT_BPTCL(hw->port),
906 pf->stat_offsets_loaded,
907 &osd->eth.tx_broadcast,
908 &nsd->eth.tx_broadcast);
909
910 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
911 pf->stat_offsets_loaded,
912 &osd->tx_dropped_link_down,
913 &nsd->tx_dropped_link_down);
914
915 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
916 pf->stat_offsets_loaded,
917 &osd->crc_errors, &nsd->crc_errors);
918
919 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
920 pf->stat_offsets_loaded,
921 &osd->illegal_bytes, &nsd->illegal_bytes);
922
923 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
924 pf->stat_offsets_loaded,
925 &osd->mac_local_faults,
926 &nsd->mac_local_faults);
927 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
928 pf->stat_offsets_loaded,
929 &osd->mac_remote_faults,
930 &nsd->mac_remote_faults);
931
932 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
933 pf->stat_offsets_loaded,
934 &osd->rx_length_errors,
935 &nsd->rx_length_errors);
936
937 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
938 pf->stat_offsets_loaded,
939 &osd->link_xon_rx, &nsd->link_xon_rx);
940 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
941 pf->stat_offsets_loaded,
942 &osd->link_xon_tx, &nsd->link_xon_tx);
943 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
944 pf->stat_offsets_loaded,
945 &osd->link_xoff_rx, &nsd->link_xoff_rx);
946 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
947 pf->stat_offsets_loaded,
948 &osd->link_xoff_tx, &nsd->link_xoff_tx);
949
950 for (i = 0; i < 8; i++) {
951 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
952 pf->stat_offsets_loaded,
953 &osd->priority_xoff_rx[i],
954 &nsd->priority_xoff_rx[i]);
955 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
956 pf->stat_offsets_loaded,
957 &osd->priority_xon_rx[i],
958 &nsd->priority_xon_rx[i]);
959 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
960 pf->stat_offsets_loaded,
961 &osd->priority_xon_tx[i],
962 &nsd->priority_xon_tx[i]);
963 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
964 pf->stat_offsets_loaded,
965 &osd->priority_xoff_tx[i],
966 &nsd->priority_xoff_tx[i]);
967 i40e_stat_update32(hw,
968 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
969 pf->stat_offsets_loaded,
970 &osd->priority_xon_2_xoff[i],
971 &nsd->priority_xon_2_xoff[i]);
972 }
973
974 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
975 I40E_GLPRT_PRC64L(hw->port),
976 pf->stat_offsets_loaded,
977 &osd->rx_size_64, &nsd->rx_size_64);
978 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
979 I40E_GLPRT_PRC127L(hw->port),
980 pf->stat_offsets_loaded,
981 &osd->rx_size_127, &nsd->rx_size_127);
982 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
983 I40E_GLPRT_PRC255L(hw->port),
984 pf->stat_offsets_loaded,
985 &osd->rx_size_255, &nsd->rx_size_255);
986 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
987 I40E_GLPRT_PRC511L(hw->port),
988 pf->stat_offsets_loaded,
989 &osd->rx_size_511, &nsd->rx_size_511);
990 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
991 I40E_GLPRT_PRC1023L(hw->port),
992 pf->stat_offsets_loaded,
993 &osd->rx_size_1023, &nsd->rx_size_1023);
994 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
995 I40E_GLPRT_PRC1522L(hw->port),
996 pf->stat_offsets_loaded,
997 &osd->rx_size_1522, &nsd->rx_size_1522);
998 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
999 I40E_GLPRT_PRC9522L(hw->port),
1000 pf->stat_offsets_loaded,
1001 &osd->rx_size_big, &nsd->rx_size_big);
1002
1003 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1004 I40E_GLPRT_PTC64L(hw->port),
1005 pf->stat_offsets_loaded,
1006 &osd->tx_size_64, &nsd->tx_size_64);
1007 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1008 I40E_GLPRT_PTC127L(hw->port),
1009 pf->stat_offsets_loaded,
1010 &osd->tx_size_127, &nsd->tx_size_127);
1011 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1012 I40E_GLPRT_PTC255L(hw->port),
1013 pf->stat_offsets_loaded,
1014 &osd->tx_size_255, &nsd->tx_size_255);
1015 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1016 I40E_GLPRT_PTC511L(hw->port),
1017 pf->stat_offsets_loaded,
1018 &osd->tx_size_511, &nsd->tx_size_511);
1019 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1020 I40E_GLPRT_PTC1023L(hw->port),
1021 pf->stat_offsets_loaded,
1022 &osd->tx_size_1023, &nsd->tx_size_1023);
1023 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1024 I40E_GLPRT_PTC1522L(hw->port),
1025 pf->stat_offsets_loaded,
1026 &osd->tx_size_1522, &nsd->tx_size_1522);
1027 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1028 I40E_GLPRT_PTC9522L(hw->port),
1029 pf->stat_offsets_loaded,
1030 &osd->tx_size_big, &nsd->tx_size_big);
1031
1032 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1033 pf->stat_offsets_loaded,
1034 &osd->rx_undersize, &nsd->rx_undersize);
1035 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1036 pf->stat_offsets_loaded,
1037 &osd->rx_fragments, &nsd->rx_fragments);
1038 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1039 pf->stat_offsets_loaded,
1040 &osd->rx_oversize, &nsd->rx_oversize);
1041 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1042 pf->stat_offsets_loaded,
1043 &osd->rx_jabber, &nsd->rx_jabber);
1044
1045
1046 i40e_stat_update_and_clear32(hw,
1047 I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
1048 &nsd->fd_atr_match);
1049 i40e_stat_update_and_clear32(hw,
1050 I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
1051 &nsd->fd_sb_match);
1052 i40e_stat_update_and_clear32(hw,
1053 I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
1054 &nsd->fd_atr_tunnel_match);
1055
1056 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1057 nsd->tx_lpi_status =
1058 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1059 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1060 nsd->rx_lpi_status =
1061 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1062 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1063 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1064 pf->stat_offsets_loaded,
1065 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1066 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1067 pf->stat_offsets_loaded,
1068 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1069
1070 if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1071 !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
1072 nsd->fd_sb_status = true;
1073 else
1074 nsd->fd_sb_status = false;
1075
1076 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1077 !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
1078 nsd->fd_atr_status = true;
1079 else
1080 nsd->fd_atr_status = false;
1081
1082 pf->stat_offsets_loaded = true;
1083}
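/**
 * i40e_update_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/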
1091void i40e_update_stats(struct i40e_vsi *vsi)
1092{
1093 struct i40e_pf *pf = vsi->back;
1094
1095 if (vsi == pf->vsi[pf->lan_vsi])
1096 i40e_update_pf_stats(pf);
1097
1098 i40e_update_vsi_stats(vsi);
1099}
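/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL.  Must be called while holding
 * the mac_filter_hash_lock.
 **/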
1109static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1110 const u8 *macaddr, s16 vlan)
1111{
1112 struct i40e_mac_filter *f;
1113 u64 key;
1114
1115 if (!vsi || !macaddr)
1116 return NULL;
1117
1118 key = i40e_addr_to_hkey(macaddr);
1119 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1120 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1121 (vlan == f->vlan))
1122 return f;
1123 }
1124 return NULL;
1125}
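/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * the MAC address was not found
 **/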
1135struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
1136{
1137 struct i40e_mac_filter *f;
1138 u64 key;
1139
1140 if (!vsi || !macaddr)
1141 return NULL;
1142
1143 key = i40e_addr_to_hkey(macaddr);
1144 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1145 if ((ether_addr_equal(macaddr, f->macaddr)))
1146 return f;
1147 }
1148 return NULL;
1149}
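/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/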
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a PVID, always operate in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ANY.  Checking has_vlan_filter here is
	 * cheaper than walking the whole filter hash; the flag is maintained
	 * as filters are added and corrected during filter sync.
	 */
	return vsi->has_vlan_filter;
}
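/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly: while any
 * VLAN filters remain or are about to be added, VLAN=-1 filters are
 * converted to VLAN=0; once no VLAN filters remain, VLAN=0 filters are
 * converted back to VLAN=-1.  If a PVID is set, all filters are forced
 * onto that VLAN.  Converted filters are queued on the add list and the
 * originals are queued on the delete list.
 *
 * Must be called while holding mac_filter_hash_lock
 **/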
1215static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
1216 struct hlist_head *tmp_add_list,
1217 struct hlist_head *tmp_del_list,
1218 int vlan_filters)
1219{
1220 s16 pvid = le16_to_cpu(vsi->info.pvid);
1221 struct i40e_mac_filter *f, *add_head;
1222 struct i40e_new_mac_filter *new;
1223 struct hlist_node *h;
1224 int bkt, new_vlan;
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241 hlist_for_each_entry(new, tmp_add_list, hlist) {
1242 if (pvid && new->f->vlan != pvid)
1243 new->f->vlan = pvid;
1244 else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
1245 new->f->vlan = 0;
1246 else if (!vlan_filters && new->f->vlan == 0)
1247 new->f->vlan = I40E_VLAN_ANY;
1248 }
1249
1250
1251 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1252
1253
1254
1255
1256
1257 if ((pvid && f->vlan != pvid) ||
1258 (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1259 (!vlan_filters && f->vlan == 0)) {
1260
1261 if (pvid)
1262 new_vlan = pvid;
1263 else if (vlan_filters)
1264 new_vlan = 0;
1265 else
1266 new_vlan = I40E_VLAN_ANY;
1267
1268
1269 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
1270 if (!add_head)
1271 return -ENOMEM;
1272
1273
1274 new = kzalloc(sizeof(*new), GFP_ATOMIC);
1275 if (!new)
1276 return -ENOMEM;
1277
1278 new->f = add_head;
1279 new->state = add_head->state;
1280
1281
1282 hlist_add_head(&new->hlist, tmp_add_list);
1283
1284
1285 f->state = I40E_FILTER_REMOVE;
1286 hash_del(&f->hlist);
1287 hlist_add_head(&f->hlist, tmp_del_list);
1288 }
1289 }
1290
1291 vsi->has_vlan_filter = !!vlan_filters;
1292
1293 return 0;
1294}
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1305{
1306 struct i40e_aqc_remove_macvlan_element_data element;
1307 struct i40e_pf *pf = vsi->back;
1308
1309
1310 if (vsi->type != I40E_VSI_MAIN)
1311 return;
1312
1313 memset(&element, 0, sizeof(element));
1314 ether_addr_copy(element.mac_addr, macaddr);
1315 element.vlan_tag = 0;
1316
1317 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1318 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1319
1320 memset(&element, 0, sizeof(element));
1321 ether_addr_copy(element.mac_addr, macaddr);
1322 element.vlan_tag = 0;
1323
1324 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1325 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1326 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1327}
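/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/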
1340struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1341 const u8 *macaddr, s16 vlan)
1342{
1343 struct i40e_mac_filter *f;
1344 u64 key;
1345
1346 if (!vsi || !macaddr)
1347 return NULL;
1348
1349 f = i40e_find_filter(vsi, macaddr, vlan);
1350 if (!f) {
1351 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1352 if (!f)
1353 return NULL;
1354
1355
1356
1357
1358 if (vlan >= 0)
1359 vsi->has_vlan_filter = true;
1360
1361 ether_addr_copy(f->macaddr, macaddr);
1362 f->vlan = vlan;
1363 f->state = I40E_FILTER_NEW;
1364 INIT_HLIST_NODE(&f->hlist);
1365
1366 key = i40e_addr_to_hkey(macaddr);
1367 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1368
1369 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1370 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1371 }
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381 if (f->state == I40E_FILTER_REMOVE)
1382 f->state = I40E_FILTER_ACTIVE;
1383
1384 return f;
1385}
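/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * Filters that were never handed to firmware (NEW or FAILED) are freed
 * immediately; the rest are only marked I40E_FILTER_REMOVE and are
 * removed from hardware later by the filter sync task.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/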
1402void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
1403{
1404 if (!f)
1405 return;
1406
1407
1408
1409
1410
1411 if ((f->state == I40E_FILTER_FAILED) ||
1412 (f->state == I40E_FILTER_NEW)) {
1413 hash_del(&f->hlist);
1414 kfree(f);
1415 } else {
1416 f->state = I40E_FILTER_REMOVE;
1417 }
1418
1419 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1420 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1421}
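/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/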
1435void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
1436{
1437 struct i40e_mac_filter *f;
1438
1439 if (!vsi || !macaddr)
1440 return;
1441
1442 f = i40e_find_filter(vsi, macaddr, vlan);
1443 __i40e_del_filter(vsi, f);
1444}
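/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If the VSI has a PVID, add the filter on that VLAN; if it is not in
 * VLAN mode, add it on I40E_VLAN_ANY; otherwise add the filter on every
 * VLAN that already has a filter.
 *
 * Returns the last filter added on success, else NULL
 **/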
1458struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
1459 const u8 *macaddr)
1460{
1461 struct i40e_mac_filter *f, *add = NULL;
1462 struct hlist_node *h;
1463 int bkt;
1464
1465 if (vsi->info.pvid)
1466 return i40e_add_filter(vsi, macaddr,
1467 le16_to_cpu(vsi->info.pvid));
1468
1469 if (!i40e_is_vsi_in_vlan(vsi))
1470 return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
1471
1472 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1473 if (f->state == I40E_FILTER_REMOVE)
1474 continue;
1475 add = i40e_add_filter(vsi, macaddr, f->vlan);
1476 if (!add)
1477 return NULL;
1478 }
1479
1480 return add;
1481}
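/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has
 * been associated with.
 *
 * Returns 0 for success, or -ENOENT if the address was not found
 **/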
1493int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
1494{
1495 struct i40e_mac_filter *f;
1496 struct hlist_node *h;
1497 bool found = false;
1498 int bkt;
1499
1500 lockdep_assert_held(&vsi->mac_filter_hash_lock);
1501 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1502 if (ether_addr_equal(macaddr, f->macaddr)) {
1503 __i40e_del_filter(vsi, f);
1504 found = true;
1505 }
1506 }
1507
1508 if (found)
1509 return 0;
1510 else
1511 return -ENOENT;
1512}
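/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/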
1521static int i40e_set_mac(struct net_device *netdev, void *p)
1522{
1523 struct i40e_netdev_priv *np = netdev_priv(netdev);
1524 struct i40e_vsi *vsi = np->vsi;
1525 struct i40e_pf *pf = vsi->back;
1526 struct i40e_hw *hw = &pf->hw;
1527 struct sockaddr *addr = p;
1528
1529 if (!is_valid_ether_addr(addr->sa_data))
1530 return -EADDRNOTAVAIL;
1531
1532 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1533 netdev_info(netdev, "already using mac address %pM\n",
1534 addr->sa_data);
1535 return 0;
1536 }
1537
1538 if (test_bit(__I40E_DOWN, pf->state) ||
1539 test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
1540 return -EADDRNOTAVAIL;
1541
1542 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1543 netdev_info(netdev, "returning to hw mac address %pM\n",
1544 hw->mac.addr);
1545 else
1546 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1547
1548
1549
1550
1551
1552
1553
1554 spin_lock_bh(&vsi->mac_filter_hash_lock);
1555 i40e_del_mac_filter(vsi, netdev->dev_addr);
1556 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1557 i40e_add_mac_filter(vsi, netdev->dev_addr);
1558 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1559
1560 if (vsi->type == I40E_VSI_MAIN) {
1561 i40e_status ret;
1562
1563 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
1564 addr->sa_data, NULL);
1565 if (ret)
1566 netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
1567 i40e_stat_str(hw, ret),
1568 i40e_aq_str(hw, hw->aq.asq_last_status));
1569 }
1570
1571
1572
1573
1574 i40e_service_event_schedule(pf);
1575 return 0;
1576}
1577
1578
1579
1580
1581
1582
1583static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
1584 u8 *lut, u16 lut_size)
1585{
1586 struct i40e_pf *pf = vsi->back;
1587 struct i40e_hw *hw = &pf->hw;
1588 int ret = 0;
1589
1590 if (seed) {
1591 struct i40e_aqc_get_set_rss_key_data *seed_dw =
1592 (struct i40e_aqc_get_set_rss_key_data *)seed;
1593 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
1594 if (ret) {
1595 dev_info(&pf->pdev->dev,
1596 "Cannot set RSS key, err %s aq_err %s\n",
1597 i40e_stat_str(hw, ret),
1598 i40e_aq_str(hw, hw->aq.asq_last_status));
1599 return ret;
1600 }
1601 }
1602 if (lut) {
1603 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
1604
1605 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
1606 if (ret) {
1607 dev_info(&pf->pdev->dev,
1608 "Cannot set RSS lut, err %s aq_err %s\n",
1609 i40e_stat_str(hw, ret),
1610 i40e_aq_str(hw, hw->aq.asq_last_status));
1611 return ret;
1612 }
1613 }
1614 return ret;
1615}
1616
1617
1618
1619
1620
1621static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
1622{
1623 struct i40e_pf *pf = vsi->back;
1624 u8 seed[I40E_HKEY_ARRAY_SIZE];
1625 u8 *lut;
1626 int ret;
1627
1628 if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
1629 return 0;
1630 if (!vsi->rss_size)
1631 vsi->rss_size = min_t(int, pf->alloc_rss_size,
1632 vsi->num_queue_pairs);
1633 if (!vsi->rss_size)
1634 return -EINVAL;
1635 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1636 if (!lut)
1637 return -ENOMEM;
1638
1639
1640
1641
1642 if (vsi->rss_lut_user)
1643 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1644 else
1645 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
1646 if (vsi->rss_hkey_user)
1647 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
1648 else
1649 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
1650 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
1651 kfree(lut);
1652 return ret;
1653}
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
1664 struct i40e_vsi_context *ctxt,
1665 u8 enabled_tc)
1666{
1667 u16 qcount = 0, max_qcount, qmap, sections = 0;
1668 int i, override_q, pow, num_qps, ret;
1669 u8 netdev_tc = 0, offset = 0;
1670
1671 if (vsi->type != I40E_VSI_MAIN)
1672 return -EINVAL;
1673 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1674 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1675 vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
1676 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1677 num_qps = vsi->mqprio_qopt.qopt.count[0];
1678
1679
1680 pow = ilog2(num_qps);
1681 if (!is_power_of_2(num_qps))
1682 pow++;
1683 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1684 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1685
1686
1687 max_qcount = vsi->mqprio_qopt.qopt.count[0];
1688 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1689
1690 if (vsi->tc_config.enabled_tc & BIT(i)) {
1691 offset = vsi->mqprio_qopt.qopt.offset[i];
1692 qcount = vsi->mqprio_qopt.qopt.count[i];
1693 if (qcount > max_qcount)
1694 max_qcount = qcount;
1695 vsi->tc_config.tc_info[i].qoffset = offset;
1696 vsi->tc_config.tc_info[i].qcount = qcount;
1697 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1698 } else {
1699
1700
1701
1702
1703 vsi->tc_config.tc_info[i].qoffset = 0;
1704 vsi->tc_config.tc_info[i].qcount = 1;
1705 vsi->tc_config.tc_info[i].netdev_tc = 0;
1706 }
1707 }
1708
1709
1710 vsi->num_queue_pairs = offset + qcount;
1711
1712
1713 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1714 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1715 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1716 ctxt->info.valid_sections |= cpu_to_le16(sections);
1717
1718
1719 vsi->rss_size = max_qcount;
1720 ret = i40e_vsi_config_rss(vsi);
1721 if (ret) {
1722 dev_info(&vsi->back->pdev->dev,
1723 "Failed to reconfig rss for num_queues (%u)\n",
1724 max_qcount);
1725 return ret;
1726 }
1727 vsi->reconfig_rss = true;
1728 dev_dbg(&vsi->back->pdev->dev,
1729 "Reconfigured rss with num_queues (%u)\n", max_qcount);
1730
1731
1732
1733
1734 override_q = vsi->mqprio_qopt.qopt.count[0];
1735 if (override_q && override_q < vsi->num_queue_pairs) {
1736 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
1737 vsi->next_base_queue = override_q;
1738 }
1739 return 0;
1740}
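/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/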
1751static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1752 struct i40e_vsi_context *ctxt,
1753 u8 enabled_tc,
1754 bool is_add)
1755{
1756 struct i40e_pf *pf = vsi->back;
1757 u16 sections = 0;
1758 u8 netdev_tc = 0;
1759 u16 numtc = 1;
1760 u16 qcount;
1761 u8 offset;
1762 u16 qmap;
1763 int i;
1764 u16 num_tc_qps = 0;
1765
1766 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1767 offset = 0;
1768
1769
1770 num_tc_qps = vsi->alloc_queue_pairs;
1771 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1772
1773 for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1774 if (enabled_tc & BIT(i))
1775 numtc++;
1776 }
1777 if (!numtc) {
1778 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1779 numtc = 1;
1780 }
1781 num_tc_qps = num_tc_qps / numtc;
1782 num_tc_qps = min_t(int, num_tc_qps,
1783 i40e_pf_get_max_q_per_tc(pf));
1784 }
1785
1786 vsi->tc_config.numtc = numtc;
1787 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1788
1789
1790 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1791 num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
1792
1793
1794 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1795
1796 if (vsi->tc_config.enabled_tc & BIT(i)) {
1797
1798 int pow, num_qps;
1799
1800 switch (vsi->type) {
1801 case I40E_VSI_MAIN:
1802 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
1803 I40E_FLAG_FD_ATR_ENABLED)) ||
1804 vsi->tc_config.enabled_tc != 1) {
1805 qcount = min_t(int, pf->alloc_rss_size,
1806 num_tc_qps);
1807 break;
1808 }
				/* fall through */
1810 case I40E_VSI_FDIR:
1811 case I40E_VSI_SRIOV:
1812 case I40E_VSI_VMDQ2:
1813 default:
1814 qcount = num_tc_qps;
1815 WARN_ON(i != 0);
1816 break;
1817 }
1818 vsi->tc_config.tc_info[i].qoffset = offset;
1819 vsi->tc_config.tc_info[i].qcount = qcount;
1820
1821
1822 num_qps = qcount;
1823 pow = 0;
1824 while (num_qps && (BIT_ULL(pow) < qcount)) {
1825 pow++;
1826 num_qps >>= 1;
1827 }
1828
1829 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1830 qmap =
1831 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1832 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1833
1834 offset += qcount;
1835 } else {
1836
1837
1838
1839
1840 vsi->tc_config.tc_info[i].qoffset = 0;
1841 vsi->tc_config.tc_info[i].qcount = 1;
1842 vsi->tc_config.tc_info[i].netdev_tc = 0;
1843
1844 qmap = 0;
1845 }
1846 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1847 }
1848
1849
1850 vsi->num_queue_pairs = offset;
1851 if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
1852 if (vsi->req_queue_pairs > 0)
1853 vsi->num_queue_pairs = vsi->req_queue_pairs;
1854 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1855 vsi->num_queue_pairs = pf->num_lan_msix;
1856 }
1857
1858
1859 if (is_add) {
1860 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1861
1862 ctxt->info.up_enable_bits = enabled_tc;
1863 }
1864 if (vsi->type == I40E_VSI_SRIOV) {
1865 ctxt->info.mapping_flags |=
1866 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1867 for (i = 0; i < vsi->num_queue_pairs; i++)
1868 ctxt->info.queue_mapping[i] =
1869 cpu_to_le16(vsi->base_queue + i);
1870 } else {
1871 ctxt->info.mapping_flags |=
1872 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1873 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1874 }
1875 ctxt->info.valid_sections |= cpu_to_le16(sections);
1876}
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1887{
1888 struct i40e_netdev_priv *np = netdev_priv(netdev);
1889 struct i40e_vsi *vsi = np->vsi;
1890
1891 if (i40e_add_mac_filter(vsi, addr))
1892 return 0;
1893 else
1894 return -ENOMEM;
1895}
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1906{
1907 struct i40e_netdev_priv *np = netdev_priv(netdev);
1908 struct i40e_vsi *vsi = np->vsi;
1909
1910
1911
1912
1913
1914
1915 if (ether_addr_equal(addr, netdev->dev_addr))
1916 return 0;
1917
1918 i40e_del_mac_filter(vsi, addr);
1919
1920 return 0;
1921}
1922
1923
1924
1925
1926
1927static void i40e_set_rx_mode(struct net_device *netdev)
1928{
1929 struct i40e_netdev_priv *np = netdev_priv(netdev);
1930 struct i40e_vsi *vsi = np->vsi;
1931
1932 spin_lock_bh(&vsi->mac_filter_hash_lock);
1933
1934 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1935 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1936
1937 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1938
1939
1940 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1941 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1942 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1943 }
1944}
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1955 struct hlist_head *from)
1956{
1957 struct i40e_mac_filter *f;
1958 struct hlist_node *h;
1959
1960 hlist_for_each_entry_safe(f, h, from, hlist) {
1961 u64 key = i40e_addr_to_hkey(f->macaddr);
1962
1963
1964 hlist_del(&f->hlist);
1965 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1966 }
1967}
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
1978 struct hlist_head *from)
1979{
1980 struct i40e_new_mac_filter *new;
1981 struct hlist_node *h;
1982
1983 hlist_for_each_entry_safe(new, h, from, hlist) {
1984
1985 hlist_del(&new->hlist);
1986 kfree(new);
1987 }
1988}
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998static
1999struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2000{
2001 hlist_for_each_entry_continue(next, hlist) {
2002 if (!is_broadcast_ether_addr(next->f->macaddr))
2003 return next;
2004 }
2005
2006 return NULL;
2007}
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019static int
2020i40e_update_filter_state(int count,
2021 struct i40e_aqc_add_macvlan_element_data *add_list,
2022 struct i40e_new_mac_filter *add_head)
2023{
2024 int retval = 0;
2025 int i;
2026
2027 for (i = 0; i < count; i++) {
2028
2029
2030
2031
2032
2033
2034 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2035 add_head->state = I40E_FILTER_FAILED;
2036 } else {
2037 add_head->state = I40E_FILTER_ACTIVE;
2038 retval++;
2039 }
2040
2041 add_head = i40e_next_filter(add_head);
2042 if (!add_head)
2043 break;
2044 }
2045
2046 return retval;
2047}
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062static
2063void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2064 struct i40e_aqc_remove_macvlan_element_data *list,
2065 int num_del, int *retval)
2066{
2067 struct i40e_hw *hw = &vsi->back->hw;
2068 i40e_status aq_ret;
2069 int aq_err;
2070
2071 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
2072 aq_err = hw->aq.asq_last_status;
2073
2074
2075 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
2076 *retval = -EIO;
2077 dev_info(&vsi->back->pdev->dev,
2078 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2079 vsi_name, i40e_stat_str(hw, aq_ret),
2080 i40e_aq_str(hw, aq_err));
2081 }
2082}
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096static
2097void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2098 struct i40e_aqc_add_macvlan_element_data *list,
2099 struct i40e_new_mac_filter *add_head,
2100 int num_add)
2101{
2102 struct i40e_hw *hw = &vsi->back->hw;
2103 int aq_err, fcnt;
2104
2105 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
2106 aq_err = hw->aq.asq_last_status;
2107 fcnt = i40e_update_filter_state(num_add, list, add_head);
2108
2109 if (fcnt != num_add) {
2110 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2111 dev_warn(&vsi->back->pdev->dev,
2112 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2113 i40e_aq_str(hw, aq_err),
2114 vsi_name);
2115 }
2116}
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130static i40e_status
2131i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2132 struct i40e_mac_filter *f)
2133{
2134 bool enable = f->state == I40E_FILTER_NEW;
2135 struct i40e_hw *hw = &vsi->back->hw;
2136 i40e_status aq_ret;
2137
2138 if (f->vlan == I40E_VLAN_ANY) {
2139 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2140 vsi->seid,
2141 enable,
2142 NULL);
2143 } else {
2144 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2145 vsi->seid,
2146 enable,
2147 f->vlan,
2148 NULL);
2149 }
2150
2151 if (aq_ret) {
2152 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2153 dev_warn(&vsi->back->pdev->dev,
2154 "Error %s, forcing overflow promiscuous on %s\n",
2155 i40e_aq_str(hw, hw->aq.asq_last_status),
2156 vsi_name);
2157 }
2158
2159 return aq_ret;
2160}
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2172{
2173 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2174 struct i40e_hw *hw = &pf->hw;
2175 i40e_status aq_ret;
2176
2177 if (vsi->type == I40E_VSI_MAIN &&
2178 pf->lan_veb != I40E_NO_VEB &&
2179 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2180
2181
2182
2183
2184
2185 if (promisc)
2186 aq_ret = i40e_aq_set_default_vsi(hw,
2187 vsi->seid,
2188 NULL);
2189 else
2190 aq_ret = i40e_aq_clear_default_vsi(hw,
2191 vsi->seid,
2192 NULL);
2193 if (aq_ret) {
2194 dev_info(&pf->pdev->dev,
2195 "Set default VSI failed, err %s, aq_err %s\n",
2196 i40e_stat_str(hw, aq_ret),
2197 i40e_aq_str(hw, hw->aq.asq_last_status));
2198 }
2199 } else {
2200 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2201 hw,
2202 vsi->seid,
2203 promisc, NULL,
2204 true);
2205 if (aq_ret) {
2206 dev_info(&pf->pdev->dev,
2207 "set unicast promisc failed, err %s, aq_err %s\n",
2208 i40e_stat_str(hw, aq_ret),
2209 i40e_aq_str(hw, hw->aq.asq_last_status));
2210 }
2211 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2212 hw,
2213 vsi->seid,
2214 promisc, NULL);
2215 if (aq_ret) {
2216 dev_info(&pf->pdev->dev,
2217 "set multicast promisc failed, err %s, aq_err %s\n",
2218 i40e_stat_str(hw, aq_ret),
2219 i40e_aq_str(hw, hw->aq.asq_last_status));
2220 }
2221 }
2222
2223 if (!aq_ret)
2224 pf->cur_promisc = promisc;
2225
2226 return aq_ret;
2227}
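/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/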
2237int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2238{
2239 struct hlist_head tmp_add_list, tmp_del_list;
2240 struct i40e_mac_filter *f;
2241 struct i40e_new_mac_filter *new, *add_head = NULL;
2242 struct i40e_hw *hw = &vsi->back->hw;
2243 bool old_overflow, new_overflow;
2244 unsigned int failed_filters = 0;
2245 unsigned int vlan_filters = 0;
2246 char vsi_name[16] = "PF";
2247 int filter_list_len = 0;
2248 i40e_status aq_ret = 0;
2249 u32 changed_flags = 0;
2250 struct hlist_node *h;
2251 struct i40e_pf *pf;
2252 int num_add = 0;
2253 int num_del = 0;
2254 int retval = 0;
2255 u16 cmd_flags;
2256 int list_size;
2257 int bkt;
2258
2259
2260 struct i40e_aqc_add_macvlan_element_data *add_list;
2261 struct i40e_aqc_remove_macvlan_element_data *del_list;
2262
2263 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2264 usleep_range(1000, 2000);
2265 pf = vsi->back;
2266
2267 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2268
2269 if (vsi->netdev) {
2270 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2271 vsi->current_netdev_flags = vsi->netdev->flags;
2272 }
2273
2274 INIT_HLIST_HEAD(&tmp_add_list);
2275 INIT_HLIST_HEAD(&tmp_del_list);
2276
2277 if (vsi->type == I40E_VSI_SRIOV)
2278 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2279 else if (vsi->type != I40E_VSI_MAIN)
2280 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2281
2282 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2283 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2284
2285 spin_lock_bh(&vsi->mac_filter_hash_lock);
2286
2287 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2288 if (f->state == I40E_FILTER_REMOVE) {
2289
2290 hash_del(&f->hlist);
2291 hlist_add_head(&f->hlist, &tmp_del_list);
2292
2293
2294 continue;
2295 }
2296 if (f->state == I40E_FILTER_NEW) {
2297
2298 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2299 if (!new)
2300 goto err_no_memory_locked;
2301
2302
2303 new->f = f;
2304 new->state = f->state;
2305
2306
2307 hlist_add_head(&new->hlist, &tmp_add_list);
2308 }
2309
2310
2311
2312
2313
2314 if (f->vlan > 0)
2315 vlan_filters++;
2316 }
2317
2318 retval = i40e_correct_mac_vlan_filters(vsi,
2319 &tmp_add_list,
2320 &tmp_del_list,
2321 vlan_filters);
2322 if (retval)
2323 goto err_no_memory_locked;
2324
2325 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2326 }
2327
2328
2329 if (!hlist_empty(&tmp_del_list)) {
2330 filter_list_len = hw->aq.asq_buf_size /
2331 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2332 list_size = filter_list_len *
2333 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2334 del_list = kzalloc(list_size, GFP_ATOMIC);
2335 if (!del_list)
2336 goto err_no_memory;
2337
2338 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2339 cmd_flags = 0;
2340
2341
2342
2343
2344 if (is_broadcast_ether_addr(f->macaddr)) {
2345 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2346
2347 hlist_del(&f->hlist);
2348 kfree(f);
2349 continue;
2350 }
2351
2352
2353 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2354 if (f->vlan == I40E_VLAN_ANY) {
2355 del_list[num_del].vlan_tag = 0;
2356 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2357 } else {
2358 del_list[num_del].vlan_tag =
2359 cpu_to_le16((u16)(f->vlan));
2360 }
2361
2362 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2363 del_list[num_del].flags = cmd_flags;
2364 num_del++;
2365
2366
2367 if (num_del == filter_list_len) {
2368 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2369 num_del, &retval);
2370 memset(del_list, 0, list_size);
2371 num_del = 0;
2372 }
2373
2374
2375
2376 hlist_del(&f->hlist);
2377 kfree(f);
2378 }
2379
2380 if (num_del) {
2381 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2382 num_del, &retval);
2383 }
2384
2385 kfree(del_list);
2386 del_list = NULL;
2387 }
2388
2389 if (!hlist_empty(&tmp_add_list)) {
2390
2391 filter_list_len = hw->aq.asq_buf_size /
2392 sizeof(struct i40e_aqc_add_macvlan_element_data);
2393 list_size = filter_list_len *
2394 sizeof(struct i40e_aqc_add_macvlan_element_data);
2395 add_list = kzalloc(list_size, GFP_ATOMIC);
2396 if (!add_list)
2397 goto err_no_memory;
2398
2399 num_add = 0;
2400 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2401
2402
2403
2404 if (is_broadcast_ether_addr(new->f->macaddr)) {
2405 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2406 new->f))
2407 new->state = I40E_FILTER_FAILED;
2408 else
2409 new->state = I40E_FILTER_ACTIVE;
2410 continue;
2411 }
2412
2413
2414 if (num_add == 0)
2415 add_head = new;
2416 cmd_flags = 0;
2417 ether_addr_copy(add_list[num_add].mac_addr,
2418 new->f->macaddr);
2419 if (new->f->vlan == I40E_VLAN_ANY) {
2420 add_list[num_add].vlan_tag = 0;
2421 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2422 } else {
2423 add_list[num_add].vlan_tag =
2424 cpu_to_le16((u16)(new->f->vlan));
2425 }
2426 add_list[num_add].queue_number = 0;
2427
2428 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2429 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2430 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2431 num_add++;
2432
2433
2434 if (num_add == filter_list_len) {
2435 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2436 add_head, num_add);
2437 memset(add_list, 0, list_size);
2438 num_add = 0;
2439 }
2440 }
2441 if (num_add) {
2442 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2443 num_add);
2444 }
2445
2446
2447
2448 spin_lock_bh(&vsi->mac_filter_hash_lock);
2449 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2450
2451 if (new->f->state == I40E_FILTER_NEW)
2452 new->f->state = new->state;
2453 hlist_del(&new->hlist);
2454 kfree(new);
2455 }
2456 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2457 kfree(add_list);
2458 add_list = NULL;
2459 }
2460
2461
2462 spin_lock_bh(&vsi->mac_filter_hash_lock);
2463 vsi->active_filters = 0;
2464 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2465 if (f->state == I40E_FILTER_ACTIVE)
2466 vsi->active_filters++;
2467 else if (f->state == I40E_FILTER_FAILED)
2468 failed_filters++;
2469 }
2470 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2471
2472
2473
2474
2475
2476 if (old_overflow && !failed_filters &&
2477 vsi->active_filters < vsi->promisc_threshold) {
2478 dev_info(&pf->pdev->dev,
2479 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2480 vsi_name);
2481 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2482 vsi->promisc_threshold = 0;
2483 }
2484
2485
2486 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2487 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2488 goto out;
2489 }
2490
2491 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2492
2493
2494
2495
2496 if (!old_overflow && new_overflow)
2497 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
2498
2499
2500 if (changed_flags & IFF_ALLMULTI) {
2501 bool cur_multipromisc;
2502
2503 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2504 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2505 vsi->seid,
2506 cur_multipromisc,
2507 NULL);
2508 if (aq_ret) {
2509 retval = i40e_aq_rc_to_posix(aq_ret,
2510 hw->aq.asq_last_status);
2511 dev_info(&pf->pdev->dev,
2512 "set multi promisc failed on %s, err %s aq_err %s\n",
2513 vsi_name,
2514 i40e_stat_str(hw, aq_ret),
2515 i40e_aq_str(hw, hw->aq.asq_last_status));
2516 }
2517 }
2518
2519 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2520 bool cur_promisc;
2521
2522 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2523 new_overflow);
2524 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2525 if (aq_ret) {
2526 retval = i40e_aq_rc_to_posix(aq_ret,
2527 hw->aq.asq_last_status);
2528 dev_info(&pf->pdev->dev,
2529 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2530 cur_promisc ? "on" : "off",
2531 vsi_name,
2532 i40e_stat_str(hw, aq_ret),
2533 i40e_aq_str(hw, hw->aq.asq_last_status));
2534 }
2535 }
2536out:
2537
2538 if (retval)
2539 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2540
2541 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2542 return retval;
2543
2544err_no_memory:
2545
2546 spin_lock_bh(&vsi->mac_filter_hash_lock);
2547err_no_memory_locked:
2548 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2549 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2550 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2551
2552 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2553 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2554 return -ENOMEM;
2555}
2556
2557
2558
2559
2560
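/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/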
2561static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2562{
2563 int v;
2564
2565 if (!pf)
2566 return;
2567 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2568 return;
2569
2570 for (v = 0; v < pf->num_alloc_vsi; v++) {
2571 if (pf->vsi[v] &&
2572 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2573 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2574
2575 if (ret) {
2576
2577 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2578 pf->state);
2579 break;
2580 }
2581 }
2582 }
2583}
2584
2585
2586
2587
2588
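/**
 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
 * @vsi: the vsi
 **/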
2589static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2590{
2591 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2592 return I40E_RXBUFFER_2048;
2593 else
2594 return I40E_RXBUFFER_3072;
2595}
2596
2597
2598
2599
2600
2601
2602
2603
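/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/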
2604static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2605{
2606 struct i40e_netdev_priv *np = netdev_priv(netdev);
2607 struct i40e_vsi *vsi = np->vsi;
2608 struct i40e_pf *pf = vsi->back;
2609
2610 if (i40e_enabled_xdp_vsi(vsi)) {
2611 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2612
2613 if (frame_size > i40e_max_xdp_frame_size(vsi))
2614 return -EINVAL;
2615 }
2616
2617 netdev_info(netdev, "changing MTU from %d to %d\n",
2618 netdev->mtu, new_mtu);
2619 netdev->mtu = new_mtu;
2620 if (netif_running(netdev))
2621 i40e_vsi_reinit_locked(vsi);
2622 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2623 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2624 return 0;
2625}
2626
2627
2628
2629
2630
2631
2632
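/**
 * i40e_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/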
2633int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2634{
2635 struct i40e_netdev_priv *np = netdev_priv(netdev);
2636 struct i40e_pf *pf = np->vsi->back;
2637
2638 switch (cmd) {
2639 case SIOCGHWTSTAMP:
2640 return i40e_ptp_get_ts_config(pf, ifr);
2641 case SIOCSHWTSTAMP:
2642 return i40e_ptp_set_ts_config(pf, ifr);
2643 default:
2644 return -EOPNOTSUPP;
2645 }
2646}
2647
2648
2649
2650
2651
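/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/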
2652void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2653{
2654 struct i40e_vsi_context ctxt;
2655 i40e_status ret;
2656
2657 if ((vsi->info.valid_sections &
2658 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2659 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2660 return;
2661
2662 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2663 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2664 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2665
2666 ctxt.seid = vsi->seid;
2667 ctxt.info = vsi->info;
2668 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2669 if (ret) {
2670 dev_info(&vsi->back->pdev->dev,
2671 "update vlan stripping failed, err %s aq_err %s\n",
2672 i40e_stat_str(&vsi->back->hw, ret),
2673 i40e_aq_str(&vsi->back->hw,
2674 vsi->back->hw.aq.asq_last_status));
2675 }
2676}
2677
2678
2679
2680
2681
2682void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2683{
2684 struct i40e_vsi_context ctxt;
2685 i40e_status ret;
2686
2687 if ((vsi->info.valid_sections &
2688 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2689 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2690 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2691 return;
2692
2693 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2694 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2695 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2696
2697 ctxt.seid = vsi->seid;
2698 ctxt.info = vsi->info;
2699 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2700 if (ret) {
2701 dev_info(&vsi->back->pdev->dev,
2702 "update vlan stripping failed, err %s aq_err %s\n",
2703 i40e_stat_str(&vsi->back->hw, ret),
2704 i40e_aq_str(&vsi->back->hw,
2705 vsi->back->hw.aq.asq_last_status));
2706 }
2707}
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
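/**
 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only, -1 = any)
 *
 * This is a helper function for adding a new MAC/VLAN filter with the
 * specified VLAN for each existing MAC address already in the hash table.
 * This function does *not* perform any accounting to update filters based on
 * VLAN mode.
 *
 * NOTE: this function expects to be called while under the
 * mac_filter_hash_lock
 **/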
2722int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2723{
2724 struct i40e_mac_filter *f, *add_f;
2725 struct hlist_node *h;
2726 int bkt;
2727
2728 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2729 if (f->state == I40E_FILTER_REMOVE)
2730 continue;
2731 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2732 if (!add_f) {
2733 dev_info(&vsi->back->pdev->dev,
2734 "Could not add vlan filter %d for %pM\n",
2735 vid, f->macaddr);
2736 return -ENOMEM;
2737 }
2738 }
2739
2740 return 0;
2741}
2742
2743
2744
2745
2746
2747
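/**
 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be added
 **/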
2748int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2749{
2750 int err;
2751
2752 if (vsi->info.pvid)
2753 return -EINVAL;
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763 if (!vid)
2764 return 0;
2765
2766
2767 spin_lock_bh(&vsi->mac_filter_hash_lock);
2768 err = i40e_add_vlan_all_mac(vsi, vid);
2769 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2770 if (err)
2771 return err;
2772
2773
2774
2775
2776 i40e_service_event_schedule(vsi->back);
2777 return 0;
2778}
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2794{
2795 struct i40e_mac_filter *f;
2796 struct hlist_node *h;
2797 int bkt;
2798
2799 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2800 if (f->vlan == vid)
2801 __i40e_del_filter(vsi, f);
2802 }
2803}
2804
2805
2806
2807
2808
2809
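/**
 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be removed
 **/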
2810void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2811{
2812 if (!vid || vsi->info.pvid)
2813 return;
2814
2815 spin_lock_bh(&vsi->mac_filter_hash_lock);
2816 i40e_rm_vlan_all_mac(vsi, vid);
2817 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2818
2819
2820
2821
2822 i40e_service_event_schedule(vsi->back);
2823}
2824
2825
2826
2827
2828
2829
2830
2831
2832
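/**
 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol value
 * @vid: vlan id to be added
 *
 * net_device_ops implementation for adding vlan ids
 **/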
2833static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2834 __always_unused __be16 proto, u16 vid)
2835{
2836 struct i40e_netdev_priv *np = netdev_priv(netdev);
2837 struct i40e_vsi *vsi = np->vsi;
2838 int ret = 0;
2839
2840 if (vid >= VLAN_N_VID)
2841 return -EINVAL;
2842
2843 ret = i40e_vsi_add_vlan(vsi, vid);
2844 if (!ret)
2845 set_bit(vid, vsi->active_vlans);
2846
2847 return ret;
2848}
2849
2850
2851
2852
2853
2854
2855
2856static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
2857 __always_unused __be16 proto, u16 vid)
2858{
2859 struct i40e_netdev_priv *np = netdev_priv(netdev);
2860 struct i40e_vsi *vsi = np->vsi;
2861
2862 if (vid >= VLAN_N_VID)
2863 return;
2864 set_bit(vid, vsi->active_vlans);
2865}
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2876 __always_unused __be16 proto, u16 vid)
2877{
2878 struct i40e_netdev_priv *np = netdev_priv(netdev);
2879 struct i40e_vsi *vsi = np->vsi;
2880
2881
2882
2883
2884
2885 i40e_vsi_kill_vlan(vsi, vid);
2886
2887 clear_bit(vid, vsi->active_vlans);
2888
2889 return 0;
2890}
2891
2892
2893
2894
2895
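/**
 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
 * @vsi: the vsi being brought back up
 **/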
2896static void i40e_restore_vlan(struct i40e_vsi *vsi)
2897{
2898 u16 vid;
2899
2900 if (!vsi->netdev)
2901 return;
2902
2903 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2904 i40e_vlan_stripping_enable(vsi);
2905 else
2906 i40e_vlan_stripping_disable(vsi);
2907
2908 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2909 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
2910 vid);
2911}
2912
2913
2914
2915
2916
2917
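/**
 * i40e_vsi_add_pvid - Add pvid for the VSI
 * @vsi: the vsi being adjusted
 * @vid: the vlan id to set as a PVID
 **/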
2918int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2919{
2920 struct i40e_vsi_context ctxt;
2921 i40e_status ret;
2922
2923 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2924 vsi->info.pvid = cpu_to_le16(vid);
2925 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2926 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2927 I40E_AQ_VSI_PVLAN_EMOD_STR;
2928
2929 ctxt.seid = vsi->seid;
2930 ctxt.info = vsi->info;
2931 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2932 if (ret) {
2933 dev_info(&vsi->back->pdev->dev,
2934 "add pvid failed, err %s aq_err %s\n",
2935 i40e_stat_str(&vsi->back->hw, ret),
2936 i40e_aq_str(&vsi->back->hw,
2937 vsi->back->hw.aq.asq_last_status));
2938 return -ENOENT;
2939 }
2940
2941 return 0;
2942}
2943
2944
2945
2946
2947
2948
2949
2950void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2951{
2952 i40e_vlan_stripping_disable(vsi);
2953
2954 vsi->info.pvid = 0;
2955}
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2968{
2969 int i, err = 0;
2970
2971 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2972 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2973
2974 if (!i40e_enabled_xdp_vsi(vsi))
2975 return err;
2976
2977 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2978 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
2979
2980 return err;
2981}
2982
2983
2984
2985
2986
2987
2988
2989static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2990{
2991 int i;
2992
2993 if (vsi->tx_rings) {
2994 for (i = 0; i < vsi->num_queue_pairs; i++)
2995 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2996 i40e_free_tx_resources(vsi->tx_rings[i]);
2997 }
2998
2999 if (vsi->xdp_rings) {
3000 for (i = 0; i < vsi->num_queue_pairs; i++)
3001 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3002 i40e_free_tx_resources(vsi->xdp_rings[i]);
3003 }
3004}
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3017{
3018 int i, err = 0;
3019
3020 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3021 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3022 return err;
3023}
3024
3025
3026
3027
3028
3029
3030
3031static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3032{
3033 int i;
3034
3035 if (!vsi->rx_rings)
3036 return;
3037
3038 for (i = 0; i < vsi->num_queue_pairs; i++)
3039 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3040 i40e_free_rx_resources(vsi->rx_rings[i]);
3041}
3042
3043
3044
3045
3046
3047
3048
3049
3050static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3051{
3052 int cpu;
3053
3054 if (!ring->q_vector || !ring->netdev || ring->ch)
3055 return;
3056
3057
3058 if (ring->vsi->tc_config.numtc > 1)
3059 return;
3060
3061
3062 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3063 return;
3064
3065 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3066 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3067 ring->queue_index);
3068}
3069
3070
3071
3072
3073
3074
3075
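/**
 * i40e_configure_tx_ring - Configure a transmit ring context
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
 **/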
3076static int i40e_configure_tx_ring(struct i40e_ring *ring)
3077{
3078 struct i40e_vsi *vsi = ring->vsi;
3079 u16 pf_q = vsi->base_queue + ring->queue_index;
3080 struct i40e_hw *hw = &vsi->back->hw;
3081 struct i40e_hmc_obj_txq tx_ctx;
3082 i40e_status err = 0;
3083 u32 qtx_ctl = 0;
3084
3085 if (ring_is_xdp(ring))
3086 ring->xsk_umem = i40e_xsk_umem(ring);
3087
3088
3089 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3090 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3091 ring->atr_count = 0;
3092 } else {
3093 ring->atr_sample_rate = 0;
3094 }
3095
3096
3097 i40e_config_xps_tx_ring(ring);
3098
3099
3100 memset(&tx_ctx, 0, sizeof(tx_ctx));
3101
3102 tx_ctx.new_context = 1;
3103 tx_ctx.base = (ring->dma / 128);
3104 tx_ctx.qlen = ring->count;
3105 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3106 I40E_FLAG_FD_ATR_ENABLED));
3107 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3108
3109 if (vsi->type != I40E_VSI_FDIR)
3110 tx_ctx.head_wb_ena = 1;
3111 tx_ctx.head_wb_addr = ring->dma +
3112 (ring->count * sizeof(struct i40e_tx_desc));
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125 if (ring->ch)
3126 tx_ctx.rdylist =
3127 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3128
3129 else
3130 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3131
3132 tx_ctx.rdylist_act = 0;
3133
3134
3135 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3136 if (err) {
3137 dev_info(&vsi->back->pdev->dev,
3138 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3139 ring->queue_index, pf_q, err);
3140 return -ENOMEM;
3141 }
3142
3143
3144 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3145 if (err) {
3146 dev_info(&vsi->back->pdev->dev,
3147 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3148 ring->queue_index, pf_q, err);
3149 return -ENOMEM;
3150 }
3151
3152
3153 if (ring->ch) {
3154 if (ring->ch->type == I40E_VSI_VMDQ2)
3155 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3156 else
3157 return -EINVAL;
3158
3159 qtx_ctl |= (ring->ch->vsi_number <<
3160 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3161 I40E_QTX_CTL_VFVM_INDX_MASK;
3162 } else {
3163 if (vsi->type == I40E_VSI_VMDQ2) {
3164 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3165 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3166 I40E_QTX_CTL_VFVM_INDX_MASK;
3167 } else {
3168 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3169 }
3170 }
3171
3172 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3173 I40E_QTX_CTL_PF_INDX_MASK);
3174 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3175 i40e_flush(hw);
3176
3177
3178 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3179
3180 return 0;
3181}
3182
3183
3184
3185
3186
3187
3188
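/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
 **/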
3189static int i40e_configure_rx_ring(struct i40e_ring *ring)
3190{
3191 struct i40e_vsi *vsi = ring->vsi;
3192 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3193 u16 pf_q = vsi->base_queue + ring->queue_index;
3194 struct i40e_hw *hw = &vsi->back->hw;
3195 struct i40e_hmc_obj_rxq rx_ctx;
3196 i40e_status err = 0;
3197 bool ok;
3198 int ret;
3199
3200 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3201
3202
3203 memset(&rx_ctx, 0, sizeof(rx_ctx));
3204
3205 if (ring->vsi->type == I40E_VSI_MAIN)
3206 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3207
3208 ring->xsk_umem = i40e_xsk_umem(ring);
3209 if (ring->xsk_umem) {
3210 ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
3211 XDP_PACKET_HEADROOM;
3212
3213
3214
3215
3216 chain_len = 1;
3217
3218
3219
3220 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3221 MEM_TYPE_ZERO_COPY,
3222 &ring->zca);
3223 if (ret)
3224 return ret;
3225 dev_info(&vsi->back->pdev->dev,
3226 "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
3227 ring->queue_index);
3228
3229 } else {
3230 ring->rx_buf_len = vsi->rx_buf_len;
3231 if (ring->vsi->type == I40E_VSI_MAIN) {
3232 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3233 MEM_TYPE_PAGE_SHARED,
3234 NULL);
3235 if (ret)
3236 return ret;
3237 }
3238 }
3239
3240 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3241 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3242
3243 rx_ctx.base = (ring->dma / 128);
3244 rx_ctx.qlen = ring->count;
3245
3246
3247 rx_ctx.dsize = 1;
3248
3249
3250
3251
3252 rx_ctx.hsplit_0 = 0;
3253
3254 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3255 if (hw->revision_id == 0)
3256 rx_ctx.lrxqthresh = 0;
3257 else
3258 rx_ctx.lrxqthresh = 1;
3259 rx_ctx.crcstrip = 1;
3260 rx_ctx.l2tsel = 1;
3261
3262 rx_ctx.showiv = 0;
3263
3264 rx_ctx.prefena = 1;
3265
3266
3267 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3268 if (err) {
3269 dev_info(&vsi->back->pdev->dev,
3270 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3271 ring->queue_index, pf_q, err);
3272 return -ENOMEM;
3273 }
3274
3275
3276 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3277 if (err) {
3278 dev_info(&vsi->back->pdev->dev,
3279 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3280 ring->queue_index, pf_q, err);
3281 return -ENOMEM;
3282 }
3283
3284
3285 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3286 clear_ring_build_skb_enabled(ring);
3287 else
3288 set_ring_build_skb_enabled(ring);
3289
3290
3291 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3292 writel(0, ring->tail);
3293
3294
3295 ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3296 if (!ok) {
3297 dev_info(&vsi->back->pdev->dev,
3298 "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
3299 ring->xsk_umem ? "UMEM enabled " : "",
3300 ring->queue_index, pf_q);
3301 }
3302
3303 return 0;
3304}
3305
3306
3307
3308
3309
3310
3311
3312static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3313{
3314 int err = 0;
3315 u16 i;
3316
3317 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3318 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3319
3320 if (!i40e_enabled_xdp_vsi(vsi))
3321 return err;
3322
3323 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3324 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3325
3326 return err;
3327}
3328
3329
3330
3331
3332
3333
3334
3335static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3336{
3337 int err = 0;
3338 u16 i;
3339
3340 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3341 vsi->max_frame = I40E_MAX_RXBUFFER;
3342 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3343#if (PAGE_SIZE < 8192)
3344 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3345 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3346 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3347 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3348#endif
3349 } else {
3350 vsi->max_frame = I40E_MAX_RXBUFFER;
3351 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3352 I40E_RXBUFFER_2048;
3353 }
3354
3355
3356 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3357 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3358
3359 return err;
3360}
3361
3362
3363
3364
3365
3366static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3367{
3368 struct i40e_ring *tx_ring, *rx_ring;
3369 u16 qoffset, qcount;
3370 int i, n;
3371
3372 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3373
3374 for (i = 0; i < vsi->num_queue_pairs; i++) {
3375 rx_ring = vsi->rx_rings[i];
3376 tx_ring = vsi->tx_rings[i];
3377 rx_ring->dcb_tc = 0;
3378 tx_ring->dcb_tc = 0;
3379 }
3380 return;
3381 }
3382
3383 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3384 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3385 continue;
3386
3387 qoffset = vsi->tc_config.tc_info[n].qoffset;
3388 qcount = vsi->tc_config.tc_info[n].qcount;
3389 for (i = qoffset; i < (qoffset + qcount); i++) {
3390 rx_ring = vsi->rx_rings[i];
3391 tx_ring = vsi->tx_rings[i];
3392 rx_ring->dcb_tc = n;
3393 tx_ring->dcb_tc = n;
3394 }
3395 }
3396}
3397
3398
3399
3400
3401
3402static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3403{
3404 if (vsi->netdev)
3405 i40e_set_rx_mode(vsi->netdev);
3406}
3407
3408
3409
3410
3411
3412
3413
3414
3415static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3416{
3417 struct i40e_fdir_filter *filter;
3418 struct i40e_pf *pf = vsi->back;
3419 struct hlist_node *node;
3420
3421 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3422 return;
3423
3424
3425 pf->fd_tcp4_filter_cnt = 0;
3426 pf->fd_udp4_filter_cnt = 0;
3427 pf->fd_sctp4_filter_cnt = 0;
3428 pf->fd_ip4_filter_cnt = 0;
3429
3430 hlist_for_each_entry_safe(filter, node,
3431 &pf->fdir_filter_list, fdir_node) {
3432 i40e_add_del_fdir(vsi, filter, true);
3433 }
3434}
3435
3436
3437
3438
3439
3440static int i40e_vsi_configure(struct i40e_vsi *vsi)
3441{
3442 int err;
3443
3444 i40e_set_vsi_rx_mode(vsi);
3445 i40e_restore_vlan(vsi);
3446 i40e_vsi_config_dcb_rings(vsi);
3447 err = i40e_vsi_configure_tx(vsi);
3448 if (!err)
3449 err = i40e_vsi_configure_rx(vsi);
3450
3451 return err;
3452}
3453
3454
3455
3456
3457
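/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 **/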
3458static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3459{
3460 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3461 struct i40e_pf *pf = vsi->back;
3462 struct i40e_hw *hw = &pf->hw;
3463 u16 vector;
3464 int i, q;
3465 u32 qp;
3466
3467
3468
3469
3470
3471 qp = vsi->base_queue;
3472 vector = vsi->base_vector;
3473 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3474 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3475
3476 q_vector->rx.next_update = jiffies + 1;
3477 q_vector->rx.target_itr =
3478 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3479 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3480 q_vector->rx.target_itr);
3481 q_vector->rx.current_itr = q_vector->rx.target_itr;
3482
3483 q_vector->tx.next_update = jiffies + 1;
3484 q_vector->tx.target_itr =
3485 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3486 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3487 q_vector->tx.target_itr);
3488 q_vector->tx.current_itr = q_vector->tx.target_itr;
3489
3490 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3491 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3492
3493
3494 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3495 for (q = 0; q < q_vector->num_ringpairs; q++) {
3496 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
3497 u32 val;
3498
3499 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3500 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3501 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3502 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3503 (I40E_QUEUE_TYPE_TX <<
3504 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3505
3506 wr32(hw, I40E_QINT_RQCTL(qp), val);
3507
3508 if (has_xdp) {
3509 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3510 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3511 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3512 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3513 (I40E_QUEUE_TYPE_TX <<
3514 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3515
3516 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3517 }
3518
3519 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3520 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3521 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3522 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3523 (I40E_QUEUE_TYPE_RX <<
3524 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3525
3526
3527 if (q == (q_vector->num_ringpairs - 1))
3528 val |= (I40E_QUEUE_END_OF_LIST <<
3529 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3530
3531 wr32(hw, I40E_QINT_TQCTL(qp), val);
3532 qp++;
3533 }
3534 }
3535
3536 i40e_flush(hw);
3537}
3538
3539
3540
3541
3542
3543static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3544{
3545 struct i40e_hw *hw = &pf->hw;
3546 u32 val;
3547
3548
3549 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3550 rd32(hw, I40E_PFINT_ICR0);
3551
3552 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3553 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3554 I40E_PFINT_ICR0_ENA_GRST_MASK |
3555 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3556 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3557 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3558 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3559 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3560
3561 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3562 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3563
3564 if (pf->flags & I40E_FLAG_PTP)
3565 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3566
3567 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3568
3569
3570 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3571 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3572
3573
3574 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3575}
3576
3577
3578
3579
3580
3581static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3582{
3583 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3584 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3585 struct i40e_pf *pf = vsi->back;
3586 struct i40e_hw *hw = &pf->hw;
3587 u32 val;
3588
3589
3590 q_vector->rx.next_update = jiffies + 1;
3591 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3592 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr);
3593 q_vector->rx.current_itr = q_vector->rx.target_itr;
3594 q_vector->tx.next_update = jiffies + 1;
3595 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3596 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr);
3597 q_vector->tx.current_itr = q_vector->tx.target_itr;
3598
3599 i40e_enable_misc_int_causes(pf);
3600
3601
3602 wr32(hw, I40E_PFINT_LNKLST0, 0);
3603
3604
3605 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3606 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3607 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3608 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3609
3610 wr32(hw, I40E_QINT_RQCTL(0), val);
3611
3612 if (i40e_enabled_xdp_vsi(vsi)) {
3613 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3614 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3615 (I40E_QUEUE_TYPE_TX
3616 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3617
3618 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3619 }
3620
3621 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3622 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3623 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3624
3625 wr32(hw, I40E_QINT_TQCTL(0), val);
3626 i40e_flush(hw);
3627}
3628
3629
3630
3631
3632
3633void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3634{
3635 struct i40e_hw *hw = &pf->hw;
3636
3637 wr32(hw, I40E_PFINT_DYN_CTL0,
3638 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3639 i40e_flush(hw);
3640}
3641
3642
3643
3644
3645
3646void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3647{
3648 struct i40e_hw *hw = &pf->hw;
3649 u32 val;
3650
3651 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3652 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3653 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3654
3655 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3656 i40e_flush(hw);
3657}
3658
3659
3660
3661
3662
3663
3664static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3665{
3666 struct i40e_q_vector *q_vector = data;
3667
3668 if (!q_vector->tx.ring && !q_vector->rx.ring)
3669 return IRQ_HANDLED;
3670
3671 napi_schedule_irqoff(&q_vector->napi);
3672
3673 return IRQ_HANDLED;
3674}
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3685 const cpumask_t *mask)
3686{
3687 struct i40e_q_vector *q_vector =
3688 container_of(notify, struct i40e_q_vector, affinity_notify);
3689
3690 cpumask_copy(&q_vector->affinity_mask, mask);
3691}
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701static void i40e_irq_affinity_release(struct kref *ref) {}
3702
3703
3704
3705
3706
3707
3708
3709
3710static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3711{
3712 int q_vectors = vsi->num_q_vectors;
3713 struct i40e_pf *pf = vsi->back;
3714 int base = vsi->base_vector;
3715 int rx_int_idx = 0;
3716 int tx_int_idx = 0;
3717 int vector, err;
3718 int irq_num;
3719 int cpu;
3720
3721 for (vector = 0; vector < q_vectors; vector++) {
3722 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3723
3724 irq_num = pf->msix_entries[base + vector].vector;
3725
3726 if (q_vector->tx.ring && q_vector->rx.ring) {
3727 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3728 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3729 tx_int_idx++;
3730 } else if (q_vector->rx.ring) {
3731 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3732 "%s-%s-%d", basename, "rx", rx_int_idx++);
3733 } else if (q_vector->tx.ring) {
3734 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3735 "%s-%s-%d", basename, "tx", tx_int_idx++);
3736 } else {
3737
3738 continue;
3739 }
3740 err = request_irq(irq_num,
3741 vsi->irq_handler,
3742 0,
3743 q_vector->name,
3744 q_vector);
3745 if (err) {
3746 dev_info(&pf->pdev->dev,
3747 "MSIX request_irq failed, error: %d\n", err);
3748 goto free_queue_irqs;
3749 }
3750
3751
3752 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3753 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3754 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3755
3756
3757
3758
3759
3760
3761 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3762 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
3763 }
3764
3765 vsi->irqs_ready = true;
3766 return 0;
3767
3768free_queue_irqs:
3769 while (vector) {
3770 vector--;
3771 irq_num = pf->msix_entries[base + vector].vector;
3772 irq_set_affinity_notifier(irq_num, NULL);
3773 irq_set_affinity_hint(irq_num, NULL);
3774 free_irq(irq_num, &vsi->q_vectors[vector]);
3775 }
3776 return err;
3777}
3778
3779
3780
3781
3782
3783static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3784{
3785 struct i40e_pf *pf = vsi->back;
3786 struct i40e_hw *hw = &pf->hw;
3787 int base = vsi->base_vector;
3788 int i;
3789
3790
3791 for (i = 0; i < vsi->num_queue_pairs; i++) {
3792 u32 val;
3793
3794 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3795 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3796 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3797
3798 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3799 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3800 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3801
3802 if (!i40e_enabled_xdp_vsi(vsi))
3803 continue;
3804 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3805 }
3806
3807
3808 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3809 for (i = vsi->base_vector;
3810 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3811 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3812
3813 i40e_flush(hw);
3814 for (i = 0; i < vsi->num_q_vectors; i++)
3815 synchronize_irq(pf->msix_entries[i + base].vector);
3816 } else {
3817
3818 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3819 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3820 i40e_flush(hw);
3821 synchronize_irq(pf->pdev->irq);
3822 }
3823}
3824
3825
3826
3827
3828
3829static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3830{
3831 struct i40e_pf *pf = vsi->back;
3832 int i;
3833
3834 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3835 for (i = 0; i < vsi->num_q_vectors; i++)
3836 i40e_irq_dynamic_enable(vsi, i);
3837 } else {
3838 i40e_irq_dynamic_enable_icr0(pf);
3839 }
3840
3841 i40e_flush(&pf->hw);
3842 return 0;
3843}
3844
3845
3846
3847
3848
3849static void i40e_free_misc_vector(struct i40e_pf *pf)
3850{
3851
3852 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3853 i40e_flush(&pf->hw);
3854
3855 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
3856 synchronize_irq(pf->msix_entries[0].vector);
3857 free_irq(pf->msix_entries[0].vector, pf);
3858 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
3859 }
3860}
3861
3862
3863
3864
3865
3866
3867
3868
3869
3870
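/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure (passed to request_irq)
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts.  This is also used in
 * MSIX mode to handle the non-queue interrupts.
 **/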
3871static irqreturn_t i40e_intr(int irq, void *data)
3872{
3873 struct i40e_pf *pf = (struct i40e_pf *)data;
3874 struct i40e_hw *hw = &pf->hw;
3875 irqreturn_t ret = IRQ_NONE;
3876 u32 icr0, icr0_remaining;
3877 u32 val, ena_mask;
3878
3879 icr0 = rd32(hw, I40E_PFINT_ICR0);
3880 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3881
3882
3883 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3884 goto enable_intr;
3885
3886
3887 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3888 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3889 pf->sw_int_count++;
3890
3891 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3892 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3893 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3894 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
3895 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
3896 }
3897
3898
3899 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3900 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3901 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3902
3903
3904
3905
3906
3907
3908
3909 if (!test_bit(__I40E_DOWN, pf->state))
3910 napi_schedule_irqoff(&q_vector->napi);
3911 }
3912
3913 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3914 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3915 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
3916 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3917 }
3918
3919 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3920 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3921 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
3922 }
3923
3924 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3925 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3926 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3927 }
3928
3929 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3930 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
3931 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
3932 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3933 val = rd32(hw, I40E_GLGEN_RSTAT);
3934 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3935 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3936 if (val == I40E_RESET_CORER) {
3937 pf->corer_count++;
3938 } else if (val == I40E_RESET_GLOBR) {
3939 pf->globr_count++;
3940 } else if (val == I40E_RESET_EMPR) {
3941 pf->empr_count++;
3942 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
3943 }
3944 }
3945
3946 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3947 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3948 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3949 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3950 rd32(hw, I40E_PFHMC_ERRORINFO),
3951 rd32(hw, I40E_PFHMC_ERRORDATA));
3952 }
3953
3954 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3955 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3956
3957 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3958 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3959 i40e_ptp_tx_hwtstamp(pf);
3960 }
3961 }
3962
3963
3964
3965
3966
3967 icr0_remaining = icr0 & ena_mask;
3968 if (icr0_remaining) {
3969 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3970 icr0_remaining);
3971 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3972 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3973 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3974 dev_info(&pf->pdev->dev, "device will be reset\n");
3975 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
3976 i40e_service_event_schedule(pf);
3977 }
3978 ena_mask &= ~icr0_remaining;
3979 }
3980 ret = IRQ_HANDLED;
3981
3982enable_intr:
3983
3984 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3985 if (!test_bit(__I40E_DOWN, pf->state)) {
3986 i40e_service_event_schedule(pf);
3987 i40e_irq_dynamic_enable_icr0(pf);
3988 }
3989
3990 return ret;
3991}
3992
3993
3994
3995
3996
3997
3998
3999
4000static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4001{
4002 struct i40e_vsi *vsi = tx_ring->vsi;
4003 u16 i = tx_ring->next_to_clean;
4004 struct i40e_tx_buffer *tx_buf;
4005 struct i40e_tx_desc *tx_desc;
4006
4007 tx_buf = &tx_ring->tx_bi[i];
4008 tx_desc = I40E_TX_DESC(tx_ring, i);
4009 i -= tx_ring->count;
4010
4011 do {
4012 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4013
4014
4015 if (!eop_desc)
4016 break;
4017
4018
4019 smp_rmb();
4020
4021
4022 if (!(eop_desc->cmd_type_offset_bsz &
4023 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4024 break;
4025
4026
4027 tx_buf->next_to_watch = NULL;
4028
4029 tx_desc->buffer_addr = 0;
4030 tx_desc->cmd_type_offset_bsz = 0;
4031
4032 tx_buf++;
4033 tx_desc++;
4034 i++;
4035 if (unlikely(!i)) {
4036 i -= tx_ring->count;
4037 tx_buf = tx_ring->tx_bi;
4038 tx_desc = I40E_TX_DESC(tx_ring, 0);
4039 }
4040
4041 dma_unmap_single(tx_ring->dev,
4042 dma_unmap_addr(tx_buf, dma),
4043 dma_unmap_len(tx_buf, len),
4044 DMA_TO_DEVICE);
4045 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4046 kfree(tx_buf->raw_buf);
4047
4048 tx_buf->raw_buf = NULL;
4049 tx_buf->tx_flags = 0;
4050 tx_buf->next_to_watch = NULL;
4051 dma_unmap_len_set(tx_buf, len, 0);
4052 tx_desc->buffer_addr = 0;
4053 tx_desc->cmd_type_offset_bsz = 0;
4054
4055
4056 tx_buf++;
4057 tx_desc++;
4058 i++;
4059 if (unlikely(!i)) {
4060 i -= tx_ring->count;
4061 tx_buf = tx_ring->tx_bi;
4062 tx_desc = I40E_TX_DESC(tx_ring, 0);
4063 }
4064
4065
4066 budget--;
4067 } while (likely(budget));
4068
4069 i += tx_ring->count;
4070 tx_ring->next_to_clean = i;
4071
4072 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4073 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4074
4075 return budget > 0;
4076}
4077
4078
4079
4080
4081
4082
4083static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4084{
4085 struct i40e_q_vector *q_vector = data;
4086 struct i40e_vsi *vsi;
4087
4088 if (!q_vector->tx.ring)
4089 return IRQ_HANDLED;
4090
4091 vsi = q_vector->tx.ring->vsi;
4092 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4093
4094 return IRQ_HANDLED;
4095}
4096
4097
4098
4099
4100
4101
4102
4103static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4104{
4105 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4106 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4107 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4108
4109 tx_ring->q_vector = q_vector;
4110 tx_ring->next = q_vector->tx.ring;
4111 q_vector->tx.ring = tx_ring;
4112 q_vector->tx.count++;
4113
4114
4115 if (i40e_enabled_xdp_vsi(vsi)) {
4116 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4117
4118 xdp_ring->q_vector = q_vector;
4119 xdp_ring->next = q_vector->tx.ring;
4120 q_vector->tx.ring = xdp_ring;
4121 q_vector->tx.count++;
4122 }
4123
4124 rx_ring->q_vector = q_vector;
4125 rx_ring->next = q_vector->rx.ring;
4126 q_vector->rx.ring = rx_ring;
4127 q_vector->rx.count++;
4128}
4129
4130
4131
4132
4133
4134
4135
4136
4137
4138
4139static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4140{
4141 int qp_remaining = vsi->num_queue_pairs;
4142 int q_vectors = vsi->num_q_vectors;
4143 int num_ringpairs;
4144 int v_start = 0;
4145 int qp_idx = 0;
4146
4147
4148
4149
4150
4151
4152
4153
4154 for (; v_start < q_vectors; v_start++) {
4155 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4156
4157 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4158
4159 q_vector->num_ringpairs = num_ringpairs;
4160 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4161
4162 q_vector->rx.count = 0;
4163 q_vector->tx.count = 0;
4164 q_vector->rx.ring = NULL;
4165 q_vector->tx.ring = NULL;
4166
4167 while (num_ringpairs--) {
4168 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4169 qp_idx++;
4170 qp_remaining--;
4171 }
4172 }
4173}
4174
4175
4176
4177
4178
4179
4180static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4181{
4182 struct i40e_pf *pf = vsi->back;
4183 int err;
4184
4185 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4186 err = i40e_vsi_request_irq_msix(vsi, basename);
4187 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4188 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4189 pf->int_name, pf);
4190 else
4191 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4192 pf->int_name, pf);
4193
4194 if (err)
4195 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4196
4197 return err;
4198}
4199
4200#ifdef CONFIG_NET_POLL_CONTROLLER
4201
4202
4203
4204
4205
4206
4207
4208static void i40e_netpoll(struct net_device *netdev)
4209{
4210 struct i40e_netdev_priv *np = netdev_priv(netdev);
4211 struct i40e_vsi *vsi = np->vsi;
4212 struct i40e_pf *pf = vsi->back;
4213 int i;
4214
4215
4216 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4217 return;
4218
4219 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4220 for (i = 0; i < vsi->num_q_vectors; i++)
4221 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4222 } else {
4223 i40e_intr(pf->pdev->irq, netdev);
4224 }
4225}
4226#endif
4227
4228#define I40E_QTX_ENA_WAIT_COUNT 50
4229
4230
4231
4232
4233
4234
4235
4236
4237
4238
4239
4240
4241static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4242{
4243 int i;
4244 u32 tx_reg;
4245
4246 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4247 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4248 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4249 break;
4250
4251 usleep_range(10, 20);
4252 }
4253 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4254 return -ETIMEDOUT;
4255
4256 return 0;
4257}
4258
4259
4260
4261
4262
4263
4264
4265
4266
4267
4268
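/**
 * i40e_control_tx_q - Start or stop a particular Tx queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @enable: start or stop the queue
 *
 * This function enables or disables a single queue. Note that
 * any delay required after the operation is expected to be
 * handled by the caller of this function.
 **/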
4269static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4270{
4271 struct i40e_hw *hw = &pf->hw;
4272 u32 tx_reg;
4273 int i;
4274
4275
4276 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4277 if (!enable)
4278 usleep_range(10, 20);
4279
4280 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4281 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4282 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4283 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4284 break;
4285 usleep_range(1000, 2000);
4286 }
4287
4288
4289 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4290 return;
4291
4292
4293 if (enable) {
4294 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4295 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4296 } else {
4297 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4298 }
4299
4300 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4301}
4302
4303
4304
4305
4306
4307
4308
4309
4310
4311int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4312 bool is_xdp, bool enable)
4313{
4314 int ret;
4315
4316 i40e_control_tx_q(pf, pf_q, enable);
4317
4318
4319 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4320 if (ret) {
4321 dev_info(&pf->pdev->dev,
4322 "VSI seid %d %sTx ring %d %sable timeout\n",
4323 seid, (is_xdp ? "XDP " : ""), pf_q,
4324 (enable ? "en" : "dis"));
4325 }
4326
4327 return ret;
4328}
4329
4330
4331
4332
4333
4334
4335static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
4336{
4337 struct i40e_pf *pf = vsi->back;
4338 int i, pf_q, ret = 0;
4339
4340 pf_q = vsi->base_queue;
4341 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4342 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4343 pf_q,
					     false /*is xdp*/, enable);
4345 if (ret)
4346 break;
4347
4348 if (!i40e_enabled_xdp_vsi(vsi))
4349 continue;
4350
4351 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4352 pf_q + vsi->alloc_queue_pairs,
					     true /*is xdp*/, enable);
4354 if (ret)
4355 break;
4356 }
4357 return ret;
4358}
4359
4360
4361
4362
4363
4364
4365
4366
4367
4368
4369
4370
4371static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4372{
4373 int i;
4374 u32 rx_reg;
4375
4376 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4377 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4378 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4379 break;
4380
4381 usleep_range(10, 20);
4382 }
4383 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4384 return -ETIMEDOUT;
4385
4386 return 0;
4387}
4388
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398
4399static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4400{
4401 struct i40e_hw *hw = &pf->hw;
4402 u32 rx_reg;
4403 int i;
4404
4405 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4406 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4407 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4408 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4409 break;
4410 usleep_range(1000, 2000);
4411 }
4412
4413
4414 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4415 return;
4416
4417
4418 if (enable)
4419 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4420 else
4421 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4422
4423 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4424}
4425
4426
4427
4428
4429
4430
4431
4432
4433
4434
4435
4436int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4437{
	int ret;

	i40e_control_rx_q(pf, pf_q, enable);

	ret = i40e_pf_rxq_wait(pf, pf_q, enable);

	return ret;
4448}
4449
4450
4451
4452
4453
4454
4455static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4456{
4457 struct i40e_pf *pf = vsi->back;
4458 int i, pf_q, ret = 0;
4459
4460 pf_q = vsi->base_queue;
4461 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4462 ret = i40e_control_wait_rx_q(pf, pf_q, enable);
4463 if (ret) {
4464 dev_info(&pf->pdev->dev,
4465 "VSI seid %d Rx ring %d %sable timeout\n",
4466 vsi->seid, pf_q, (enable ? "en" : "dis"));
4467 break;
4468 }
4469 }
4470
4471
4472
4473
4474 if (!enable)
4475 mdelay(50);
4476
4477 return ret;
4478}
4479
4480
4481
4482
4483
4484int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4485{
4486 int ret = 0;
4487
4488
4489 ret = i40e_vsi_control_rx(vsi, true);
4490 if (ret)
4491 return ret;
4492 ret = i40e_vsi_control_tx(vsi, true);
4493
4494 return ret;
4495}
4496
4497
4498
4499
4500
4501void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4502{
4503
4504 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4505 return i40e_vsi_stop_rings_no_wait(vsi);
4506
4507
4508
4509
4510 i40e_vsi_control_tx(vsi, false);
4511 i40e_vsi_control_rx(vsi, false);
4512}
4513
4514
4515
4516
4517
4518
4519
4520
4521
4522
4523
4524
4525void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4526{
4527 struct i40e_pf *pf = vsi->back;
4528 int i, pf_q;
4529
4530 pf_q = vsi->base_queue;
4531 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4532 i40e_control_tx_q(pf, pf_q, false);
4533 i40e_control_rx_q(pf, pf_q, false);
4534 }
4535}
4536
4537
4538
4539
4540
4541static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4542{
4543 struct i40e_pf *pf = vsi->back;
4544 struct i40e_hw *hw = &pf->hw;
4545 int base = vsi->base_vector;
4546 u32 val, qp;
4547 int i;
4548
4549 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4550 if (!vsi->q_vectors)
4551 return;
4552
4553 if (!vsi->irqs_ready)
4554 return;
4555
4556 vsi->irqs_ready = false;
4557 for (i = 0; i < vsi->num_q_vectors; i++) {
4558 int irq_num;
4559 u16 vector;
4560
4561 vector = i + base;
4562 irq_num = pf->msix_entries[vector].vector;
4563
4564
4565 if (!vsi->q_vectors[i] ||
4566 !vsi->q_vectors[i]->num_ringpairs)
4567 continue;
4568
4569
4570 irq_set_affinity_notifier(irq_num, NULL);
4571
4572 irq_set_affinity_hint(irq_num, NULL);
4573 synchronize_irq(irq_num);
4574 free_irq(irq_num, vsi->q_vectors[i]);
4575
4576
4577
4578
4579
4580
4581
4582
4583 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4584 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4585 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4586 val |= I40E_QUEUE_END_OF_LIST
4587 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4588 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4589
4590 while (qp != I40E_QUEUE_END_OF_LIST) {
4591 u32 next;
4592
4593 val = rd32(hw, I40E_QINT_RQCTL(qp));
4594
4595 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4596 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4597 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4598 I40E_QINT_RQCTL_INTEVENT_MASK);
4599
4600 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4601 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4602
4603 wr32(hw, I40E_QINT_RQCTL(qp), val);
4604
4605 val = rd32(hw, I40E_QINT_TQCTL(qp));
4606
4607 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4608 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4609
4610 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4611 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4612 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4613 I40E_QINT_TQCTL_INTEVENT_MASK);
4614
4615 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4616 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4617
4618 wr32(hw, I40E_QINT_TQCTL(qp), val);
4619 qp = next;
4620 }
4621 }
4622 } else {
4623 free_irq(pf->pdev->irq, pf);
4624
4625 val = rd32(hw, I40E_PFINT_LNKLST0);
4626 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4627 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4628 val |= I40E_QUEUE_END_OF_LIST
4629 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4630 wr32(hw, I40E_PFINT_LNKLST0, val);
4631
4632 val = rd32(hw, I40E_QINT_RQCTL(qp));
4633 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4634 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4635 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4636 I40E_QINT_RQCTL_INTEVENT_MASK);
4637
4638 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4639 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4640
4641 wr32(hw, I40E_QINT_RQCTL(qp), val);
4642
4643 val = rd32(hw, I40E_QINT_TQCTL(qp));
4644
4645 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4646 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4647 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4648 I40E_QINT_TQCTL_INTEVENT_MASK);
4649
4650 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4651 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4652
4653 wr32(hw, I40E_QINT_TQCTL(qp), val);
4654 }
4655}
4656
4657
4658
4659
4660
4661
4662
4663
4664
4665
4666static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4667{
4668 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4669 struct i40e_ring *ring;
4670
4671 if (!q_vector)
4672 return;
4673
4674
4675 i40e_for_each_ring(ring, q_vector->tx)
4676 ring->q_vector = NULL;
4677
4678 i40e_for_each_ring(ring, q_vector->rx)
4679 ring->q_vector = NULL;
4680
4681
4682 if (vsi->netdev)
4683 netif_napi_del(&q_vector->napi);
4684
4685 vsi->q_vectors[v_idx] = NULL;
4686
4687 kfree_rcu(q_vector, rcu);
4688}
4689
4690
4691
4692
4693
4694
4695
4696
4697static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4698{
4699 int v_idx;
4700
4701 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4702 i40e_free_q_vector(vsi, v_idx);
4703}
4704
4705
4706
4707
4708
4709static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4710{
4711
4712 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4713 pci_disable_msix(pf->pdev);
4714 kfree(pf->msix_entries);
4715 pf->msix_entries = NULL;
4716 kfree(pf->irq_pile);
4717 pf->irq_pile = NULL;
4718 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4719 pci_disable_msi(pf->pdev);
4720 }
4721 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4722}
4723
4724
4725
4726
4727
4728
4729
4730
4731static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4732{
4733 int i;
4734
4735 i40e_free_misc_vector(pf);
4736
4737 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4738 I40E_IWARP_IRQ_PILE_ID);
4739
4740 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4741 for (i = 0; i < pf->num_alloc_vsi; i++)
4742 if (pf->vsi[i])
4743 i40e_vsi_free_q_vectors(pf->vsi[i]);
4744 i40e_reset_interrupt_capability(pf);
4745}
4746
4747
4748
4749
4750
4751static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4752{
4753 int q_idx;
4754
4755 if (!vsi->netdev)
4756 return;
4757
4758 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4759 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4760
4761 if (q_vector->rx.ring || q_vector->tx.ring)
4762 napi_enable(&q_vector->napi);
4763 }
4764}
4765
4766
4767
4768
4769
4770static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4771{
4772 int q_idx;
4773
4774 if (!vsi->netdev)
4775 return;
4776
4777 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4778 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4779
4780 if (q_vector->rx.ring || q_vector->tx.ring)
4781 napi_disable(&q_vector->napi);
4782 }
4783}
4784
4785
4786
4787
4788
4789static void i40e_vsi_close(struct i40e_vsi *vsi)
4790{
4791 struct i40e_pf *pf = vsi->back;
4792 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
4793 i40e_down(vsi);
4794 i40e_vsi_free_irq(vsi);
4795 i40e_vsi_free_tx_resources(vsi);
4796 i40e_vsi_free_rx_resources(vsi);
4797 vsi->current_netdev_flags = 0;
4798 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
4799 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4800 set_bit(__I40E_CLIENT_RESET, pf->state);
4801}
4802
4803
4804
4805
4806
4807static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4808{
4809 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4810 return;
4811
4812 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
4813 if (vsi->netdev && netif_running(vsi->netdev))
4814 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4815 else
4816 i40e_vsi_close(vsi);
4817}
4818
4819
4820
4821
4822
4823static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4824{
4825 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
4826 return;
4827
4828 if (vsi->netdev && netif_running(vsi->netdev))
4829 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4830 else
4831 i40e_vsi_open(vsi);
4832}
4833
4834
4835
4836
4837
4838static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4839{
4840 int v;
4841
4842 for (v = 0; v < pf->num_alloc_vsi; v++) {
4843 if (pf->vsi[v])
4844 i40e_quiesce_vsi(pf->vsi[v]);
4845 }
4846}
4847
4848
4849
4850
4851
4852static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4853{
4854 int v;
4855
4856 for (v = 0; v < pf->num_alloc_vsi; v++) {
4857 if (pf->vsi[v])
4858 i40e_unquiesce_vsi(pf->vsi[v]);
4859 }
4860}
4861
4862
4863
4864
4865
4866
4867
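/**
 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
 * @vsi: the VSI being configured
 *
 * Wait until all queues on a given VSI have been disabled.
 **/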
4868int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4869{
4870 struct i40e_pf *pf = vsi->back;
4871 int i, pf_q, ret;
4872
4873 pf_q = vsi->base_queue;
4874 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4875
4876 ret = i40e_pf_txq_wait(pf, pf_q, false);
4877 if (ret) {
4878 dev_info(&pf->pdev->dev,
4879 "VSI seid %d Tx ring %d disable timeout\n",
4880 vsi->seid, pf_q);
4881 return ret;
4882 }
4883
4884 if (!i40e_enabled_xdp_vsi(vsi))
4885 goto wait_rx;
4886
4887
4888 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
4889 false);
4890 if (ret) {
4891 dev_info(&pf->pdev->dev,
4892 "VSI seid %d XDP Tx ring %d disable timeout\n",
4893 vsi->seid, pf_q);
4894 return ret;
4895 }
4896wait_rx:
4897
4898 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4899 if (ret) {
4900 dev_info(&pf->pdev->dev,
4901 "VSI seid %d Rx ring %d disable timeout\n",
4902 vsi->seid, pf_q);
4903 return ret;
4904 }
4905 }
4906
4907 return 0;
4908}
4909
4910#ifdef CONFIG_I40E_DCB
4911
4912
4913
4914
4915
4916
4917
4918static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4919{
4920 int v, ret = 0;
4921
4922 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4923 if (pf->vsi[v]) {
4924 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4925 if (ret)
4926 break;
4927 }
4928 }
4929
4930 return ret;
4931}
4932
4933#endif
4934
4935
4936
4937
4938
4939
4940
4941
4942static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4943{
4944 struct i40e_dcb_app_priority_table app;
4945 struct i40e_hw *hw = &pf->hw;
4946 u8 enabled_tc = 1;
4947 u8 tc, i;
4948
4949 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4950
4951 for (i = 0; i < dcbcfg->numapps; i++) {
4952 app = dcbcfg->app[i];
4953 if (app.selector == I40E_APP_SEL_TCPIP &&
4954 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4955 tc = dcbcfg->etscfg.prioritytable[app.priority];
4956 enabled_tc |= BIT(tc);
4957 break;
4958 }
4959 }
4960
4961 return enabled_tc;
4962}
4963
4964
4965
4966
4967
4968
4969
4970static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4971{
4972 int i, tc_unused = 0;
4973 u8 num_tc = 0;
4974 u8 ret = 0;
4975
4976
4977
4978
4979
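	/* Scan the ETS priority table and build a bitmask of all TCs
	 * that have at least one user priority mapped to them.
	 */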
4980 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
4981 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
4982
4983
4984
4985
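	/* Count the enabled TCs; a gap in the bitmask means the TCs are
	 * non-contiguous, which DCB does not support.
	 */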
4986 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4987 if (num_tc & BIT(i)) {
4988 if (!tc_unused) {
4989 ret++;
4990 } else {
4991 pr_err("Non-contiguous TC - Disabling DCB\n");
4992 return 1;
4993 }
4994 } else {
4995 tc_unused = 1;
4996 }
4997 }
4998
4999
5000 if (!ret)
5001 ret = 1;
5002
5003 return ret;
5004}
5005
5006
5007
5008
5009
5010
5011
5012
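/**
 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
 * @dcbcfg: the corresponding DCBX configuration structure
 *
 * Return a bitmap of the traffic classes enabled by the given DCBX
 * configuration.
 **/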
5013static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5014{
5015 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5016 u8 enabled_tc = 1;
5017 u8 i;
5018
5019 for (i = 0; i < num_tc; i++)
5020 enabled_tc |= BIT(i);
5021
5022 return enabled_tc;
5023}
5024
5025
5026
5027
5028
5029
5030
5031
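/**
 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
 * @pf: PF being queried
 *
 * Return a bitmap of the traffic classes enabled by the current mqprio
 * configuration of the main LAN VSI.
 **/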
5032static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5033{
5034 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5035 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5036 u8 enabled_tc = 1, i;
5037
5038 for (i = 1; i < num_tc; i++)
5039 enabled_tc |= BIT(i);
5040 return enabled_tc;
5041}
5042
5043
5044
5045
5046
5047
5048
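/**
 * i40e_pf_get_num_tc - Get the number of enabled traffic classes for a PF
 * @pf: PF being queried
 *
 * Return the number of traffic classes enabled for the given PF.
 **/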
5049static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5050{
5051 struct i40e_hw *hw = &pf->hw;
5052 u8 i, enabled_tc = 1;
5053 u8 num_tc = 0;
5054 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5055
5056 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5057 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5058
5059
5060 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5061 return 1;
5062
5063
5064 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5065 return i40e_dcb_get_num_tc(dcbcfg);
5066
5067
5068 if (pf->hw.func_caps.iscsi)
5069 enabled_tc = i40e_get_iscsi_tc_map(pf);
5070 else
5071 return 1;
5072
5073 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5074 if (enabled_tc & BIT(i))
5075 num_tc++;
5076 }
5077 return num_tc;
5078}
5079
5080
5081
5082
5083
5084
5085
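/**
 * i40e_pf_get_tc_map - Get a bitmap of the enabled traffic classes
 * @pf: PF being queried
 *
 * Return a bitmap of the traffic classes enabled for this PF.
 **/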
5086static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5087{
5088 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5089 return i40e_mqprio_get_enabled_tc(pf);
5090
5091
5092
5093
5094 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5095 return I40E_DEFAULT_TRAFFIC_CLASS;
5096
5097
5098 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5099 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5100
5101
5102 if (pf->hw.func_caps.iscsi)
5103 return i40e_get_iscsi_tc_map(pf);
5104 else
5105 return I40E_DEFAULT_TRAFFIC_CLASS;
5106}
5107
5108
5109
5110
5111
5112
5113
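/**
 * i40e_vsi_get_bw_info - Query VSI BW information
 * @vsi: the VSI being queried
 *
 * Query the VSI BW and ETS SLA configuration and cache it in the VSI.
 * Returns 0 on success, negative value on failure.
 **/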
5114static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5115{
5116 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5117 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5118 struct i40e_pf *pf = vsi->back;
5119 struct i40e_hw *hw = &pf->hw;
5120 i40e_status ret;
5121 u32 tc_bw_max;
5122 int i;
5123
5124
5125 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5126 if (ret) {
5127 dev_info(&pf->pdev->dev,
5128 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5129 i40e_stat_str(&pf->hw, ret),
5130 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5131 return -EINVAL;
5132 }
5133
5134
5135 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5136 NULL);
5137 if (ret) {
5138 dev_info(&pf->pdev->dev,
5139 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5140 i40e_stat_str(&pf->hw, ret),
5141 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5142 return -EINVAL;
5143 }
5144
5145 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5146 dev_info(&pf->pdev->dev,
5147 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5148 bw_config.tc_valid_bits,
5149 bw_ets_config.tc_valid_bits);
5150
5151 }
5152
5153 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5154 vsi->bw_max_quanta = bw_config.max_bw;
5155 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5156 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5157 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5158 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5159 vsi->bw_ets_limit_credits[i] =
5160 le16_to_cpu(bw_ets_config.credits[i]);
5161
5162 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
5163 }
5164
5165 return 0;
5166}
5167
5168
5169
5170
5171
5172
5173
5174
5175
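/**
 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
 * @vsi: the VSI being configured
 * @enabled_tc: TC bitmap
 * @bw_share: BW shared credits per TC
 *
 * Returns 0 on success, negative value on failure.
 **/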
5176static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5177 u8 *bw_share)
5178{
5179 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5180 struct i40e_pf *pf = vsi->back;
5181 i40e_status ret;
5182 int i;
5183
5184
5185 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5186 return 0;
5187 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5188 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5189 if (ret)
5190 dev_info(&pf->pdev->dev,
5191 "Failed to reset tx rate for vsi->seid %u\n",
5192 vsi->seid);
5193 return ret;
5194 }
5195 bw_data.tc_valid_bits = enabled_tc;
5196 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5197 bw_data.tc_bw_credits[i] = bw_share[i];
5198
5199 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5200 if (ret) {
5201 dev_info(&pf->pdev->dev,
5202 "AQ command Config VSI BW allocation per TC failed = %d\n",
5203 pf->hw.aq.asq_last_status);
5204 return -EINVAL;
5205 }
5206
5207 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5208 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5209
5210 return 0;
5211}
5212
5213
5214
5215
5216
5217
5218
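/**
 * i40e_vsi_config_netdev_tc - Set up the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 **/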
5219static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5220{
5221 struct net_device *netdev = vsi->netdev;
5222 struct i40e_pf *pf = vsi->back;
5223 struct i40e_hw *hw = &pf->hw;
5224 u8 netdev_tc = 0;
5225 int i;
5226 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5227
5228 if (!netdev)
5229 return;
5230
5231 if (!enabled_tc) {
5232 netdev_reset_tc(netdev);
5233 return;
5234 }
5235
5236
5237 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5238 return;
5239
5240
5241 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5242
5243
5244
5245
5246
5247
5248
5249 if (vsi->tc_config.enabled_tc & BIT(i))
5250 netdev_set_tc_queue(netdev,
5251 vsi->tc_config.tc_info[i].netdev_tc,
5252 vsi->tc_config.tc_info[i].qcount,
5253 vsi->tc_config.tc_info[i].qoffset);
5254 }
5255
5256 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5257 return;
5258
5259
5260 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5261
5262 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5263
5264 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5265 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5266 }
5267}
5268
5269
5270
5271
5272
5273
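/**
 * i40e_vsi_update_queue_map - Update our copy of the VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure returned by the update_vsi AQ command
 **/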
5274static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5275 struct i40e_vsi_context *ctxt)
5276{
5277
5278
5279
5280
5281 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5282 memcpy(&vsi->info.queue_mapping,
5283 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5284 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5285 sizeof(vsi->info.tc_mapping));
5286}
5287
5288
5289
5290
5291
5292
5293
5294
5295
5296
5297
5298
5299
5300
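/**
 * i40e_vsi_config_tc - Configure the VSI Tx scheduler for a given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * Configure the VSI for the TCs in the given bitmap, using a default
 * (equal) bandwidth share across TCs, then update the VSI parameters
 * and the netdev TC mapping accordingly.
 **/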
5301static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5302{
5303 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5304 struct i40e_pf *pf = vsi->back;
5305 struct i40e_hw *hw = &pf->hw;
5306 struct i40e_vsi_context ctxt;
5307 int ret = 0;
5308 int i;
5309
5310
5311 if (vsi->tc_config.enabled_tc == enabled_tc &&
5312 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5313 return ret;
5314
5315
5316 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5317 if (enabled_tc & BIT(i))
5318 bw_share[i] = 1;
5319 }
5320
5321 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5322 if (ret) {
5323 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5324
5325 dev_info(&pf->pdev->dev,
5326 "Failed configuring TC map %d for VSI %d\n",
5327 enabled_tc, vsi->seid);
5328 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5329 &bw_config, NULL);
5330 if (ret) {
5331 dev_info(&pf->pdev->dev,
5332 "Failed querying vsi bw info, err %s aq_err %s\n",
5333 i40e_stat_str(hw, ret),
5334 i40e_aq_str(hw, hw->aq.asq_last_status));
5335 goto out;
5336 }
5337 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5338 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5339
5340 if (!valid_tc)
5341 valid_tc = bw_config.tc_valid_bits;
5342
5343 valid_tc |= 1;
5344 dev_info(&pf->pdev->dev,
5345 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5346 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5347 enabled_tc = valid_tc;
5348 }
5349
5350 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5351 if (ret) {
5352 dev_err(&pf->pdev->dev,
5353 "Unable to configure TC map %d for VSI %d\n",
5354 enabled_tc, vsi->seid);
5355 goto out;
5356 }
5357 }
5358
5359
5360 ctxt.seid = vsi->seid;
5361 ctxt.pf_num = vsi->back->hw.pf_id;
5362 ctxt.vf_num = 0;
5363 ctxt.uplink_seid = vsi->uplink_seid;
5364 ctxt.info = vsi->info;
5365 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5366 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5367 if (ret)
5368 goto out;
5369 } else {
5370 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5371 }
5372
5373
5374
5375
5376 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5377 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5378 vsi->num_queue_pairs);
5379 ret = i40e_vsi_config_rss(vsi);
5380 if (ret) {
5381 dev_info(&vsi->back->pdev->dev,
5382 "Failed to reconfig rss for num_queues\n");
5383 return ret;
5384 }
5385 vsi->reconfig_rss = false;
5386 }
5387 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5388 ctxt.info.valid_sections |=
5389 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5390 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5391 }
5392
5393
5394
5395
5396 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5397 if (ret) {
5398 dev_info(&pf->pdev->dev,
5399 "Update vsi tc config failed, err %s aq_err %s\n",
5400 i40e_stat_str(hw, ret),
5401 i40e_aq_str(hw, hw->aq.asq_last_status));
5402 goto out;
5403 }
5404
5405 i40e_vsi_update_queue_map(vsi, &ctxt);
5406 vsi->info.valid_sections = 0;
5407
5408
5409 ret = i40e_vsi_get_bw_info(vsi);
5410 if (ret) {
5411 dev_info(&pf->pdev->dev,
5412 "Failed updating vsi bw info, err %s aq_err %s\n",
5413 i40e_stat_str(hw, ret),
5414 i40e_aq_str(hw, hw->aq.asq_last_status));
5415 goto out;
5416 }
5417
5418
5419 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5420out:
5421 return ret;
5422}
5423
5424
5425
5426
5427
5428
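/**
 * i40e_get_link_speed - Return the link speed for the interface
 * @vsi: VSI being queried
 *
 * Returns the link speed in Mbps, or -EINVAL if unknown.
 **/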
5429static int i40e_get_link_speed(struct i40e_vsi *vsi)
5430{
5431 struct i40e_pf *pf = vsi->back;
5432
5433 switch (pf->hw.phy.link_info.link_speed) {
5434 case I40E_LINK_SPEED_40GB:
5435 return 40000;
5436 case I40E_LINK_SPEED_25GB:
5437 return 25000;
5438 case I40E_LINK_SPEED_20GB:
5439 return 20000;
5440 case I40E_LINK_SPEED_10GB:
5441 return 10000;
5442 case I40E_LINK_SPEED_1GB:
5443 return 1000;
5444 default:
5445 return -EINVAL;
5446 }
5447}
5448
5449
5450
5451
5452
5453
5454
5455
5456
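/**
 * i40e_set_bw_limit - Set a BW limit for Tx traffic based on max_tx_rate
 * @vsi: VSI to be configured
 * @seid: seid of the channel/VSI
 * @max_tx_rate: max Tx rate in Mbps to be configured as the BW limit
 *
 * The rate is converted to 50 Mbps credits before being programmed.
 **/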
5457int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5458{
5459 struct i40e_pf *pf = vsi->back;
5460 u64 credits = 0;
5461 int speed = 0;
5462 int ret = 0;
5463
5464 speed = i40e_get_link_speed(vsi);
5465 if (max_tx_rate > speed) {
5466 dev_err(&pf->pdev->dev,
5467 "Invalid max tx rate %llu specified for VSI seid %d.",
5468 max_tx_rate, seid);
5469 return -EINVAL;
5470 }
5471 if (max_tx_rate && max_tx_rate < 50) {
5472 dev_warn(&pf->pdev->dev,
5473 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5474 max_tx_rate = 50;
5475 }
5476
5477
5478 credits = max_tx_rate;
5479 do_div(credits, I40E_BW_CREDIT_DIVISOR);
5480 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5481 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5482 if (ret)
5483 dev_err(&pf->pdev->dev,
5484 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5485 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5486 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5487 return ret;
5488}
5489
5490
5491
5492
5493
5494
5495
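/**
 * i40e_remove_queue_channels - Remove the queue channels for the TCs
 * @vsi: VSI to be configured
 *
 * Detach the rings from each channel, remove the associated cloud
 * filters and BW limits, and delete the channel VSIs.
 **/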
5496static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5497{
5498 enum i40e_admin_queue_err last_aq_status;
5499 struct i40e_cloud_filter *cfilter;
5500 struct i40e_channel *ch, *ch_tmp;
5501 struct i40e_pf *pf = vsi->back;
5502 struct hlist_node *node;
5503 int ret, i;
5504
5505
5506
5507
5508 vsi->current_rss_size = 0;
5509
5510
5511 if (list_empty(&vsi->ch_list))
5512 return;
5513
5514 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5515 struct i40e_vsi *p_vsi;
5516
5517 list_del(&ch->list);
5518 p_vsi = ch->parent_vsi;
5519 if (!p_vsi || !ch->initialized) {
5520 kfree(ch);
5521 continue;
5522 }
5523
5524 for (i = 0; i < ch->num_queue_pairs; i++) {
5525 struct i40e_ring *tx_ring, *rx_ring;
5526 u16 pf_q;
5527
5528 pf_q = ch->base_queue + i;
5529 tx_ring = vsi->tx_rings[pf_q];
5530 tx_ring->ch = NULL;
5531
5532 rx_ring = vsi->rx_rings[pf_q];
5533 rx_ring->ch = NULL;
5534 }
5535
5536
5537 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5538 if (ret)
5539 dev_info(&vsi->back->pdev->dev,
5540 "Failed to reset tx rate for ch->seid %u\n",
5541 ch->seid);
5542
5543
5544 hlist_for_each_entry_safe(cfilter, node,
5545 &pf->cloud_filter_list, cloud_node) {
5546 if (cfilter->seid != ch->seid)
5547 continue;
5548
5549 hash_del(&cfilter->cloud_node);
5550 if (cfilter->dst_port)
5551 ret = i40e_add_del_cloud_filter_big_buf(vsi,
5552 cfilter,
5553 false);
5554 else
5555 ret = i40e_add_del_cloud_filter(vsi, cfilter,
5556 false);
5557 last_aq_status = pf->hw.aq.asq_last_status;
5558 if (ret)
5559 dev_info(&pf->pdev->dev,
5560 "Failed to delete cloud filter, err %s aq_err %s\n",
5561 i40e_stat_str(&pf->hw, ret),
5562 i40e_aq_str(&pf->hw, last_aq_status));
5563 kfree(cfilter);
5564 }
5565
5566
5567 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
5568 NULL);
5569 if (ret)
5570 dev_err(&vsi->back->pdev->dev,
5571 "unable to remove channel (%d) for parent VSI(%d)\n",
5572 ch->seid, p_vsi->seid);
5573 kfree(ch);
5574 }
5575 INIT_LIST_HEAD(&vsi->ch_list);
5576}
5577
5578
5579
5580
5581
5582
5583
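/**
 * i40e_is_any_channel - check whether any channel exists
 * @vsi: VSI the channels are associated with
 *
 * Returns true if at least one initialized channel exists for the VSI.
 **/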
5584static bool i40e_is_any_channel(struct i40e_vsi *vsi)
5585{
5586 struct i40e_channel *ch, *ch_tmp;
5587
5588 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5589 if (ch->initialized)
5590 return true;
5591 }
5592
5593 return false;
5594}
5595
5596
5597
5598
5599
5600
5601
5602
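/**
 * i40e_get_max_queues_for_channel - max queue count across channels
 * @vsi: VSI the channels are associated with
 *
 * Returns the largest num_queue_pairs configured on any initialized
 * channel of this VSI.
 **/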
5603static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
5604{
5605 struct i40e_channel *ch, *ch_tmp;
5606 int max = 0;
5607
5608 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5609 if (!ch->initialized)
5610 continue;
5611 if (ch->num_queue_pairs > max)
5612 max = ch->num_queue_pairs;
5613 }
5614
5615 return max;
5616}
5617
5618
5619
5620
5621
5622
5623
5624
5625
5626
5627
5628
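/**
 * i40e_validate_num_queues - validate num_queues w.r.t. channels
 * @pf: pointer to PF device
 * @num_queues: number of queues requested
 * @vsi: the parent VSI
 * @reconfig_rss: set to true if RSS needs to be reconfigured
 *
 * Validate the requested queue count in the context of a new channel
 * to be created.
 **/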
5629static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
5630 struct i40e_vsi *vsi, bool *reconfig_rss)
5631{
5632 int max_ch_queues;
5633
5634 if (!reconfig_rss)
5635 return -EINVAL;
5636
5637 *reconfig_rss = false;
5638 if (vsi->current_rss_size) {
5639 if (num_queues > vsi->current_rss_size) {
5640 dev_dbg(&pf->pdev->dev,
5641 "Error: num_queues (%d) > vsi's current_size(%d)\n",
5642 num_queues, vsi->current_rss_size);
5643 return -EINVAL;
5644 } else if ((num_queues < vsi->current_rss_size) &&
5645 (!is_power_of_2(num_queues))) {
5646 dev_dbg(&pf->pdev->dev,
5647 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
5648 num_queues, vsi->current_rss_size);
5649 return -EINVAL;
5650 }
5651 }
5652
5653 if (!is_power_of_2(num_queues)) {
5654
5655
5656
5657
5658
5659 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
5660 if (num_queues < max_ch_queues) {
5661 dev_dbg(&pf->pdev->dev,
5662 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
5663 num_queues, max_ch_queues);
5664 return -EINVAL;
5665 }
5666 *reconfig_rss = true;
5667 }
5668
5669 return 0;
5670}
5671
5672
5673
5674
5675
5676
5677
5678
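/**
 * i40e_vsi_reconfig_rss - reconfigure RSS for the given rss_size
 * @vsi: the VSI being set up
 * @rss_size: size of RSS; the LUT gets reprogrammed accordingly
 **/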
5679static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
5680{
5681 struct i40e_pf *pf = vsi->back;
5682 u8 seed[I40E_HKEY_ARRAY_SIZE];
5683 struct i40e_hw *hw = &pf->hw;
5684 int local_rss_size;
5685 u8 *lut;
5686 int ret;
5687
5688 if (!vsi->rss_size)
5689 return -EINVAL;
5690
5691 if (rss_size > vsi->rss_size)
5692 return -EINVAL;
5693
5694 local_rss_size = min_t(int, vsi->rss_size, rss_size);
5695 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
5696 if (!lut)
5697 return -ENOMEM;
5698
5699
5700 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
5701
5702
5703
5704
5705 if (vsi->rss_hkey_user)
5706 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
5707 else
5708 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
5709
5710 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
5711 if (ret) {
5712 dev_info(&pf->pdev->dev,
5713 "Cannot set RSS lut, err %s aq_err %s\n",
5714 i40e_stat_str(hw, ret),
5715 i40e_aq_str(hw, hw->aq.asq_last_status));
5716 kfree(lut);
5717 return ret;
5718 }
5719 kfree(lut);
5720
5721
5722 if (!vsi->orig_rss_size)
5723 vsi->orig_rss_size = vsi->rss_size;
5724 vsi->current_rss_size = local_rss_size;
5725
5726 return ret;
5727}
5728
5729
5730
5731
5732
5733
5734
5735
5736
5737
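/**
 * i40e_channel_setup_queue_map - Set up a channel queue map
 * @pf: pointer to PF device
 * @ctxt: VSI context structure
 * @ch: pointer to channel structure
 *
 * Set up the queue map section of the VSI context for a channel that
 * uses a single traffic class (TC0).
 **/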
5738static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
5739 struct i40e_vsi_context *ctxt,
5740 struct i40e_channel *ch)
5741{
5742 u16 qcount, qmap, sections = 0;
5743 u8 offset = 0;
5744 int pow;
5745
5746 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
5747 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
5748
5749 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
5750 ch->num_queue_pairs = qcount;
5751
5752
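	/* find the next higher power-of-2 of num queue pairs */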
5753 pow = ilog2(qcount);
5754 if (!is_power_of_2(qcount))
5755 pow++;
5756
5757 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5758 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
5759
5760
5761 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
5762
5763 ctxt->info.up_enable_bits = 0x1;
5764 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5765 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
5766 ctxt->info.valid_sections |= cpu_to_le16(sections);
5767}
5768
5769
5770
5771
5772
5773
5774
5775
5776
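/**
 * i40e_add_channel - add a channel by adding a VSI
 * @pf: pointer to PF device
 * @uplink_seid: underlying HW switching element (VEB) ID
 * @ch: pointer to channel structure
 *
 * Add a channel (VMDq2 VSI) via the admin queue and record its seid,
 * VSI number and queue mapping in the channel structure.
 **/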
5777static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
5778 struct i40e_channel *ch)
5779{
5780 struct i40e_hw *hw = &pf->hw;
5781 struct i40e_vsi_context ctxt;
5782 u8 enabled_tc = 0x1;
5783 int ret;
5784
5785 if (ch->type != I40E_VSI_VMDQ2) {
5786 dev_info(&pf->pdev->dev,
5787 "add new vsi failed, ch->type %d\n", ch->type);
5788 return -EINVAL;
5789 }
5790
5791 memset(&ctxt, 0, sizeof(ctxt));
5792 ctxt.pf_num = hw->pf_id;
5793 ctxt.vf_num = 0;
5794 ctxt.uplink_seid = uplink_seid;
5795 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5796 if (ch->type == I40E_VSI_VMDQ2)
5797 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5798
5799 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
5800 ctxt.info.valid_sections |=
5801 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5802 ctxt.info.switch_id =
5803 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5804 }
5805
5806
5807 i40e_channel_setup_queue_map(pf, &ctxt, ch);
5808
5809
5810 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5811 if (ret) {
5812 dev_info(&pf->pdev->dev,
5813 "add new vsi failed, err %s aq_err %s\n",
5814 i40e_stat_str(&pf->hw, ret),
5815 i40e_aq_str(&pf->hw,
5816 pf->hw.aq.asq_last_status));
5817 return -ENOENT;
5818 }
5819
5820
5821 ch->enabled_tc = enabled_tc;
5822 ch->seid = ctxt.seid;
5823 ch->vsi_number = ctxt.vsi_number;
5824 ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
5825
5826
5827
5828
5829
5830 ch->info.mapping_flags = ctxt.info.mapping_flags;
5831 memcpy(&ch->info.queue_mapping,
5832 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
5833 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
5834 sizeof(ctxt.info.tc_mapping));
5835
5836 return 0;
5837}
5838
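/**
 * i40e_channel_config_bw - configure BW for a channel
 * @vsi: the VSI being set up
 * @ch: pointer to channel structure
 * @bw_share: bandwidth share per traffic class
 **/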
5839static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
5840 u8 *bw_share)
5841{
5842 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5843 i40e_status ret;
5844 int i;
5845
5846 bw_data.tc_valid_bits = ch->enabled_tc;
5847 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5848 bw_data.tc_bw_credits[i] = bw_share[i];
5849
5850 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
5851 &bw_data, NULL);
5852 if (ret) {
5853 dev_info(&vsi->back->pdev->dev,
5854 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
5855 vsi->back->hw.aq.asq_last_status, ch->seid);
5856 return -EINVAL;
5857 }
5858
5859 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5860 ch->info.qs_handle[i] = bw_data.qs_handles[i];
5861
5862 return 0;
5863}
5864
5865
5866
5867
5868
5869
5870
5871
5872
5873
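/**
 * i40e_channel_config_tx_ring - configure the rings for a new channel
 * @pf: pointer to PF device
 * @vsi: the parent VSI
 * @ch: pointer to channel structure
 *
 * Configure the channel's BW allocation and associate the parent VSI's
 * Tx/Rx rings in the channel's queue range with the channel.
 **/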
5874static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
5875 struct i40e_vsi *vsi,
5876 struct i40e_channel *ch)
5877{
5878 i40e_status ret;
5879 int i;
5880 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5881
5882
5883 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5884 if (ch->enabled_tc & BIT(i))
5885 bw_share[i] = 1;
5886 }
5887
5888
5889 ret = i40e_channel_config_bw(vsi, ch, bw_share);
5890 if (ret) {
5891 dev_info(&vsi->back->pdev->dev,
5892 "Failed configuring TC map %d for channel (seid %u)\n",
5893 ch->enabled_tc, ch->seid);
5894 return ret;
5895 }
5896
5897 for (i = 0; i < ch->num_queue_pairs; i++) {
5898 struct i40e_ring *tx_ring, *rx_ring;
5899 u16 pf_q;
5900
5901 pf_q = ch->base_queue + i;
5902
5903
5904
5905
5906 tx_ring = vsi->tx_rings[pf_q];
5907 tx_ring->ch = ch;
5908
5909
5910 rx_ring = vsi->rx_rings[pf_q];
5911 rx_ring->ch = ch;
5912 }
5913
5914 return 0;
5915}
5916
5917
5918
5919
5920
5921
5922
5923
5924
5925
5926
5927
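/**
 * i40e_setup_hw_channel - set up a new channel in hardware
 * @pf: pointer to PF device
 * @vsi: the parent VSI
 * @ch: pointer to channel structure
 * @uplink_seid: underlying HW switching element (VEB) ID
 * @type: type of channel VSI to be created (e.g. VMDq2)
 *
 * Add the channel VSI, configure its rings and advance the parent
 * VSI's next_base_queue.
 **/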
5928static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
5929 struct i40e_vsi *vsi,
5930 struct i40e_channel *ch,
5931 u16 uplink_seid, u8 type)
5932{
5933 int ret;
5934
5935 ch->initialized = false;
5936 ch->base_queue = vsi->next_base_queue;
5937 ch->type = type;
5938
5939
5940 ret = i40e_add_channel(pf, uplink_seid, ch);
5941 if (ret) {
5942 dev_info(&pf->pdev->dev,
5943 "failed to add_channel using uplink_seid %u\n",
5944 uplink_seid);
5945 return ret;
5946 }
5947
5948
5949 ch->initialized = true;
5950
5951
5952 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
5953 if (ret) {
5954 dev_info(&pf->pdev->dev,
5955 "failed to configure TX rings for channel %u\n",
5956 ch->seid);
5957 return ret;
5958 }
5959
5960
5961 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
5962 dev_dbg(&pf->pdev->dev,
		"Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, vsi->next_base_queue %d\n",
5964 ch->seid, ch->vsi_number, ch->stat_counter_idx,
5965 ch->num_queue_pairs,
5966 vsi->next_base_queue);
5967 return ret;
5968}
5969
5970
5971
5972
5973
5974
5975
5976
5977
5978
5979
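/**
 * i40e_setup_channel - set up a new channel using the uplink element
 * @pf: pointer to PF device
 * @vsi: the parent VSI (must be the main VSI)
 * @ch: pointer to channel structure
 *
 * Returns true if the channel was successfully set up.
 **/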
5980static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
5981 struct i40e_channel *ch)
5982{
5983 u8 vsi_type;
5984 u16 seid;
5985 int ret;
5986
5987 if (vsi->type == I40E_VSI_MAIN) {
5988 vsi_type = I40E_VSI_VMDQ2;
5989 } else {
5990 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
5991 vsi->type);
5992 return false;
5993 }
5994
5995
5996 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
5997
5998
5999 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6000 if (ret) {
6001 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6002 return false;
6003 }
6004
	return ch->initialized;
6006}
6007
6008
6009
6010
6011
6012
6013
6014
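/**
 * i40e_validate_and_set_switch_mode - set the switch mode for cloud filters
 * @vsi: VSI backed by the PF
 *
 * Check the device capabilities and, if needed, set the switch to the
 * non-tunneled (L4 TCP port based) cloud filter mode.
 **/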
6015static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6016{
6017 u8 mode;
6018 struct i40e_pf *pf = vsi->back;
6019 struct i40e_hw *hw = &pf->hw;
6020 int ret;
6021
6022 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6023 if (ret)
6024 return -EINVAL;
6025
6026 if (hw->dev_caps.switch_mode) {
6027
6028
6029
6030 u32 switch_mode = hw->dev_caps.switch_mode &
6031 I40E_SWITCH_MODE_MASK;
6032 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6033 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6034 return 0;
6035 dev_err(&pf->pdev->dev,
6036 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6037 hw->dev_caps.switch_mode);
6038 return -EINVAL;
6039 }
6040 }
6041
6042
6043 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6044
6045
6046 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6047
6048
6049 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6050
6051
6052 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6053 pf->last_sw_conf_valid_flags,
6054 mode, NULL);
6055 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6056 dev_err(&pf->pdev->dev,
6057 "couldn't set switch config bits, err %s aq_err %s\n",
6058 i40e_stat_str(hw, ret),
6059 i40e_aq_str(hw,
6060 hw->aq.asq_last_status));
6061
6062 return ret;
6063}
6064
6065
6066
6067
6068
6069
6070
6071
6072
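/**
 * i40e_create_queue_channel - create a queue channel (VSI)
 * @vsi: the parent VSI
 * @ch: pointer to channel structure with the channel specific parameters
 *
 * Validate the requested queue count, switch to VEB mode and/or
 * reconfigure RSS if needed, then set up the channel and its optional
 * Tx rate limit.
 **/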
6073int i40e_create_queue_channel(struct i40e_vsi *vsi,
6074 struct i40e_channel *ch)
6075{
6076 struct i40e_pf *pf = vsi->back;
6077 bool reconfig_rss;
6078 int err;
6079
6080 if (!ch)
6081 return -EINVAL;
6082
6083 if (!ch->num_queue_pairs) {
6084 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6085 ch->num_queue_pairs);
6086 return -EINVAL;
6087 }
6088
6089
6090 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6091 &reconfig_rss);
6092 if (err) {
6093 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6094 ch->num_queue_pairs);
6095 return -EINVAL;
6096 }
6097
6098
6099
6100
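	/* By default the PF runs in VEPA mode; creating the first channel
	 * VSI requires switching to VEB mode, which needs a PF reset.
	 */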
6101 if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
6102 (!i40e_is_any_channel(vsi))) {
6103 if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
6104 dev_dbg(&pf->pdev->dev,
6105 "Failed to create channel. Override queues (%u) not power of 2\n",
6106 vsi->tc_config.tc_info[0].qcount);
6107 return -EINVAL;
6108 }
6109
6110 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6111 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6112
6113 if (vsi->type == I40E_VSI_MAIN) {
6114 if (pf->flags & I40E_FLAG_TC_MQPRIO)
6115 i40e_do_reset(pf, I40E_PF_RESET_FLAG,
6116 true);
6117 else
6118 i40e_do_reset_safe(pf,
6119 I40E_PF_RESET_FLAG);
6120 }
6121 }
6122
6123
6124
6125 }
6126
6127
6128
6129
6130 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6131 dev_dbg(&pf->pdev->dev,
6132 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6133 vsi->cnt_q_avail, ch->num_queue_pairs);
6134 return -EINVAL;
6135 }
6136
6137
6138 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6139 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6140 if (err) {
6141 dev_info(&pf->pdev->dev,
6142 "Error: unable to reconfig rss for num_queues (%u)\n",
6143 ch->num_queue_pairs);
6144 return -EINVAL;
6145 }
6146 }
6147
6148 if (!i40e_setup_channel(pf, vsi, ch)) {
6149 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6150 return -EINVAL;
6151 }
6152
6153 dev_info(&pf->pdev->dev,
6154 "Setup channel (id:%u) utilizing num_queues %d\n",
6155 ch->seid, ch->num_queue_pairs);
6156
6157
6158 if (ch->max_tx_rate) {
6159 u64 credits = ch->max_tx_rate;
6160
6161 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6162 return -EINVAL;
6163
6164 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6165 dev_dbg(&pf->pdev->dev,
6166 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6167 ch->max_tx_rate,
6168 credits,
6169 ch->seid);
6170 }
6171
6172
6173 ch->parent_vsi = vsi;
6174
6175
6176 vsi->cnt_q_avail -= ch->num_queue_pairs;
6177
6178 return 0;
6179}
6180
6181
6182
6183
6184
6185
6186
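/**
 * i40e_configure_queue_channels - add queue channels for the enabled TCs
 * @vsi: VSI to be configured
 *
 * Create one channel per enabled TC (other than TC0) and record the
 * channel seid in the VSI's tc_seid_map.
 **/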
6187static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6188{
6189 struct i40e_channel *ch;
6190 u64 max_rate = 0;
6191 int ret = 0, i;
6192
6193
6194 vsi->tc_seid_map[0] = vsi->seid;
6195 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6196 if (vsi->tc_config.enabled_tc & BIT(i)) {
6197 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6198 if (!ch) {
6199 ret = -ENOMEM;
6200 goto err_free;
6201 }
6202
6203 INIT_LIST_HEAD(&ch->list);
6204 ch->num_queue_pairs =
6205 vsi->tc_config.tc_info[i].qcount;
6206 ch->base_queue =
6207 vsi->tc_config.tc_info[i].qoffset;
6208
6209
6210
6211
6212 max_rate = vsi->mqprio_qopt.max_rate[i];
6213 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6214 ch->max_tx_rate = max_rate;
6215
6216 list_add_tail(&ch->list, &vsi->ch_list);
6217
6218 ret = i40e_create_queue_channel(vsi, ch);
6219 if (ret) {
6220 dev_err(&vsi->back->pdev->dev,
6221 "Failed creating queue channel with TC%d: queues %d\n",
6222 i, ch->num_queue_pairs);
6223 goto err_free;
6224 }
6225 vsi->tc_seid_map[i] = ch->seid;
6226 }
6227 }
6228 return ret;
6229
6230err_free:
6231 i40e_remove_queue_channels(vsi);
6232 return ret;
6233}
6234
6235
6236
6237
6238
6239
6240
6241
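/**
 * i40e_veb_config_tc - Configure TCs for the given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configure the given TC bitmap for the VEB (switching) element.
 **/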
6242int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6243{
6244 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6245 struct i40e_pf *pf = veb->pf;
6246 int ret = 0;
6247 int i;
6248
6249
6250 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6251 return ret;
6252
6253 bw_data.tc_valid_bits = enabled_tc;
6254
6255
6256
6257 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6258 if (enabled_tc & BIT(i))
6259 bw_data.tc_bw_share_credits[i] = 1;
6260 }
6261
6262 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6263 &bw_data, NULL);
6264 if (ret) {
6265 dev_info(&pf->pdev->dev,
6266 "VEB bw config failed, err %s aq_err %s\n",
6267 i40e_stat_str(&pf->hw, ret),
6268 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6269 goto out;
6270 }
6271
6272
6273 ret = i40e_veb_get_bw_info(veb);
6274 if (ret) {
6275 dev_info(&pf->pdev->dev,
6276 "Failed getting veb bw config, err %s aq_err %s\n",
6277 i40e_stat_str(&pf->hw, ret),
6278 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6279 }
6280
6281out:
6282 return ret;
6283}
6284
6285#ifdef CONFIG_I40E_DCB
6286
6287
6288
6289
6290
6291
6292
6293
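/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure every VEB and VSI on the PF according to the currently
 * enabled TC map.
 **/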
6294static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6295{
6296 u8 tc_map = 0;
6297 int ret;
6298 u8 v;
6299
6300
6301 tc_map = i40e_pf_get_tc_map(pf);
6302 for (v = 0; v < I40E_MAX_VEB; v++) {
6303 if (!pf->veb[v])
6304 continue;
6305 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6306 if (ret) {
6307 dev_info(&pf->pdev->dev,
6308 "Failed configuring TC for VEB seid=%d\n",
6309 pf->veb[v]->seid);
6310
6311 }
6312 }
6313
6314
6315 for (v = 0; v < pf->num_alloc_vsi; v++) {
6316 if (!pf->vsi[v])
6317 continue;
6318
6319
6320
6321
6322 if (v == pf->lan_vsi)
6323 tc_map = i40e_pf_get_tc_map(pf);
6324 else
6325 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6326
6327 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6328 if (ret) {
6329 dev_info(&pf->pdev->dev,
6330 "Failed configuring TC for VSI seid=%d\n",
6331 pf->vsi[v]->seid);
6332
6333 } else {
6334
6335 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6336 if (pf->vsi[v]->netdev)
6337 i40e_dcbnl_set_all(pf->vsi[v]);
6338 }
6339 }
6340}
6341
6342
6343
6344
6345
6346
6347
6348
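/**
 * i40e_resume_port_tx - Resume port Tx
 * @pf: PF struct
 *
 * Resume the port's Tx; on failure request a PF reset and schedule the
 * service task.
 **/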
6349static int i40e_resume_port_tx(struct i40e_pf *pf)
6350{
6351 struct i40e_hw *hw = &pf->hw;
6352 int ret;
6353
6354 ret = i40e_aq_resume_port_tx(hw, NULL);
6355 if (ret) {
6356 dev_info(&pf->pdev->dev,
6357 "Resume Port Tx failed, err %s aq_err %s\n",
6358 i40e_stat_str(&pf->hw, ret),
6359 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6360
6361 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6362 i40e_service_event_schedule(pf);
6363 }
6364
6365 return ret;
6366}
6367
6368
6369
6370
6371
6372
6373
6374
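/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration, cache it in the hardware
 * structure and set the DCB-related PF flags accordingly.
 **/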
6375static int i40e_init_pf_dcb(struct i40e_pf *pf)
6376{
6377 struct i40e_hw *hw = &pf->hw;
6378 int err = 0;
6379
6380
6381
6382
6383 if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
6384 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP))
6385 goto out;
6386
6387
6388 err = i40e_init_dcb(hw);
6389 if (!err) {
6390
6391 if ((!hw->func_caps.dcb) ||
6392 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
6393 dev_info(&pf->pdev->dev,
6394 "DCBX offload is not supported or is disabled for this PF.\n");
6395 } else {
6396
6397 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
6398 DCB_CAP_DCBX_VER_IEEE;
6399
6400 pf->flags |= I40E_FLAG_DCB_CAPABLE;
6401
6402
6403
6404 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6405 pf->flags |= I40E_FLAG_DCB_ENABLED;
6406 else
6407 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6408 dev_dbg(&pf->pdev->dev,
6409 "DCBX offload is supported for this PF.\n");
6410 }
6411 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
6412 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
6413 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
6414 } else {
6415 dev_info(&pf->pdev->dev,
6416 "Query for DCB configuration failed, err %s aq_err %s\n",
6417 i40e_stat_str(&pf->hw, err),
6418 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6419 }
6420
6421out:
6422 return err;
6423}
6424#endif
6425#define SPEED_SIZE 14
6426#define FC_SIZE 8
6427
6428
6429
6430
6431
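/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 * @isup: true if link is up, false otherwise
 **/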
6432void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6433{
6434 enum i40e_aq_link_speed new_speed;
6435 struct i40e_pf *pf = vsi->back;
6436 char *speed = "Unknown";
6437 char *fc = "Unknown";
6438 char *fec = "";
6439 char *req_fec = "";
6440 char *an = "";
6441
6442 if (isup)
6443 new_speed = pf->hw.phy.link_info.link_speed;
6444 else
6445 new_speed = I40E_LINK_SPEED_UNKNOWN;
6446
6447 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
6448 return;
6449 vsi->current_isup = isup;
6450 vsi->current_speed = new_speed;
6451 if (!isup) {
6452 netdev_info(vsi->netdev, "NIC Link is Down\n");
6453 return;
6454 }
6455
6456
6457
6458
6459 if (pf->hw.func_caps.npar_enable &&
6460 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
6461 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
6462 netdev_warn(vsi->netdev,
6463 "The partition detected link speed that is less than 10Gbps\n");
6464
6465 switch (pf->hw.phy.link_info.link_speed) {
6466 case I40E_LINK_SPEED_40GB:
6467 speed = "40 G";
6468 break;
6469 case I40E_LINK_SPEED_20GB:
6470 speed = "20 G";
6471 break;
6472 case I40E_LINK_SPEED_25GB:
6473 speed = "25 G";
6474 break;
6475 case I40E_LINK_SPEED_10GB:
6476 speed = "10 G";
6477 break;
6478 case I40E_LINK_SPEED_1GB:
6479 speed = "1000 M";
6480 break;
6481 case I40E_LINK_SPEED_100MB:
6482 speed = "100 M";
6483 break;
6484 default:
6485 break;
6486 }
6487
6488 switch (pf->hw.fc.current_mode) {
6489 case I40E_FC_FULL:
6490 fc = "RX/TX";
6491 break;
6492 case I40E_FC_TX_PAUSE:
6493 fc = "TX";
6494 break;
6495 case I40E_FC_RX_PAUSE:
6496 fc = "RX";
6497 break;
6498 default:
6499 fc = "None";
6500 break;
6501 }
6502
6503 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
6504 req_fec = ", Requested FEC: None";
6505 fec = ", FEC: None";
6506 an = ", Autoneg: False";
6507
6508 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
6509 an = ", Autoneg: True";
6510
6511 if (pf->hw.phy.link_info.fec_info &
6512 I40E_AQ_CONFIG_FEC_KR_ENA)
6513 fec = ", FEC: CL74 FC-FEC/BASE-R";
6514 else if (pf->hw.phy.link_info.fec_info &
6515 I40E_AQ_CONFIG_FEC_RS_ENA)
6516 fec = ", FEC: CL108 RS-FEC";
6517
6518
6519
6520
6521 if (vsi->back->hw.phy.link_info.req_fec_info &
6522 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
6523 if (vsi->back->hw.phy.link_info.req_fec_info &
6524 I40E_AQ_REQUEST_FEC_RS)
6525 req_fec = ", Requested FEC: CL108 RS-FEC";
6526 else
6527 req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
6528 }
6529 }
6530
6531 netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
6532 speed, req_fec, fec, an, fc);
6533}
6534
6535
6536
6537
6538
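/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 **/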
6539static int i40e_up_complete(struct i40e_vsi *vsi)
6540{
6541 struct i40e_pf *pf = vsi->back;
6542 int err;
6543
6544 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6545 i40e_vsi_configure_msix(vsi);
6546 else
6547 i40e_configure_msi_and_legacy(vsi);
6548
6549
6550 err = i40e_vsi_start_rings(vsi);
6551 if (err)
6552 return err;
6553
6554 clear_bit(__I40E_VSI_DOWN, vsi->state);
6555 i40e_napi_enable_all(vsi);
6556 i40e_vsi_enable_irq(vsi);
6557
6558 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
6559 (vsi->netdev)) {
6560 i40e_print_link_message(vsi, true);
6561 netif_tx_start_all_queues(vsi->netdev);
6562 netif_carrier_on(vsi->netdev);
6563 }
6564
6565
6566 if (vsi->type == I40E_VSI_FDIR) {
6567
6568 pf->fd_add_err = 0;
6569 pf->fd_atr_cnt = 0;
6570 i40e_fdir_filter_restore(vsi);
6571 }
6572
6573
6574
6575
6576 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
6577 i40e_service_event_schedule(pf);
6578
6579 return 0;
6580}
6581
6582
6583
6584
6585
6586
6587
6588
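/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the VSI after a configuration change (e.g. MTU), serialized
 * against other configuration paths via the __I40E_CONFIG_BUSY bit.
 **/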
6589static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
6590{
6591 struct i40e_pf *pf = vsi->back;
6592
6593 WARN_ON(in_interrupt());
6594 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
6595 usleep_range(1000, 2000);
6596 i40e_down(vsi);
6597
6598 i40e_up(vsi);
6599 clear_bit(__I40E_CONFIG_BUSY, pf->state);
6600}
6601
6602
6603
6604
6605
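/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 **/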
6606int i40e_up(struct i40e_vsi *vsi)
6607{
6608 int err;
6609
6610 err = i40e_vsi_configure(vsi);
6611 if (!err)
6612 err = i40e_up_complete(vsi);
6613
6614 return err;
6615}
6616
6617
6618
6619
6620
6621
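/**
 * i40e_force_link_state - Force the PHY link state
 * @pf: board private structure
 * @is_up: whether the link should be forced up or down
 **/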
6622static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
6623{
6624 struct i40e_aq_get_phy_abilities_resp abilities;
6625 struct i40e_aq_set_phy_config config = {0};
6626 struct i40e_hw *hw = &pf->hw;
6627 i40e_status err;
6628 u64 mask;
6629 u8 speed;
6630
6631
6632
6633
6634
6635
6636
6637 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
6638 NULL);
6639 if (err) {
6640 dev_err(&pf->pdev->dev,
6641 "failed to get phy cap., ret = %s last_status = %s\n",
6642 i40e_stat_str(hw, err),
6643 i40e_aq_str(hw, hw->aq.asq_last_status));
6644 return err;
6645 }
6646 speed = abilities.link_speed;
6647
6648
6649 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
6650 NULL);
6651 if (err) {
6652 dev_err(&pf->pdev->dev,
6653 "failed to get phy cap., ret = %s last_status = %s\n",
6654 i40e_stat_str(hw, err),
6655 i40e_aq_str(hw, hw->aq.asq_last_status));
6656 return err;
6657 }
6658
6659
6660
6661
6662 if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
6663 return I40E_SUCCESS;
6664
6665
6666
6667
6668
6669 mask = I40E_PHY_TYPES_BITMASK;
6670 config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
6671 config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0;
6672
6673 config.abilities = abilities.abilities;
6674 if (abilities.link_speed != 0)
6675 config.link_speed = abilities.link_speed;
6676 else
6677 config.link_speed = speed;
6678 config.eee_capability = abilities.eee_capability;
6679 config.eeer = abilities.eeer_val;
6680 config.low_power_ctrl = abilities.d3_lpan;
6681 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
6682 I40E_AQ_PHY_FEC_CONFIG_MASK;
6683 err = i40e_aq_set_phy_config(hw, &config, NULL);
6684
6685 if (err) {
6686 dev_err(&pf->pdev->dev,
6687 "set phy config ret = %s last_status = %s\n",
6688 i40e_stat_str(&pf->hw, err),
6689 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6690 return err;
6691 }
6692
6693
6694 err = i40e_update_link_info(hw);
6695 if (err) {
6696
6697
6698
6699
6700 msleep(1000);
6701 i40e_update_link_info(hw);
6702 }
6703
6704 i40e_aq_set_link_restart_an(hw, true, NULL);
6705
6706 return I40E_SUCCESS;
6707}
6708
6709
6710
6711
6712
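/**
 * i40e_down - Shut down the connection processing
 * @vsi: the VSI being stopped
 **/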
6713void i40e_down(struct i40e_vsi *vsi)
6714{
6715 int i;
6716
6717
6718
6719
6720 if (vsi->netdev) {
6721 netif_carrier_off(vsi->netdev);
6722 netif_tx_disable(vsi->netdev);
6723 }
6724 i40e_vsi_disable_irq(vsi);
6725 i40e_vsi_stop_rings(vsi);
6726 if (vsi->type == I40E_VSI_MAIN &&
6727 vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED)
6728 i40e_force_link_state(vsi->back, false);
6729 i40e_napi_disable_all(vsi);
6730
6731 for (i = 0; i < vsi->num_queue_pairs; i++) {
6732 i40e_clean_tx_ring(vsi->tx_rings[i]);
6733 if (i40e_enabled_xdp_vsi(vsi))
6734 i40e_clean_tx_ring(vsi->xdp_rings[i]);
6735 i40e_clean_rx_ring(vsi->rx_rings[i]);
6736 }
6737
6738}
6739
6740
6741
6742
6743
6744
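/**
 * i40e_validate_mqprio_qopt - validate the mqprio queue mapping info
 * @vsi: the VSI being configured
 * @mqprio_qopt: queue parameters passed from the stack
 **/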
6745static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
6746 struct tc_mqprio_qopt_offload *mqprio_qopt)
6747{
6748 u64 sum_max_rate = 0;
6749 u64 max_rate = 0;
6750 int i;
6751
6752 if (mqprio_qopt->qopt.offset[0] != 0 ||
6753 mqprio_qopt->qopt.num_tc < 1 ||
6754 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
6755 return -EINVAL;
6756 for (i = 0; ; i++) {
6757 if (!mqprio_qopt->qopt.count[i])
6758 return -EINVAL;
6759 if (mqprio_qopt->min_rate[i]) {
6760 dev_err(&vsi->back->pdev->dev,
6761 "Invalid min tx rate (greater than 0) specified\n");
6762 return -EINVAL;
6763 }
6764 max_rate = mqprio_qopt->max_rate[i];
6765 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6766 sum_max_rate += max_rate;
6767
6768 if (i >= mqprio_qopt->qopt.num_tc - 1)
6769 break;
6770 if (mqprio_qopt->qopt.offset[i + 1] !=
6771 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
6772 return -EINVAL;
6773 }
6774 if (vsi->num_queue_pairs <
6775 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
6776 return -EINVAL;
6777 }
6778 if (sum_max_rate > i40e_get_link_speed(vsi)) {
6779 dev_err(&vsi->back->pdev->dev,
6780 "Invalid max tx rate specified\n");
6781 return -EINVAL;
6782 }
6783 return 0;
6784}
6785
6786
6787
6788
6789
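/**
 * i40e_vsi_set_default_tc_config - set default values for the TC configuration
 * @vsi: the VSI being configured
 **/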
6790static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
6791{
6792 u16 qcount;
6793 int i;
6794
6795
6796 vsi->tc_config.numtc = 1;
6797 vsi->tc_config.enabled_tc = 1;
6798 qcount = min_t(int, vsi->alloc_queue_pairs,
6799 i40e_pf_get_max_q_per_tc(vsi->back));
6800 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6801
6802
6803
6804 vsi->tc_config.tc_info[i].qoffset = 0;
6805 if (i == 0)
6806 vsi->tc_config.tc_info[i].qcount = qcount;
6807 else
6808 vsi->tc_config.tc_info[i].qcount = 1;
6809 vsi->tc_config.tc_info[i].netdev_tc = 0;
6810 }
6811}
6812
6813
6814
6815
6816
6817
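/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @type_data: mqprio offload data from the tc subsystem
 **/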
6818static int i40e_setup_tc(struct net_device *netdev, void *type_data)
6819{
6820 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
6821 struct i40e_netdev_priv *np = netdev_priv(netdev);
6822 struct i40e_vsi *vsi = np->vsi;
6823 struct i40e_pf *pf = vsi->back;
6824 u8 enabled_tc = 0, num_tc, hw;
6825 bool need_reset = false;
6826 int ret = -EINVAL;
6827 u16 mode;
6828 int i;
6829
6830 num_tc = mqprio_qopt->qopt.num_tc;
6831 hw = mqprio_qopt->qopt.hw;
6832 mode = mqprio_qopt->mode;
6833 if (!hw) {
6834 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
6835 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
6836 goto config_tc;
6837 }
6838
6839
6840 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
6841 netdev_info(netdev,
6842 "Configuring TC not supported in MFP mode\n");
6843 return ret;
6844 }
6845 switch (mode) {
6846 case TC_MQPRIO_MODE_DCB:
6847 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
6848
6849
6850 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
6851 netdev_info(netdev,
6852 "DCB is not enabled for adapter\n");
6853 return ret;
6854 }
6855
6856
6857 if (num_tc > i40e_pf_get_num_tc(pf)) {
6858 netdev_info(netdev,
6859 "TC count greater than enabled on link for adapter\n");
6860 return ret;
6861 }
6862 break;
6863 case TC_MQPRIO_MODE_CHANNEL:
6864 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
6865 netdev_info(netdev,
6866 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
6867 return ret;
6868 }
6869 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
6870 return ret;
6871 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
6872 if (ret)
6873 return ret;
6874 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
6875 sizeof(*mqprio_qopt));
6876 pf->flags |= I40E_FLAG_TC_MQPRIO;
6877 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6878 break;
6879 default:
6880 return -EINVAL;
6881 }
6882
6883config_tc:
6884
6885 for (i = 0; i < num_tc; i++)
6886 enabled_tc |= BIT(i);
6887
6888
6889 if (enabled_tc == vsi->tc_config.enabled_tc &&
6890 mode != TC_MQPRIO_MODE_CHANNEL)
6891 return 0;
6892
6893
6894 i40e_quiesce_vsi(vsi);
6895
6896 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
6897 i40e_remove_queue_channels(vsi);
6898
6899
6900 ret = i40e_vsi_config_tc(vsi, enabled_tc);
6901 if (ret) {
6902 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
6903 vsi->seid);
6904 need_reset = true;
6905 goto exit;
6906 }
6907
6908 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
6909 if (vsi->mqprio_qopt.max_rate[0]) {
6910 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
6911
6912 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
6913 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
6914 if (!ret) {
6915 u64 credits = max_tx_rate;
6916
6917 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6918 dev_dbg(&vsi->back->pdev->dev,
6919 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6920 max_tx_rate,
6921 credits,
6922 vsi->seid);
6923 } else {
6924 need_reset = true;
6925 goto exit;
6926 }
6927 }
6928 ret = i40e_configure_queue_channels(vsi);
6929 if (ret) {
6930 netdev_info(netdev,
6931 "Failed configuring queue channels\n");
6932 need_reset = true;
6933 goto exit;
6934 }
6935 }
6936
6937exit:
6938
6939 if (need_reset) {
6940 i40e_vsi_set_default_tc_config(vsi);
6941 need_reset = false;
6942 }
6943
6944
6945 i40e_unquiesce_vsi(vsi);
6946 return ret;
6947}
6948
6949
6950
6951
6952
6953
6954
6955
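/**
 * i40e_set_cld_element - set the cloud filter element data
 * @filter: cloud filter rule
 * @cld: pointer to the cloud filter element data
 *
 * Helper to copy the filter rule into an AQ cloud filter element.
 **/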
6956static inline void
6957i40e_set_cld_element(struct i40e_cloud_filter *filter,
6958 struct i40e_aqc_cloud_filters_element_data *cld)
6959{
6960 int i, j;
6961 u32 ipa;
6962
6963 memset(cld, 0, sizeof(*cld));
6964 ether_addr_copy(cld->outer_mac, filter->dst_mac);
6965 ether_addr_copy(cld->inner_mac, filter->src_mac);
6966
6967 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
6968 return;
6969
6970 if (filter->n_proto == ETH_P_IPV6) {
6971#define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
6972 for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
6973 i++, j += 2) {
6974 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
6975 ipa = cpu_to_le32(ipa);
6976 memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
6977 }
6978 } else {
6979 ipa = be32_to_cpu(filter->dst_ipv4);
6980 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
6981 }
6982
6983 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
6984
6985
6986
6987
6988 if (filter->tenant_id)
6989 return;
6990}
6991
6992
6993
6994
6995
6996
6997
6998
6999
7000
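/**
 * i40e_add_del_cloud_filter - Add/delete a cloud filter
 * @vsi: pointer to VSI
 * @filter: cloud filter rule
 * @add: if true, add; if false, delete
 *
 * Add or delete a cloud filter for a specific flow spec.
 * Returns 0 if the filter was successfully added or deleted.
 **/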
7001int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
7002 struct i40e_cloud_filter *filter, bool add)
7003{
7004 struct i40e_aqc_cloud_filters_element_data cld_filter;
7005 struct i40e_pf *pf = vsi->back;
7006 int ret;
7007 static const u16 flag_table[128] = {
7008 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
7009 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
7010 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
7011 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
7012 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
7013 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
7014 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
7015 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
7016 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
7017 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
7018 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
7019 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
7020 [I40E_CLOUD_FILTER_FLAGS_IIP] =
7021 I40E_AQC_ADD_CLOUD_FILTER_IIP,
7022 };
7023
7024 if (filter->flags >= ARRAY_SIZE(flag_table))
7025 return I40E_ERR_CONFIG;
7026
7027
7028 i40e_set_cld_element(filter, &cld_filter);
7029
7030 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
7031 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
7032 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
7033
7034 if (filter->n_proto == ETH_P_IPV6)
7035 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7036 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7037 else
7038 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7039 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7040
7041 if (add)
7042 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
7043 &cld_filter, 1);
7044 else
7045 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
7046 &cld_filter, 1);
7047 if (ret)
7048 dev_dbg(&pf->pdev->dev,
7049 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
7050 add ? "add" : "delete", filter->dst_port, ret,
7051 pf->hw.aq.asq_last_status);
7052 else
7053 dev_info(&pf->pdev->dev,
7054 "%s cloud filter for VSI: %d\n",
7055 add ? "Added" : "Deleted", filter->seid);
7056 return ret;
7057}
7058
7059
7060
7061
7062
7063
7064
7065
7066
7067
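/**
 * i40e_add_del_cloud_filter_big_buf - Add/delete a cloud filter (big buffer)
 * @vsi: pointer to VSI
 * @filter: cloud filter rule
 * @add: if true, add; if false, delete
 *
 * Add or delete a destination-port based cloud filter using the big
 * buffer admin queue command.
 **/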
7068int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
7069 struct i40e_cloud_filter *filter,
7070 bool add)
7071{
7072 struct i40e_aqc_cloud_filters_element_bb cld_filter;
7073 struct i40e_pf *pf = vsi->back;
7074 int ret;
7075
7076
7077 if ((is_valid_ether_addr(filter->dst_mac) &&
7078 is_valid_ether_addr(filter->src_mac)) ||
7079 (is_multicast_ether_addr(filter->dst_mac) &&
7080 is_multicast_ether_addr(filter->src_mac)))
7081 return -EOPNOTSUPP;
7082
7083
7084
7085
7086 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
7087 return -EOPNOTSUPP;
7088
7089
7090 if (filter->src_port || filter->src_ipv4 ||
7091 !ipv6_addr_any(&filter->ip.v6.src_ip6))
7092 return -EOPNOTSUPP;
7093
7094
7095 i40e_set_cld_element(filter, &cld_filter.element);
7096
7097 if (is_valid_ether_addr(filter->dst_mac) ||
7098 is_valid_ether_addr(filter->src_mac) ||
7099 is_multicast_ether_addr(filter->dst_mac) ||
7100 is_multicast_ether_addr(filter->src_mac)) {
7101
7102 if (filter->dst_ipv4)
7103 return -EOPNOTSUPP;
7104
7105
7106
7107
7108
7109 cld_filter.element.flags =
7110 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
7111
7112 if (filter->vlan_id) {
7113 cld_filter.element.flags =
7114 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
7115 }
7116
7117 } else if (filter->dst_ipv4 ||
7118 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
7119 cld_filter.element.flags =
7120 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
7121 if (filter->n_proto == ETH_P_IPV6)
7122 cld_filter.element.flags |=
7123 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7124 else
7125 cld_filter.element.flags |=
7126 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7127 } else {
7128 dev_err(&pf->pdev->dev,
7129 "either mac or ip has to be valid for cloud filter\n");
7130 return -EINVAL;
7131 }
7132
7133
7134 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
7135 be16_to_cpu(filter->dst_port);
7136
7137 if (add) {
7138
7139 ret = i40e_validate_and_set_switch_mode(vsi);
7140 if (ret) {
7141 dev_err(&pf->pdev->dev,
7142 "failed to set switch mode, ret %d\n",
7143 ret);
7144 return ret;
7145 }
7146
7147 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
7148 &cld_filter, 1);
7149 } else {
7150 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
7151 &cld_filter, 1);
7152 }
7153
7154 if (ret)
7155 dev_dbg(&pf->pdev->dev,
7156 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
7157 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
7158 else
7159 dev_info(&pf->pdev->dev,
7160 "%s cloud filter for VSI: %d, L4 port: %d\n",
7161 add ? "add" : "delete", filter->seid,
7162 ntohs(filter->dst_port));
7163 return ret;
7164}
7165
7166
7167
7168
7169
7170
7171
7172
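/**
 * i40e_parse_cls_flower - Parse the tc-flower match provided by the kernel
 * @vsi: pointer to VSI
 * @f: pointer to the tc_cls_flower_offload request
 * @filter: pointer to the cloud filter structure to fill
 **/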
7173static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
7174 struct tc_cls_flower_offload *f,
7175 struct i40e_cloud_filter *filter)
7176{
7177 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
7178 struct i40e_pf *pf = vsi->back;
7179 u8 field_flags = 0;
7180
7181 if (f->dissector->used_keys &
7182 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7183 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7184 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7185 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7186 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7187 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7188 BIT(FLOW_DISSECTOR_KEY_PORTS) |
7189 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
7190 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
7191 f->dissector->used_keys);
7192 return -EOPNOTSUPP;
7193 }
7194
7195 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
7196 struct flow_dissector_key_keyid *key =
7197 skb_flow_dissector_target(f->dissector,
7198 FLOW_DISSECTOR_KEY_ENC_KEYID,
7199 f->key);
7200
7201 struct flow_dissector_key_keyid *mask =
7202 skb_flow_dissector_target(f->dissector,
7203 FLOW_DISSECTOR_KEY_ENC_KEYID,
7204 f->mask);
7205
7206 if (mask->keyid != 0)
7207 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
7208
7209 filter->tenant_id = be32_to_cpu(key->keyid);
7210 }
7211
7212 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
7213 struct flow_dissector_key_basic *key =
7214 skb_flow_dissector_target(f->dissector,
7215 FLOW_DISSECTOR_KEY_BASIC,
7216 f->key);
7217
7218 struct flow_dissector_key_basic *mask =
7219 skb_flow_dissector_target(f->dissector,
7220 FLOW_DISSECTOR_KEY_BASIC,
7221 f->mask);
7222
7223 n_proto_key = ntohs(key->n_proto);
7224 n_proto_mask = ntohs(mask->n_proto);
7225
7226 if (n_proto_key == ETH_P_ALL) {
7227 n_proto_key = 0;
7228 n_proto_mask = 0;
7229 }
7230 filter->n_proto = n_proto_key & n_proto_mask;
7231 filter->ip_proto = key->ip_proto;
7232 }
7233
7234 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7235 struct flow_dissector_key_eth_addrs *key =
7236 skb_flow_dissector_target(f->dissector,
7237 FLOW_DISSECTOR_KEY_ETH_ADDRS,
7238 f->key);
7239
7240 struct flow_dissector_key_eth_addrs *mask =
7241 skb_flow_dissector_target(f->dissector,
7242 FLOW_DISSECTOR_KEY_ETH_ADDRS,
7243 f->mask);
7244
7245
7246 if (!is_zero_ether_addr(mask->dst)) {
7247 if (is_broadcast_ether_addr(mask->dst)) {
7248 field_flags |= I40E_CLOUD_FIELD_OMAC;
7249 } else {
7250 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
7251 mask->dst);
7252 return I40E_ERR_CONFIG;
7253 }
7254 }
7255
7256 if (!is_zero_ether_addr(mask->src)) {
7257 if (is_broadcast_ether_addr(mask->src)) {
7258 field_flags |= I40E_CLOUD_FIELD_IMAC;
7259 } else {
7260 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
7261 mask->src);
7262 return I40E_ERR_CONFIG;
7263 }
7264 }
7265 ether_addr_copy(filter->dst_mac, key->dst);
7266 ether_addr_copy(filter->src_mac, key->src);
7267 }
7268
7269 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
7270 struct flow_dissector_key_vlan *key =
7271 skb_flow_dissector_target(f->dissector,
7272 FLOW_DISSECTOR_KEY_VLAN,
7273 f->key);
7274 struct flow_dissector_key_vlan *mask =
7275 skb_flow_dissector_target(f->dissector,
7276 FLOW_DISSECTOR_KEY_VLAN,
7277 f->mask);
7278
7279 if (mask->vlan_id) {
7280 if (mask->vlan_id == VLAN_VID_MASK) {
7281 field_flags |= I40E_CLOUD_FIELD_IVLAN;
7282
7283 } else {
7284 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
7285 mask->vlan_id);
7286 return I40E_ERR_CONFIG;
7287 }
7288 }
7289
7290 filter->vlan_id = cpu_to_be16(key->vlan_id);
7291 }
7292
7293 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
7294 struct flow_dissector_key_control *key =
7295 skb_flow_dissector_target(f->dissector,
7296 FLOW_DISSECTOR_KEY_CONTROL,
7297 f->key);
7298
7299 addr_type = key->addr_type;
7300 }
7301
7302 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7303 struct flow_dissector_key_ipv4_addrs *key =
7304 skb_flow_dissector_target(f->dissector,
7305 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
7306 f->key);
7307 struct flow_dissector_key_ipv4_addrs *mask =
7308 skb_flow_dissector_target(f->dissector,
7309 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
7310 f->mask);
7311
7312 if (mask->dst) {
7313 if (mask->dst == cpu_to_be32(0xffffffff)) {
7314 field_flags |= I40E_CLOUD_FIELD_IIP;
7315 } else {
7316 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
7317 &mask->dst);
7318 return I40E_ERR_CONFIG;
7319 }
7320 }
7321
7322 if (mask->src) {
7323 if (mask->src == cpu_to_be32(0xffffffff)) {
7324 field_flags |= I40E_CLOUD_FIELD_IIP;
7325 } else {
7326 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
7327 &mask->src);
7328 return I40E_ERR_CONFIG;
7329 }
7330 }
7331
7332 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
7333 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
7334 return I40E_ERR_CONFIG;
7335 }
7336 filter->dst_ipv4 = key->dst;
7337 filter->src_ipv4 = key->src;
7338 }
7339
7340 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7341 struct flow_dissector_key_ipv6_addrs *key =
7342 skb_flow_dissector_target(f->dissector,
7343 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
7344 f->key);
7345 struct flow_dissector_key_ipv6_addrs *mask =
7346 skb_flow_dissector_target(f->dissector,
7347 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
7348 f->mask);
7349
7350
7351
7352
7353 if (ipv6_addr_loopback(&key->dst) ||
7354 ipv6_addr_loopback(&key->src)) {
7355 dev_err(&pf->pdev->dev,
7356 "Bad ipv6, addr is LOOPBACK\n");
7357 return I40E_ERR_CONFIG;
7358 }
7359 if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
7360 field_flags |= I40E_CLOUD_FIELD_IIP;
7361
7362 memcpy(&filter->src_ipv6, &key->src.s6_addr32,
7363 sizeof(filter->src_ipv6));
7364 memcpy(&filter->dst_ipv6, &key->dst.s6_addr32,
7365 sizeof(filter->dst_ipv6));
7366 }
7367
7368 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
7369 struct flow_dissector_key_ports *key =
7370 skb_flow_dissector_target(f->dissector,
7371 FLOW_DISSECTOR_KEY_PORTS,
7372 f->key);
7373 struct flow_dissector_key_ports *mask =
7374 skb_flow_dissector_target(f->dissector,
7375 FLOW_DISSECTOR_KEY_PORTS,
7376 f->mask);
7377
7378 if (mask->src) {
7379 if (mask->src == cpu_to_be16(0xffff)) {
7380 field_flags |= I40E_CLOUD_FIELD_IIP;
7381 } else {
7382 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
7383 be16_to_cpu(mask->src));
7384 return I40E_ERR_CONFIG;
7385 }
7386 }
7387
7388 if (mask->dst) {
7389 if (mask->dst == cpu_to_be16(0xffff)) {
7390 field_flags |= I40E_CLOUD_FIELD_IIP;
7391 } else {
7392 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
7393 be16_to_cpu(mask->dst));
7394 return I40E_ERR_CONFIG;
7395 }
7396 }
7397
7398 filter->dst_port = key->dst;
7399 filter->src_port = key->src;
7400
7401 switch (filter->ip_proto) {
7402 case IPPROTO_TCP:
7403 case IPPROTO_UDP:
7404 break;
7405 default:
7406 dev_err(&pf->pdev->dev,
7407 "Only UDP and TCP transport are supported\n");
7408 return -EINVAL;
7409 }
7410 }
7411 filter->flags = field_flags;
7412 return 0;
7413}
7414
7415
7416
7417
7418
7419
7420
7421
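/**
 * i40e_handle_tclass - direct a cloud filter to a traffic class
 * @vsi: pointer to the VSI
 * @tc: traffic class to direct to
 * @filter: pointer to the cloud filter structure
 *
 * Sets filter->seid to the main VSI (tc 0) or to the channel VSI
 * backing the requested traffic class.
 **/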
7422static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
7423 struct i40e_cloud_filter *filter)
7424{
7425 struct i40e_channel *ch, *ch_tmp;
7426
7427
7428 if (tc == 0) {
7429 filter->seid = vsi->seid;
7430 return 0;
7431 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
7432 if (!filter->dst_port) {
7433 dev_err(&vsi->back->pdev->dev,
7434 "Specify destination port to direct to traffic class that is not default\n");
7435 return -EINVAL;
7436 }
7437 if (list_empty(&vsi->ch_list))
7438 return -EINVAL;
7439 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
7440 list) {
7441 if (ch->seid == vsi->tc_seid_map[tc])
7442 filter->seid = ch->seid;
7443 }
7444 return 0;
7445 }
7446 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
7447 return -EINVAL;
7448}
7449
7450
7451
7452
7453
7454
7455
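/**
 * i40e_configure_clsflower - add a cloud filter from a tc-flower rule
 * @vsi: pointer to the VSI
 * @cls_flower: pointer to the tc-flower offload request
 *
 * Parses the flower match, resolves the target traffic class and
 * programs the corresponding cloud filter in hardware.
 **/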
7456static int i40e_configure_clsflower(struct i40e_vsi *vsi,
7457 struct tc_cls_flower_offload *cls_flower)
7458{
7459 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
7460 struct i40e_cloud_filter *filter = NULL;
7461 struct i40e_pf *pf = vsi->back;
7462 int err = 0;
7463
7464 if (tc < 0) {
7465 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
7466 return -EOPNOTSUPP;
7467 }
7468
7469 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
7470 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
7471 return -EBUSY;
7472
7473 if (pf->fdir_pf_active_filters ||
7474 (!hlist_empty(&pf->fdir_filter_list))) {
7475 dev_err(&vsi->back->pdev->dev,
7476 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
7477 return -EINVAL;
7478 }
7479
7480 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
7481 dev_err(&vsi->back->pdev->dev,
7482 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
7483 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7484 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7485 }
7486
7487 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
7488 if (!filter)
7489 return -ENOMEM;
7490
7491 filter->cookie = cls_flower->cookie;
7492
7493 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
7494 if (err < 0)
7495 goto err;
7496
7497 err = i40e_handle_tclass(vsi, tc, filter);
7498 if (err < 0)
7499 goto err;
7500
7501
7502 if (filter->dst_port)
7503 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
7504 else
7505 err = i40e_add_del_cloud_filter(vsi, filter, true);
7506
7507 if (err) {
7508 dev_err(&pf->pdev->dev,
7509 "Failed to add cloud filter, err %s\n",
7510 i40e_stat_str(&pf->hw, err));
7511 goto err;
7512 }
7513
7514
7515 INIT_HLIST_NODE(&filter->cloud_node);
7516
7517 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
7518
7519 pf->num_cloud_filters++;
7520
7521 return err;
7522err:
7523 kfree(filter);
7524 return err;
7525}
7526
7527
7528
7529
7530
7531
7532
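/**
 * i40e_find_cloud_filter - look up a cloud filter by its tc cookie
 * @vsi: pointer to the VSI
 * @cookie: filter cookie supplied by the tc subsystem
 **/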
7533static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
7534 unsigned long *cookie)
7535{
7536 struct i40e_cloud_filter *filter = NULL;
7537 struct hlist_node *node2;
7538
7539 hlist_for_each_entry_safe(filter, node2,
7540 &vsi->back->cloud_filter_list, cloud_node)
7541 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
7542 return filter;
7543 return NULL;
7544}
7545
7546
7547
7548
7549
7550
7551
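/**
 * i40e_delete_clsflower - remove the cloud filter for a tc-flower rule
 * @vsi: pointer to the VSI
 * @cls_flower: pointer to the tc-flower offload request
 **/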
7552static int i40e_delete_clsflower(struct i40e_vsi *vsi,
7553 struct tc_cls_flower_offload *cls_flower)
7554{
7555 struct i40e_cloud_filter *filter = NULL;
7556 struct i40e_pf *pf = vsi->back;
7557 int err = 0;
7558
7559 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
7560
7561 if (!filter)
7562 return -EINVAL;
7563
7564 hash_del(&filter->cloud_node);
7565
7566 if (filter->dst_port)
7567 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
7568 else
7569 err = i40e_add_del_cloud_filter(vsi, filter, false);
7570
7571 kfree(filter);
7572 if (err) {
7573 dev_err(&pf->pdev->dev,
7574 "Failed to delete cloud filter, err %s\n",
7575 i40e_stat_str(&pf->hw, err));
7576 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
7577 }
7578
7579 pf->num_cloud_filters--;
7580 if (!pf->num_cloud_filters)
7581 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
7582 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
7583 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7584 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7585 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
7586 }
7587 return 0;
7588}
7589
7590
7591
7592
7593
7594
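/**
 * i40e_setup_tc_cls_flower - dispatch a tc-flower offload command
 * @np: net device private structure
 * @cls_flower: pointer to the tc-flower offload request
 **/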
7595static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
7596 struct tc_cls_flower_offload *cls_flower)
7597{
7598 struct i40e_vsi *vsi = np->vsi;
7599
7600 switch (cls_flower->command) {
7601 case TC_CLSFLOWER_REPLACE:
7602 return i40e_configure_clsflower(vsi, cls_flower);
7603 case TC_CLSFLOWER_DESTROY:
7604 return i40e_delete_clsflower(vsi, cls_flower);
7605 case TC_CLSFLOWER_STATS:
7606 return -EOPNOTSUPP;
7607 default:
7608 return -EOPNOTSUPP;
7609 }
7610}
7611
7612static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
7613 void *cb_priv)
7614{
7615 struct i40e_netdev_priv *np = cb_priv;
7616
7617 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
7618 return -EOPNOTSUPP;
7619
7620 switch (type) {
7621 case TC_SETUP_CLSFLOWER:
7622 return i40e_setup_tc_cls_flower(np, type_data);
7623
7624 default:
7625 return -EOPNOTSUPP;
7626 }
7627}
7628
7629static int i40e_setup_tc_block(struct net_device *dev,
7630 struct tc_block_offload *f)
7631{
7632 struct i40e_netdev_priv *np = netdev_priv(dev);
7633
7634 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
7635 return -EOPNOTSUPP;
7636
7637 switch (f->command) {
7638 case TC_BLOCK_BIND:
7639 return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
7640 np, np);
7641 case TC_BLOCK_UNBIND:
7642 tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
7643 return 0;
7644 default:
7645 return -EOPNOTSUPP;
7646 }
7647}
7648
7649static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
7650 void *type_data)
7651{
7652 switch (type) {
7653 case TC_SETUP_QDISC_MQPRIO:
7654 return i40e_setup_tc(netdev, type_data);
7655 case TC_SETUP_BLOCK:
7656 return i40e_setup_tc_block(netdev, type_data);
7657 default:
7658 return -EOPNOTSUPP;
7659 }
7660}
7661
7662
7663
7664
7665
7666
7667
7668
7669
7670
7671
7672
7673
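/**
 * i40e_open - called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  All resources needed for transmit
 * and receive operations are allocated and the interrupt handlers
 * are registered with the OS.
 *
 * Returns 0 on success, negative value on failure
 **/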
7674int i40e_open(struct net_device *netdev)
7675{
7676 struct i40e_netdev_priv *np = netdev_priv(netdev);
7677 struct i40e_vsi *vsi = np->vsi;
7678 struct i40e_pf *pf = vsi->back;
7679 int err;
7680
7681
7682 if (test_bit(__I40E_TESTING, pf->state) ||
7683 test_bit(__I40E_BAD_EEPROM, pf->state))
7684 return -EBUSY;
7685
7686 netif_carrier_off(netdev);
7687
7688 if (i40e_force_link_state(pf, true))
7689 return -EAGAIN;
7690
7691 err = i40e_vsi_open(vsi);
7692 if (err)
7693 return err;
7694
7695
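 /* configure global TSO hardware offload settings */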
7696 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
7697 TCP_FLAG_FIN) >> 16);
7698 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
7699 TCP_FLAG_FIN |
7700 TCP_FLAG_CWR) >> 16);
7701 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
7702
7703 udp_tunnel_get_rx_info(netdev);
7704
7705 return 0;
7706}
7707
7708
7709
7710
7711
7712
7713
7714
7715
7716
7717
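/**
 * i40e_vsi_open - finish initialization and bring up a VSI
 * @vsi: the VSI to open
 *
 * Allocates queue resources, requests the interrupt vectors and
 * completes the bring-up of the VSI.
 *
 * Returns 0 on success, negative value on failure
 **/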
7718int i40e_vsi_open(struct i40e_vsi *vsi)
7719{
7720 struct i40e_pf *pf = vsi->back;
7721 char int_name[I40E_INT_NAME_STR_LEN];
7722 int err;
7723
7724
7725 err = i40e_vsi_setup_tx_resources(vsi);
7726 if (err)
7727 goto err_setup_tx;
7728 err = i40e_vsi_setup_rx_resources(vsi);
7729 if (err)
7730 goto err_setup_rx;
7731
7732 err = i40e_vsi_configure(vsi);
7733 if (err)
7734 goto err_setup_rx;
7735
7736 if (vsi->netdev) {
7737 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7738 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
7739 err = i40e_vsi_request_irq(vsi, int_name);
7740 if (err)
7741 goto err_setup_rx;
7742
7743
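 /* Notify the stack of the actual queue counts. */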
7744 err = netif_set_real_num_tx_queues(vsi->netdev,
7745 vsi->num_queue_pairs);
7746 if (err)
7747 goto err_set_queues;
7748
7749 err = netif_set_real_num_rx_queues(vsi->netdev,
7750 vsi->num_queue_pairs);
7751 if (err)
7752 goto err_set_queues;
7753
7754 } else if (vsi->type == I40E_VSI_FDIR) {
7755 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
7756 dev_driver_string(&pf->pdev->dev),
7757 dev_name(&pf->pdev->dev));
7758 err = i40e_vsi_request_irq(vsi, int_name);
7759
7760 } else {
7761 err = -EINVAL;
7762 goto err_setup_rx;
7763 }
7764
7765 err = i40e_up_complete(vsi);
7766 if (err)
7767 goto err_up_complete;
7768
7769 return 0;
7770
7771err_up_complete:
7772 i40e_down(vsi);
7773err_set_queues:
7774 i40e_vsi_free_irq(vsi);
7775err_setup_rx:
7776 i40e_vsi_free_rx_resources(vsi);
7777err_setup_tx:
7778 i40e_vsi_free_tx_resources(vsi);
7779 if (vsi == pf->vsi[pf->lan_vsi])
7780 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
7781
7782 return err;
7783}
7784
7785
7786
7787
7788
7789
7790
7791
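/**
 * i40e_fdir_filter_exit - clean up the Flow Director accounting
 * @pf: board private structure
 *
 * Destroys the hlist in which all the Flow Director filters were
 * saved and restores the default input set masks.
 **/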
7792static void i40e_fdir_filter_exit(struct i40e_pf *pf)
7793{
7794 struct i40e_fdir_filter *filter;
7795 struct i40e_flex_pit *pit_entry, *tmp;
7796 struct hlist_node *node2;
7797
7798 hlist_for_each_entry_safe(filter, node2,
7799 &pf->fdir_filter_list, fdir_node) {
7800 hlist_del(&filter->fdir_node);
7801 kfree(filter);
7802 }
7803
7804 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
7805 list_del(&pit_entry->list);
7806 kfree(pit_entry);
7807 }
7808 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
7809
7810 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
7811 list_del(&pit_entry->list);
7812 kfree(pit_entry);
7813 }
7814 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
7815
7816 pf->fdir_pf_active_filters = 0;
7817 pf->fd_tcp4_filter_cnt = 0;
7818 pf->fd_udp4_filter_cnt = 0;
7819 pf->fd_sctp4_filter_cnt = 0;
7820 pf->fd_ip4_filter_cnt = 0;
7821
7822
7823 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
7824 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7825 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7826
7827
7828 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
7829 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7830 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7831
7832
7833 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
7834 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7835 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7836
7837
7838 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
7839 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
7840
7841 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
7842 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
7843}
7844
7845
7846
7847
7848
7849
7850
7851
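/**
 * i40e_cloud_filter_exit - remove all cloud filters
 * @pf: board private structure
 *
 * Destroys the list of cloud filters and re-enables Flow Director
 * Sideband if it had been set aside for cloud filters.
 **/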
7852static void i40e_cloud_filter_exit(struct i40e_pf *pf)
7853{
7854 struct i40e_cloud_filter *cfilter;
7855 struct hlist_node *node;
7856
7857 hlist_for_each_entry_safe(cfilter, node,
7858 &pf->cloud_filter_list, cloud_node) {
7859 hlist_del(&cfilter->cloud_node);
7860 kfree(cfilter);
7861 }
7862 pf->num_cloud_filters = 0;
7863
7864 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
7865 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
7866 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7867 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7868 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
7869 }
7870}
7871
7872
7873
7874
7875
7876
7877
7878
7879
7880
7881
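/**
 * i40e_close - disable a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control but
 * needs to be disabled.
 *
 * Returns 0, this is not allowed to fail
 **/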
7882int i40e_close(struct net_device *netdev)
7883{
7884 struct i40e_netdev_priv *np = netdev_priv(netdev);
7885 struct i40e_vsi *vsi = np->vsi;
7886
7887 i40e_vsi_close(vsi);
7888
7889 return 0;
7890}
7891
7892
7893
7894
7895
7896
7897
7898
7899
7900
7901
7902
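/**
 * i40e_do_reset - perform the requested reset of the device and driver
 * @pf: board private structure
 * @reset_flags: which reset is requested
 * @lock_acquired: indicates whether the rtnl lock is already held by
 * the caller
 **/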
7903void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
7904{
7905 u32 val;
7906
7907 WARN_ON(in_interrupt());
7908
7909
7910
7911 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
7912
7913
7914
7915
7916
7917
7918
7919
7920
7921 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
7922 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
7923 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
7924 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
7925
7926 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
7927
7928
7929
7930
7931
7932 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
7933 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
7934 val |= I40E_GLGEN_RTRIG_CORER_MASK;
7935 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
7936 i40e_flush(&pf->hw);
7937
7938 } else if (reset_flags & I40E_PF_RESET_FLAG) {
7939
7940
7941
7942
7943
7944
7945
7946
7947
7948 dev_dbg(&pf->pdev->dev, "PFR requested\n");
7949 i40e_handle_reset_warning(pf, lock_acquired);
7950
7951 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
7952 int v;
7953
7954
7955 dev_info(&pf->pdev->dev,
7956 "VSI reinit requested\n");
7957 for (v = 0; v < pf->num_alloc_vsi; v++) {
7958 struct i40e_vsi *vsi = pf->vsi[v];
7959
7960 if (vsi != NULL &&
7961 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
7962 vsi->state))
7963 i40e_vsi_reinit_locked(pf->vsi[v]);
7964 }
7965 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
7966 int v;
7967
7968
7969 dev_info(&pf->pdev->dev, "VSI down requested\n");
7970 for (v = 0; v < pf->num_alloc_vsi; v++) {
7971 struct i40e_vsi *vsi = pf->vsi[v];
7972
7973 if (vsi != NULL &&
7974 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
7975 vsi->state)) {
7976 set_bit(__I40E_VSI_DOWN, vsi->state);
7977 i40e_down(vsi);
7978 }
7979 }
7980 } else {
7981 dev_info(&pf->pdev->dev,
7982 "bad reset request 0x%08x\n", reset_flags);
7983 }
7984}
7985
7986#ifdef CONFIG_I40E_DCB
7987
7988
7989
7990
7991
7992
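/**
 * i40e_dcb_need_reconfig - check if DCB needs reconfiguration
 * @pf: board private structure
 * @old_cfg: current DCB config
 * @new_cfg: new DCB config
 **/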
7993bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
7994 struct i40e_dcbx_config *old_cfg,
7995 struct i40e_dcbx_config *new_cfg)
7996{
7997 bool need_reconfig = false;
7998
7999
8000 if (memcmp(&new_cfg->etscfg,
8001 &old_cfg->etscfg,
8002 sizeof(new_cfg->etscfg))) {
8003
8004 if (memcmp(&new_cfg->etscfg.prioritytable,
8005 &old_cfg->etscfg.prioritytable,
8006 sizeof(new_cfg->etscfg.prioritytable))) {
8007 need_reconfig = true;
8008 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
8009 }
8010
8011 if (memcmp(&new_cfg->etscfg.tcbwtable,
8012 &old_cfg->etscfg.tcbwtable,
8013 sizeof(new_cfg->etscfg.tcbwtable)))
8014 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
8015
8016 if (memcmp(&new_cfg->etscfg.tsatable,
8017 &old_cfg->etscfg.tsatable,
8018 sizeof(new_cfg->etscfg.tsatable)))
8019 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
8020 }
8021
8022
8023 if (memcmp(&new_cfg->pfc,
8024 &old_cfg->pfc,
8025 sizeof(new_cfg->pfc))) {
8026 need_reconfig = true;
8027 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
8028 }
8029
8030
8031 if (memcmp(&new_cfg->app,
8032 &old_cfg->app,
8033 sizeof(new_cfg->app))) {
8034 need_reconfig = true;
8035 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
8036 }
8037
8038 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
8039 return need_reconfig;
8040}
8041
8042
8043
8044
8045
8046
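/**
 * i40e_handle_lldp_event - handle an LLDP MIB change event from firmware
 * @pf: board private structure
 * @e: event info posted on the admin receive queue
 **/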
8047static int i40e_handle_lldp_event(struct i40e_pf *pf,
8048 struct i40e_arq_event_info *e)
8049{
8050 struct i40e_aqc_lldp_get_mib *mib =
8051 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
8052 struct i40e_hw *hw = &pf->hw;
8053 struct i40e_dcbx_config tmp_dcbx_cfg;
8054 bool need_reconfig = false;
8055 int ret = 0;
8056 u8 type;
8057
8058
8059 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
8060 return ret;
8061
8062
8063 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
8064 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
8065 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
8066 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
8067 return ret;
8068
8069
8070 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
8071 dev_dbg(&pf->pdev->dev,
8072 "LLDP event mib type %s\n", type ? "remote" : "local");
8073 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
8074
8075 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
8076 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
8077 &hw->remote_dcbx_config);
8078 goto exit;
8079 }
8080
8081
8082 tmp_dcbx_cfg = hw->local_dcbx_config;
8083
8084
8085 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
8086
8087 ret = i40e_get_dcb_config(&pf->hw);
8088 if (ret) {
8089 dev_info(&pf->pdev->dev,
8090 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
8091 i40e_stat_str(&pf->hw, ret),
8092 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8093 goto exit;
8094 }
8095
8096
8097 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
8098 sizeof(tmp_dcbx_cfg))) {
8099 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
8100 goto exit;
8101 }
8102
8103 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
8104 &hw->local_dcbx_config);
8105
8106 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
8107
8108 if (!need_reconfig)
8109 goto exit;
8110
8111
8112 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
8113 pf->flags |= I40E_FLAG_DCB_ENABLED;
8114 else
8115 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8116
8117 set_bit(__I40E_PORT_SUSPENDED, pf->state);
8118
8119 i40e_pf_quiesce_all_vsi(pf);
8120
8121
8122 i40e_dcb_reconfigure(pf);
8123
8124 ret = i40e_resume_port_tx(pf);
8125
8126 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
8127
8128 if (ret)
8129 goto exit;
8130
8131
8132 ret = i40e_pf_wait_queues_disabled(pf);
8133 if (ret) {
8134
8135 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8136 i40e_service_event_schedule(pf);
8137 } else {
8138 i40e_pf_unquiesce_all_vsi(pf);
8139 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
8140 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
8141 }
8142
8143exit:
8144 return ret;
8145}
8146#endif
8147
8148
8149
8150
8151
8152
8153
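/**
 * i40e_do_reset_safe - perform the requested reset under the rtnl lock
 * @pf: board private structure
 * @reset_flags: which reset is requested
 **/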
8154void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
8155{
8156 rtnl_lock();
8157 i40e_do_reset(pf, reset_flags, true);
8158 rtnl_unlock();
8159}
8160
8161
8162
8163
8164
8165
8166
8167
8168
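/**
 * i40e_handle_lan_overflow_event - handle a LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on the admin receive queue
 *
 * If the overflowing queue belongs to a VF, that VF is reset.
 **/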
8169static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
8170 struct i40e_arq_event_info *e)
8171{
8172 struct i40e_aqc_lan_overflow *data =
8173 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
8174 u32 queue = le32_to_cpu(data->prtdcb_rupto);
8175 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
8176 struct i40e_hw *hw = &pf->hw;
8177 struct i40e_vf *vf;
8178 u16 vf_id;
8179
8180 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
8181 queue, qtx_ctl);
8182
8183
8184 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
8185 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
8186 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
8187 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
8188 vf_id -= hw->func_caps.vf_base_id;
8189 vf = &pf->vf[vf_id];
8190 i40e_vc_notify_vf_reset(vf);
8191
8192 msleep(20);
8193 i40e_reset_vf(vf, false);
8194 }
8195}
8196
8197
8198
8199
8200
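/**
 * i40e_get_cur_guaranteed_fd_count - get the consumed guaranteed FD filters
 * @pf: board private structure
 **/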
8201u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
8202{
8203 u32 val, fcnt_prog;
8204
8205 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8206 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
8207 return fcnt_prog;
8208}
8209
8210
8211
8212
8213
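/**
 * i40e_get_current_fd_count - get total FD filters programmed for this PF
 * @pf: board private structure
 **/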
8214u32 i40e_get_current_fd_count(struct i40e_pf *pf)
8215{
8216 u32 val, fcnt_prog;
8217
8218 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8219 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
8220 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
8221 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
8222 return fcnt_prog;
8223}
8224
8225
8226
8227
8228
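/**
 * i40e_get_global_fd_count - get total FD filters programmed on the device
 * @pf: board private structure
 **/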
8229u32 i40e_get_global_fd_count(struct i40e_pf *pf)
8230{
8231 u32 val, fcnt_prog;
8232
8233 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
8234 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
8235 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
8236 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
8237 return fcnt_prog;
8238}
8239
8240
8241
8242
8243
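/**
 * i40e_reenable_fdir_sb - re-enable FDir Sideband if it was auto-disabled
 * @pf: board private structure
 **/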
8244static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
8245{
8246 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
8247 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
8248 (I40E_DEBUG_FD & pf->hw.debug_mask))
8249 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
8250}
8251
8252
8253
8254
8255
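/**
 * i40e_reenable_fdir_atr - re-enable FDir ATR if it was auto-disabled
 * @pf: board private structure
 *
 * Also restores the default TCP/IPv4 input set that ATR relies on.
 **/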
8256static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
8257{
8258 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
8259
8260
8261
8262
8263
8264 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8265 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8266 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8267
8268 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8269 (I40E_DEBUG_FD & pf->hw.debug_mask))
8270 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
8271 }
8272}
8273
8274
8275
8276
8277
8278
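/**
 * i40e_delete_invalid_filter - remove an FD filter rejected by firmware
 * @pf: board private structure
 * @filter: the filter to remove from the accounting and free
 **/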
8279static void i40e_delete_invalid_filter(struct i40e_pf *pf,
8280 struct i40e_fdir_filter *filter)
8281{
8282
8283 pf->fdir_pf_active_filters--;
8284 pf->fd_inv = 0;
8285
8286 switch (filter->flow_type) {
8287 case TCP_V4_FLOW:
8288 pf->fd_tcp4_filter_cnt--;
8289 break;
8290 case UDP_V4_FLOW:
8291 pf->fd_udp4_filter_cnt--;
8292 break;
8293 case SCTP_V4_FLOW:
8294 pf->fd_sctp4_filter_cnt--;
8295 break;
8296 case IP_USER_FLOW:
8297 switch (filter->ip4_proto) {
8298 case IPPROTO_TCP:
8299 pf->fd_tcp4_filter_cnt--;
8300 break;
8301 case IPPROTO_UDP:
8302 pf->fd_udp4_filter_cnt--;
8303 break;
8304 case IPPROTO_SCTP:
8305 pf->fd_sctp4_filter_cnt--;
8306 break;
8307 case IPPROTO_IP:
8308 pf->fd_ip4_filter_cnt--;
8309 break;
8310 }
8311 break;
8312 }
8313
8314
8315 hlist_del(&filter->fdir_node);
8316 kfree(filter);
8317}
8318
8319
8320
8321
8322
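/**
 * i40e_fdir_check_and_reenable - re-enable FDir SB/ATR when space allows
 * @pf: board private structure
 **/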
8323void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
8324{
8325 struct i40e_fdir_filter *filter;
8326 u32 fcnt_prog, fcnt_avail;
8327 struct hlist_node *node;
8328
8329 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8330 return;
8331
8332
8333 fcnt_prog = i40e_get_global_fd_count(pf);
8334 fcnt_avail = pf->fdir_pf_filter_count;
8335 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
8336 (pf->fd_add_err == 0) ||
8337 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
8338 i40e_reenable_fdir_sb(pf);
8339
8340
8341
8342
8343
8344 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
8345 (pf->fd_tcp4_filter_cnt == 0))
8346 i40e_reenable_fdir_atr(pf);
8347
8348
8349 if (pf->fd_inv > 0) {
8350 hlist_for_each_entry_safe(filter, node,
8351 &pf->fdir_filter_list, fdir_node)
8352 if (filter->fd_id == pf->fd_inv)
8353 i40e_delete_invalid_filter(pf, filter);
8354 }
8355}
8356
8357#define I40E_MIN_FD_FLUSH_INTERVAL 10
8358#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
8359
8360
8361
8362
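/**
 * i40e_fdir_flush_and_replay - flush the FD table and replay SB filters
 * @pf: board private structure
 **/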
8363static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
8364{
8365 unsigned long min_flush_time;
8366 int flush_wait_retry = 50;
8367 bool disable_atr = false;
8368 int fd_room;
8369 int reg;
8370
8371 if (!time_after(jiffies, pf->fd_flush_timestamp +
8372 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
8373 return;
8374
8375
8376
8377
8378 min_flush_time = pf->fd_flush_timestamp +
8379 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
8380 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
8381
8382 if (!(time_after(jiffies, min_flush_time)) &&
8383 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
8384 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8385 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
8386 disable_atr = true;
8387 }
8388
8389 pf->fd_flush_timestamp = jiffies;
8390 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8391
8392 wr32(&pf->hw, I40E_PFQF_CTL_1,
8393 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
8394 i40e_flush(&pf->hw);
8395 pf->fd_flush_cnt++;
8396 pf->fd_add_err = 0;
8397 do {
8398
8399 usleep_range(5000, 6000);
8400 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
8401 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
8402 break;
8403 } while (flush_wait_retry--);
8404 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
8405 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
8406 } else {
8407
8408 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
8409 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
8410 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8411 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
8412 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8413 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
8414 }
8415}
8416
8417
8418
8419
8420
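/**
 * i40e_get_current_atr_cnt - get the count of ATR FD filters programmed
 * @pf: board private structure
 **/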
8421u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
8422{
8423 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
8424}
8425
8426
8427
8428
8429
8430
8431#define I40E_MAX_FD_PROGRAM_ERROR 256
8432
8433
8434
8435
8436
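/**
 * i40e_fdir_reinit_subtask - service task piece that maintains the FD table
 * @pf: board private structure
 **/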
8437static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
8438{
8439
8440
8441 if (test_bit(__I40E_DOWN, pf->state))
8442 return;
8443
8444 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8445 i40e_fdir_flush_and_replay(pf);
8446
8447 i40e_fdir_check_and_reenable(pf);
8448
8449}
8450
8451
8452
8453
8454
8455
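/**
 * i40e_vsi_link_event - notify a VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 **/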
8456static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
8457{
8458 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
8459 return;
8460
8461 switch (vsi->type) {
8462 case I40E_VSI_MAIN:
8463 if (!vsi->netdev || !vsi->netdev_registered)
8464 break;
8465
8466 if (link_up) {
8467 netif_carrier_on(vsi->netdev);
8468 netif_tx_wake_all_queues(vsi->netdev);
8469 } else {
8470 netif_carrier_off(vsi->netdev);
8471 netif_tx_stop_all_queues(vsi->netdev);
8472 }
8473 break;
8474
8475 case I40E_VSI_SRIOV:
8476 case I40E_VSI_VMDQ2:
8477 case I40E_VSI_CTRL:
8478 case I40E_VSI_IWARP:
8479 case I40E_VSI_MIRROR:
8480 default:
8481
8482 break;
8483 }
8484}
8485
8486
8487
8488
8489
8490
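/**
 * i40e_veb_link_event - notify elements below a VEB of a link event
 * @veb: veb to be notified
 * @link_up: link up or down
 **/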
8491static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
8492{
8493 struct i40e_pf *pf;
8494 int i;
8495
8496 if (!veb || !veb->pf)
8497 return;
8498 pf = veb->pf;
8499
8500
8501 for (i = 0; i < I40E_MAX_VEB; i++)
8502 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
8503 i40e_veb_link_event(pf->veb[i], link_up);
8504
8505
8506 for (i = 0; i < pf->num_alloc_vsi; i++)
8507 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
8508 i40e_vsi_link_event(pf->vsi[i], link_up);
8509}
8510
8511
8512
8513
8514
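/**
 * i40e_link_event - update the PF's view of the link state
 * @pf: board private structure
 **/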
8515static void i40e_link_event(struct i40e_pf *pf)
8516{
8517 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8518 u8 new_link_speed, old_link_speed;
8519 i40e_status status;
8520 bool new_link, old_link;
8521
8522
8523 pf->hw.phy.get_link_info = true;
8524 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
8525 status = i40e_get_link_status(&pf->hw, &new_link);
8526
8527
8528 if (status == I40E_SUCCESS) {
8529 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
8530 } else {
8531
8532
8533
8534 set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
8535 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
8536 status);
8537 return;
8538 }
8539
8540 old_link_speed = pf->hw.phy.link_info_old.link_speed;
8541 new_link_speed = pf->hw.phy.link_info.link_speed;
8542
8543 if (new_link == old_link &&
8544 new_link_speed == old_link_speed &&
8545 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
8546 new_link == netif_carrier_ok(vsi->netdev)))
8547 return;
8548
8549 i40e_print_link_message(vsi, new_link);
8550
8551
8552
8553
8554 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
8555 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
8556 else
8557 i40e_vsi_link_event(vsi, new_link);
8558
8559 if (pf->vf)
8560 i40e_vc_notify_link_state(pf);
8561
8562 if (pf->flags & I40E_FLAG_PTP)
8563 i40e_ptp_set_increment(pf);
8564}
8565
8566
8567
8568
8569
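/**
 * i40e_watchdog_subtask - periodic checks not using an event driven scheme
 * @pf: board private structure
 **/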
8570static void i40e_watchdog_subtask(struct i40e_pf *pf)
8571{
8572 int i;
8573
8574
8575 if (test_bit(__I40E_DOWN, pf->state) ||
8576 test_bit(__I40E_CONFIG_BUSY, pf->state))
8577 return;
8578
8579
8580 if (time_before(jiffies, (pf->service_timer_previous +
8581 pf->service_timer_period)))
8582 return;
8583 pf->service_timer_previous = jiffies;
8584
8585 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
8586 test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
8587 i40e_link_event(pf);
8588
8589
8590
8591
8592 for (i = 0; i < pf->num_alloc_vsi; i++)
8593 if (pf->vsi[i] && pf->vsi[i]->netdev)
8594 i40e_update_stats(pf->vsi[i]);
8595
8596 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
8597
8598 for (i = 0; i < I40E_MAX_VEB; i++)
8599 if (pf->veb[i])
8600 i40e_update_veb_stats(pf->veb[i]);
8601 }
8602
8603 i40e_ptp_rx_hang(pf);
8604 i40e_ptp_tx_hang(pf);
8605}
8606
8607
8608
8609
8610
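/**
 * i40e_reset_subtask - collect requested resets and carry them out
 * @pf: board private structure
 **/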
8611static void i40e_reset_subtask(struct i40e_pf *pf)
8612{
8613 u32 reset_flags = 0;
8614
8615 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
8616 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
8617 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
8618 }
8619 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
8620 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
8621 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8622 }
8623 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
8624 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
8625 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
8626 }
8627 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
8628 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
8629 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
8630 }
8631 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
8632 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
8633 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
8634 }
8635
8636
8637
8638
8639 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
8640 i40e_prep_for_reset(pf, false);
8641 i40e_reset(pf);
8642 i40e_rebuild(pf, false, false);
8643 }
8644
8645
8646 if (reset_flags &&
8647 !test_bit(__I40E_DOWN, pf->state) &&
8648 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
8649 i40e_do_reset(pf, reset_flags, false);
8650 }
8651}
8652
8653
8654
8655
8656
8657
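/**
 * i40e_handle_link_event - handle a link event from the admin queue
 * @pf: board private structure
 * @e: event info posted on the admin receive queue
 **/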
8658static void i40e_handle_link_event(struct i40e_pf *pf,
8659 struct i40e_arq_event_info *e)
8660{
8661 struct i40e_aqc_get_link_status *status =
8662 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
8663
8664
8665
8666
8667
8668
8669
8670 i40e_link_event(pf);
8671
8672
8673 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
8674 dev_err(&pf->pdev->dev,
8675 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
8676 dev_err(&pf->pdev->dev,
8677 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
8678 } else {
8679
8680
8681
8682 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
8683 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
8684 (!(status->link_info & I40E_AQ_LINK_UP)) &&
8685 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
8686 dev_err(&pf->pdev->dev,
8687 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
8688 dev_err(&pf->pdev->dev,
8689 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
8690 }
8691 }
8692}
8693
8694
8695
8696
8697
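/**
 * i40e_clean_adminq_subtask - clean the admin receive queue and process events
 * @pf: board private structure
 **/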
8698static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
8699{
8700 struct i40e_arq_event_info event;
8701 struct i40e_hw *hw = &pf->hw;
8702 u16 pending, i = 0;
8703 i40e_status ret;
8704 u16 opcode;
8705 u32 oldval;
8706 u32 val;
8707
8708
8709 if (test_bit(__I40E_RESET_FAILED, pf->state))
8710 return;
8711
8712
8713 val = rd32(&pf->hw, pf->hw.aq.arq.len);
8714 oldval = val;
8715 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
8716 if (hw->debug_mask & I40E_DEBUG_AQ)
8717 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
8718 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
8719 }
8720 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
8721 if (hw->debug_mask & I40E_DEBUG_AQ)
8722 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
8723 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
8724 pf->arq_overflows++;
8725 }
8726 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
8727 if (hw->debug_mask & I40E_DEBUG_AQ)
8728 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
8729 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
8730 }
8731 if (oldval != val)
8732 wr32(&pf->hw, pf->hw.aq.arq.len, val);
8733
8734 val = rd32(&pf->hw, pf->hw.aq.asq.len);
8735 oldval = val;
8736 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
8737 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8738 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
8739 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
8740 }
8741 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
8742 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8743 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
8744 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
8745 }
8746 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
8747 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8748 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
8749 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
8750 }
8751 if (oldval != val)
8752 wr32(&pf->hw, pf->hw.aq.asq.len, val);
8753
8754 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
8755 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
8756 if (!event.msg_buf)
8757 return;
8758
8759 do {
8760 ret = i40e_clean_arq_element(hw, &event, &pending);
8761 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
8762 break;
8763 else if (ret) {
8764 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
8765 break;
8766 }
8767
8768 opcode = le16_to_cpu(event.desc.opcode);
8769 switch (opcode) {
8770
8771 case i40e_aqc_opc_get_link_status:
8772 i40e_handle_link_event(pf, &event);
8773 break;
8774 case i40e_aqc_opc_send_msg_to_pf:
8775 ret = i40e_vc_process_vf_msg(pf,
8776 le16_to_cpu(event.desc.retval),
8777 le32_to_cpu(event.desc.cookie_high),
8778 le32_to_cpu(event.desc.cookie_low),
8779 event.msg_buf,
8780 event.msg_len);
8781 break;
8782 case i40e_aqc_opc_lldp_update_mib:
8783 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
8784#ifdef CONFIG_I40E_DCB
8785 rtnl_lock();
8786 ret = i40e_handle_lldp_event(pf, &event);
8787 rtnl_unlock();
8788#endif
8789 break;
8790 case i40e_aqc_opc_event_lan_overflow:
8791 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
8792 i40e_handle_lan_overflow_event(pf, &event);
8793 break;
8794 case i40e_aqc_opc_send_msg_to_peer:
8795 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
8796 break;
8797 case i40e_aqc_opc_nvm_erase:
8798 case i40e_aqc_opc_nvm_update:
8799 case i40e_aqc_opc_oem_post_update:
8800 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
8801 "ARQ NVM operation 0x%04x completed\n",
8802 opcode);
8803 break;
8804 default:
8805 dev_info(&pf->pdev->dev,
8806 "ARQ: Unknown event 0x%04x ignored\n",
8807 opcode);
8808 break;
8809 }
8810 } while (i++ < pf->adminq_work_limit);
8811
8812 if (i < pf->adminq_work_limit)
8813 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
8814
8815
8816 val = rd32(hw, I40E_PFINT_ICR0_ENA);
8817 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
8818 wr32(hw, I40E_PFINT_ICR0_ENA, val);
8819 i40e_flush(hw);
8820
8821 kfree(event.msg_buf);
8822}
8823
8824
8825
8826
8827
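/**
 * i40e_verify_eeprom - make sure the eeprom is good to use
 * @pf: board private structure
 **/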
8828static void i40e_verify_eeprom(struct i40e_pf *pf)
8829{
8830 int err;
8831
8832 err = i40e_diag_eeprom_test(&pf->hw);
8833 if (err) {
8834
8835 err = i40e_diag_eeprom_test(&pf->hw);
8836 if (err) {
8837 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
8838 err);
8839 set_bit(__I40E_BAD_EEPROM, pf->state);
8840 }
8841 }
8842
8843 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
8844 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
8845 clear_bit(__I40E_BAD_EEPROM, pf->state);
8846 }
8847}
8848
8849
8850
8851
8852
8853
8854
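/**
 * i40e_enable_pf_switch_lb - enable switch loopback on the PF main VSI
 * @pf: pointer to the PF structure
 **/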
8855static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
8856{
8857 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8858 struct i40e_vsi_context ctxt;
8859 int ret;
8860
8861 ctxt.seid = pf->main_vsi_seid;
8862 ctxt.pf_num = pf->hw.pf_id;
8863 ctxt.vf_num = 0;
8864 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
8865 if (ret) {
8866 dev_info(&pf->pdev->dev,
8867 "couldn't get PF vsi config, err %s aq_err %s\n",
8868 i40e_stat_str(&pf->hw, ret),
8869 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8870 return;
8871 }
8872 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8873 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8874 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8875
8876 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
8877 if (ret) {
8878 dev_info(&pf->pdev->dev,
8879 "update vsi switch failed, err %s aq_err %s\n",
8880 i40e_stat_str(&pf->hw, ret),
8881 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8882 }
8883}
8884
8885
8886
8887
8888
8889
8890
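/**
 * i40e_disable_pf_switch_lb - disable switch loopback on the PF main VSI
 * @pf: pointer to the PF structure
 **/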
8891static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
8892{
8893 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8894 struct i40e_vsi_context ctxt;
8895 int ret;
8896
8897 ctxt.seid = pf->main_vsi_seid;
8898 ctxt.pf_num = pf->hw.pf_id;
8899 ctxt.vf_num = 0;
8900 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
8901 if (ret) {
8902 dev_info(&pf->pdev->dev,
8903 "couldn't get PF vsi config, err %s aq_err %s\n",
8904 i40e_stat_str(&pf->hw, ret),
8905 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8906 return;
8907 }
8908 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8909 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8910 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8911
8912 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
8913 if (ret) {
8914 dev_info(&pf->pdev->dev,
8915 "update vsi switch failed, err %s aq_err %s\n",
8916 i40e_stat_str(&pf->hw, ret),
8917 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8918 }
8919}
8920
8921
8922
8923
8924
8925
8926
8927
8928
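/**
 * i40e_config_bridge_mode - apply the configured bridge mode for a VEB
 * @veb: pointer to the bridge instance
 *
 * Enables or disables PF switch loopback depending on VEB vs VEPA mode.
 **/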
8929static void i40e_config_bridge_mode(struct i40e_veb *veb)
8930{
8931 struct i40e_pf *pf = veb->pf;
8932
8933 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
8934 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
8935 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
8936 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
8937 i40e_disable_pf_switch_lb(pf);
8938 else
8939 i40e_enable_pf_switch_lb(pf);
8940}
8941
8942
8943
8944
8945
8946
8947
8948
8949
8950
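/**
 * i40e_reconstitute_veb - rebuild a VEB and the VSIs connected to it
 * @veb: pointer to the VEB instance
 *
 * Rebuilds the owner VSI and the VEB, then the remaining attached VSIs,
 * and finally recurses into any VEBs nested below this one.
 **/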
8951static int i40e_reconstitute_veb(struct i40e_veb *veb)
8952{
8953 struct i40e_vsi *ctl_vsi = NULL;
8954 struct i40e_pf *pf = veb->pf;
8955 int v, veb_idx;
8956 int ret;
8957
8958
8959 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
8960 if (pf->vsi[v] &&
8961 pf->vsi[v]->veb_idx == veb->idx &&
8962 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
8963 ctl_vsi = pf->vsi[v];
8964 break;
8965 }
8966 }
8967 if (!ctl_vsi) {
8968 dev_info(&pf->pdev->dev,
8969 "missing owner VSI for veb_idx %d\n", veb->idx);
8970 ret = -ENOENT;
8971 goto end_reconstitute;
8972 }
8973 if (ctl_vsi != pf->vsi[pf->lan_vsi])
8974 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
8975 ret = i40e_add_vsi(ctl_vsi);
8976 if (ret) {
8977 dev_info(&pf->pdev->dev,
8978 "rebuild of veb_idx %d owner VSI failed: %d\n",
8979 veb->idx, ret);
8980 goto end_reconstitute;
8981 }
8982 i40e_vsi_reset_stats(ctl_vsi);
8983
8984
8985 ret = i40e_add_veb(veb, ctl_vsi);
8986 if (ret)
8987 goto end_reconstitute;
8988
8989 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
8990 veb->bridge_mode = BRIDGE_MODE_VEB;
8991 else
8992 veb->bridge_mode = BRIDGE_MODE_VEPA;
8993 i40e_config_bridge_mode(veb);
8994
8995
8996 for (v = 0; v < pf->num_alloc_vsi; v++) {
8997 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
8998 continue;
8999
9000 if (pf->vsi[v]->veb_idx == veb->idx) {
9001 struct i40e_vsi *vsi = pf->vsi[v];
9002
9003 vsi->uplink_seid = veb->seid;
9004 ret = i40e_add_vsi(vsi);
9005 if (ret) {
9006 dev_info(&pf->pdev->dev,
9007 "rebuild of vsi_idx %d failed: %d\n",
9008 v, ret);
9009 goto end_reconstitute;
9010 }
9011 i40e_vsi_reset_stats(vsi);
9012 }
9013 }
9014
9015
9016 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
9017 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
9018 pf->veb[veb_idx]->uplink_seid = veb->seid;
9019 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
9020 if (ret)
9021 break;
9022 }
9023 }
9024
9025end_reconstitute:
9026 return ret;
9027}
9028
9029
9030
9031
9032
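/**
 * i40e_get_capabilities - query device capabilities from firmware
 * @pf: board private structure
 * @list_type: admin queue opcode selecting function or device capabilities
 **/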
9033static int i40e_get_capabilities(struct i40e_pf *pf,
9034 enum i40e_admin_queue_opc list_type)
9035{
9036 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
9037 u16 data_size;
9038 int buf_len;
9039 int err;
9040
9041 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
9042 do {
9043 cap_buf = kzalloc(buf_len, GFP_KERNEL);
9044 if (!cap_buf)
9045 return -ENOMEM;
9046
9047
9048 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
9049 &data_size, list_type,
9050 NULL);
9051
9052 kfree(cap_buf);
9053
9054 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
9055
9056 buf_len = data_size;
9057 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
9058 dev_info(&pf->pdev->dev,
9059 "capability discovery failed, err %s aq_err %s\n",
9060 i40e_stat_str(&pf->hw, err),
9061 i40e_aq_str(&pf->hw,
9062 pf->hw.aq.asq_last_status));
9063 return -ENODEV;
9064 }
9065 } while (err);
9066
9067 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
9068 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9069 dev_info(&pf->pdev->dev,
9070 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
9071 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
9072 pf->hw.func_caps.num_msix_vectors,
9073 pf->hw.func_caps.num_msix_vectors_vf,
9074 pf->hw.func_caps.fd_filters_guaranteed,
9075 pf->hw.func_caps.fd_filters_best_effort,
9076 pf->hw.func_caps.num_tx_qp,
9077 pf->hw.func_caps.num_vsis);
9078 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
9079 dev_info(&pf->pdev->dev,
9080 "switch_mode=0x%04x, function_valid=0x%08x\n",
9081 pf->hw.dev_caps.switch_mode,
9082 pf->hw.dev_caps.valid_functions);
9083 dev_info(&pf->pdev->dev,
9084 "SR-IOV=%d, num_vfs for all function=%u\n",
9085 pf->hw.dev_caps.sr_iov_1_1,
9086 pf->hw.dev_caps.num_vfs);
9087 dev_info(&pf->pdev->dev,
9088 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
9089 pf->hw.dev_caps.num_vsis,
9090 pf->hw.dev_caps.num_rx_qp,
9091 pf->hw.dev_caps.num_tx_qp);
9092 }
9093 }
9094 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9095#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
9096 + pf->hw.func_caps.num_vfs)
9097 if (pf->hw.revision_id == 0 &&
9098 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
9099 dev_info(&pf->pdev->dev,
9100 "got num_vsis %d, setting num_vsis to %d\n",
9101 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
9102 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
9103 }
9104 }
9105 return 0;
9106}
9107
9108static int i40e_vsi_clear(struct i40e_vsi *vsi);
9109
9110
9111
9112
9113
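/**
 * i40e_fdir_sb_setup - initialize the Flow Director Sideband resources
 * @pf: board private structure
 *
 * Programs a hash key if the hardware has none and creates the FDIR
 * control VSI if it does not already exist.
 **/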
9114static void i40e_fdir_sb_setup(struct i40e_pf *pf)
9115{
9116 struct i40e_vsi *vsi;
9117
9118
9119
9120
9121 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
9122 static const u32 hkey[] = {
9123 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
9124 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
9125 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
9126 0x95b3a76d};
9127 int i;
9128
9129 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
9130 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
9131 }
9132
9133 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
9134 return;
9135
9136
9137 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
9138
9139
9140 if (!vsi) {
9141 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
9142 pf->vsi[pf->lan_vsi]->seid, 0);
9143 if (!vsi) {
9144 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
9145 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
9146 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
9147 return;
9148 }
9149 }
9150
9151 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
9152}
9153
9154
9155
9156
9157
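/**
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 **/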
9158static void i40e_fdir_teardown(struct i40e_pf *pf)
9159{
9160 struct i40e_vsi *vsi;
9161
9162 i40e_fdir_filter_exit(pf);
9163 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
9164 if (vsi)
9165 i40e_vsi_release(vsi);
9166}
9167
9168
9169
9170
9171
9172
9173
9174
9175
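/**
 * i40e_rebuild_cloud_filters - replay cloud filters for a VSI or channel
 * @vsi: PF main VSI
 * @seid: seid of the VSI or channel whose filters are replayed
 **/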
9176static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
9177{
9178 struct i40e_cloud_filter *cfilter;
9179 struct i40e_pf *pf = vsi->back;
9180 struct hlist_node *node;
9181 i40e_status ret;
9182
9183
9184 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
9185 cloud_node) {
9186 if (cfilter->seid != seid)
9187 continue;
9188
9189 if (cfilter->dst_port)
9190 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
9191 true);
9192 else
9193 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
9194
9195 if (ret) {
9196 dev_dbg(&pf->pdev->dev,
9197 "Failed to rebuild cloud filter, err %s aq_err %s\n",
9198 i40e_stat_str(&pf->hw, ret),
9199 i40e_aq_str(&pf->hw,
9200 pf->hw.aq.asq_last_status));
9201 return ret;
9202 }
9203 }
9204 return 0;
9205}
9206
9207
9208
9209
9210
9211
9212
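/**
 * i40e_rebuild_channels - rebuild channel VSIs after a reset
 * @vsi: PF main VSI
 *
 * Re-creates any channel VSIs that existed before the reset, restoring
 * their Tx rings, bandwidth limits and cloud filters.
 **/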
9213static int i40e_rebuild_channels(struct i40e_vsi *vsi)
9214{
9215 struct i40e_channel *ch, *ch_tmp;
9216 i40e_status ret;
9217
9218 if (list_empty(&vsi->ch_list))
9219 return 0;
9220
9221 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
9222 if (!ch->initialized)
9223 break;
9224
9225 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
9226 if (ret) {
9227 dev_info(&vsi->back->pdev->dev,
9228 "failed to rebuild channels using uplink_seid %u\n",
9229 vsi->uplink_seid);
9230 return ret;
9231 }
9232
9233 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
9234 if (ret) {
9235 dev_info(&vsi->back->pdev->dev,
9236 "failed to configure TX rings for channel %u\n",
9237 ch->seid);
9238 return ret;
9239 }
9240
9241 vsi->next_base_queue = vsi->next_base_queue +
9242 ch->num_queue_pairs;
9243 if (ch->max_tx_rate) {
9244 u64 credits = ch->max_tx_rate;
9245
9246 if (i40e_set_bw_limit(vsi, ch->seid,
9247 ch->max_tx_rate))
9248 return -EINVAL;
9249
9250 do_div(credits, I40E_BW_CREDIT_DIVISOR);
9251 dev_dbg(&vsi->back->pdev->dev,
9252 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
9253 ch->max_tx_rate,
9254 credits,
9255 ch->seid);
9256 }
9257 ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
9258 if (ret) {
9259 dev_dbg(&vsi->back->pdev->dev,
9260 "Failed to rebuild cloud filters for channel VSI %u\n",
9261 ch->seid);
9262 return ret;
9263 }
9264 }
9265 return 0;
9266}
9267
9268
9269
9270
9271
9272
9273
9274
9275
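/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 * @lock_acquired: indicates whether the rtnl lock is already held by
 * the caller
 *
 * Notifies the VFs, quiesces all VSIs and shuts down the admin queue
 * and LAN HMC in preparation for a reset.
 **/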
9276static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
9277{
9278 struct i40e_hw *hw = &pf->hw;
9279 i40e_status ret = 0;
9280 u32 v;
9281
9282 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
9283 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
9284 return;
9285 if (i40e_check_asq_alive(&pf->hw))
9286 i40e_vc_notify_reset(pf);
9287
9288 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
9289
9290
9291
9292 if (!lock_acquired)
9293 rtnl_lock();
9294 i40e_pf_quiesce_all_vsi(pf);
9295 if (!lock_acquired)
9296 rtnl_unlock();
9297
9298 for (v = 0; v < pf->num_alloc_vsi; v++) {
9299 if (pf->vsi[v])
9300 pf->vsi[v]->seid = 0;
9301 }
9302
9303 i40e_shutdown_adminq(&pf->hw);
9304
9305
9306 if (hw->hmc.hmc_obj) {
9307 ret = i40e_shutdown_lan_hmc(hw);
9308 if (ret)
9309 dev_warn(&pf->pdev->dev,
9310 "shutdown_lan_hmc failed: %d\n", ret);
9311 }
9312}
9313
9314
9315
9316
9317
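/**
 * i40e_send_version - update firmware with the driver version
 * @pf: board private structure
 **/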
9318static void i40e_send_version(struct i40e_pf *pf)
9319{
9320 struct i40e_driver_version dv;
9321
9322 dv.major_version = DRV_VERSION_MAJOR;
9323 dv.minor_version = DRV_VERSION_MINOR;
9324 dv.build_version = DRV_VERSION_BUILD;
9325 dv.subbuild_version = 0;
9326 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
9327 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
9328}
9329
9330
9331
9332
9333
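/**
 * i40e_get_oem_version - read OEM specific version information from NVM
 * @hw: pointer to the hardware structure
 **/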
9334static void i40e_get_oem_version(struct i40e_hw *hw)
9335{
9336 u16 block_offset = 0xffff;
9337 u16 block_length = 0;
9338 u16 capabilities = 0;
9339 u16 gen_snap = 0;
9340 u16 release = 0;
9341
9342#define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
9343#define I40E_NVM_OEM_LENGTH_OFFSET 0x00
9344#define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
9345#define I40E_NVM_OEM_GEN_OFFSET 0x02
9346#define I40E_NVM_OEM_RELEASE_OFFSET 0x03
9347#define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
9348#define I40E_NVM_OEM_LENGTH 3
9349
9350
9351 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
9352 if (block_offset == 0xffff)
9353 return;
9354
9355
9356 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
9357 &block_length);
9358 if (block_length < I40E_NVM_OEM_LENGTH)
9359 return;
9360
9361
9362 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
9363 &capabilities);
9364 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
9365 return;
9366
9367 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
9368 &gen_snap);
9369 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
9370 &release);
9371 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
9372 hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
9373}
9374
9375
9376
9377
9378
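/**
 * i40e_reset - issue a PF reset and track the result
 * @pf: board private structure
 **/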
9379static int i40e_reset(struct i40e_pf *pf)
9380{
9381 struct i40e_hw *hw = &pf->hw;
9382 i40e_status ret;
9383
9384 ret = i40e_pf_reset(hw);
9385 if (ret) {
9386 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
9387 set_bit(__I40E_RESET_FAILED, pf->state);
9388 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
9389 } else {
9390 pf->pfr_count++;
9391 }
9392 return ret;
9393}
9394
9395
9396
9397
9398
9399
9400
9401
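/**
 * i40e_rebuild - rebuild the internal state of the driver after a reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized
 * @lock_acquired: indicates whether the rtnl lock is already held by
 * the caller
 **/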
9402static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
9403{
9404 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9405 struct i40e_hw *hw = &pf->hw;
9406 u8 set_fc_aq_fail = 0;
9407 i40e_status ret;
9408 u32 val;
9409 int v;
9410
9411 if (test_bit(__I40E_DOWN, pf->state))
9412 goto clear_recovery;
9413 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
9414
9415
9416 ret = i40e_init_adminq(&pf->hw);
9417 if (ret) {
9418 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
9419 i40e_stat_str(&pf->hw, ret),
9420 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9421 goto clear_recovery;
9422 }
9423 i40e_get_oem_version(&pf->hw);
9424
9425 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
9426 ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
9427 hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
9428
9429
9430
9431
9432
9433 mdelay(300);
9434 }
9435
9436
9437 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
9438 i40e_verify_eeprom(pf);
9439
9440 i40e_clear_pxe_mode(hw);
9441 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
9442 if (ret)
9443 goto end_core_reset;
9444
9445 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
9446 hw->func_caps.num_rx_qp, 0, 0);
9447 if (ret) {
9448 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
9449 goto end_core_reset;
9450 }
9451 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
9452 if (ret) {
9453 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
9454 goto end_core_reset;
9455 }
9456
9457
9458 i40e_aq_set_dcb_parameters(hw, true, NULL);
9459
9460#ifdef CONFIG_I40E_DCB
9461 ret = i40e_init_pf_dcb(pf);
9462 if (ret) {
9463 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
9464 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9465
9466 }
9467#endif
9468
9469 if (!lock_acquired)
9470 rtnl_lock();
9471 ret = i40e_setup_pf_switch(pf, reinit);
9472 if (ret)
9473 goto end_unlock;
9474
9475
9476
9477
9478 ret = i40e_aq_set_phy_int_mask(&pf->hw,
9479 ~(I40E_AQ_EVENT_LINK_UPDOWN |
9480 I40E_AQ_EVENT_MEDIA_NA |
9481 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
9482 if (ret)
9483 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
9484 i40e_stat_str(&pf->hw, ret),
9485 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9486
9487
9488 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
9489 if (ret)
9490 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
9491 i40e_stat_str(&pf->hw, ret),
9492 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9493
9494
9495
9496
9497
9498
9499
9500
9501 if (vsi->uplink_seid != pf->mac_seid) {
9502 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
9503
9504 for (v = 0; v < I40E_MAX_VEB; v++) {
9505 if (!pf->veb[v])
9506 continue;
9507
9508 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
9509 pf->veb[v]->uplink_seid == 0) {
9510 ret = i40e_reconstitute_veb(pf->veb[v]);
9511
9512 if (!ret)
9513 continue;
9514
9515
9516
9517
9518
9519
9520
9521 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
9522 dev_info(&pf->pdev->dev,
9523 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
9524 ret);
9525 vsi->uplink_seid = pf->mac_seid;
9526 break;
9527 } else if (pf->veb[v]->uplink_seid == 0) {
9528 dev_info(&pf->pdev->dev,
9529 "rebuild of orphan VEB failed: %d\n",
9530 ret);
9531 }
9532 }
9533 }
9534 }
9535
9536 if (vsi->uplink_seid == pf->mac_seid) {
9537 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
9538
9539 ret = i40e_add_vsi(vsi);
9540 if (ret) {
9541 dev_info(&pf->pdev->dev,
9542 "rebuild of Main VSI failed: %d\n", ret);
9543 goto end_unlock;
9544 }
9545 }
9546
9547 if (vsi->mqprio_qopt.max_rate[0]) {
9548 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
9549 u64 credits = 0;
9550
9551 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
9552 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
9553 if (ret)
9554 goto end_unlock;
9555
9556 credits = max_tx_rate;
9557 do_div(credits, I40E_BW_CREDIT_DIVISOR);
9558 dev_dbg(&vsi->back->pdev->dev,
9559 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
9560 max_tx_rate,
9561 credits,
9562 vsi->seid);
9563 }
9564
9565 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
9566 if (ret)
9567 goto end_unlock;
9568
9569
9570
9571
9572 ret = i40e_rebuild_channels(vsi);
9573 if (ret)
9574 goto end_unlock;
9575
9576
9577
9578
9579
9580#define I40E_REG_MSS 0x000E64DC
9581#define I40E_REG_MSS_MIN_MASK 0x3FF0000
9582#define I40E_64BYTE_MSS 0x400000
9583 val = rd32(hw, I40E_REG_MSS);
9584 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
9585 val &= ~I40E_REG_MSS_MIN_MASK;
9586 val |= I40E_64BYTE_MSS;
9587 wr32(hw, I40E_REG_MSS, val);
9588 }
9589
9590 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
9591 msleep(75);
9592 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
9593 if (ret)
9594 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
9595 i40e_stat_str(&pf->hw, ret),
9596 i40e_aq_str(&pf->hw,
9597 pf->hw.aq.asq_last_status));
9598 }
9599
9600 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
9601 ret = i40e_setup_misc_vector(pf);
9602
9603
9604
9605
9606
9607
9608
9609 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
9610 pf->main_vsi_seid);
9611
9612
9613 i40e_pf_unquiesce_all_vsi(pf);
9614
9615
9616 if (!lock_acquired)
9617 rtnl_unlock();
9618
9619
9620 ret = i40e_set_promiscuous(pf, pf->cur_promisc);
9621 if (ret)
9622 dev_warn(&pf->pdev->dev,
9623 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
9624 pf->cur_promisc ? "on" : "off",
9625 i40e_stat_str(&pf->hw, ret),
9626 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9627
9628 i40e_reset_all_vfs(pf, true);
9629
9630
9631 i40e_send_version(pf);
9632
9633
9634 goto end_core_reset;
9635
9636end_unlock:
9637 if (!lock_acquired)
9638 rtnl_unlock();
9639end_core_reset:
9640 clear_bit(__I40E_RESET_FAILED, pf->state);
9641clear_recovery:
9642 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
9643 clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
9644}
9645
9646
9647
9648
9649
9650
9651
9652
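/**
 * i40e_reset_and_rebuild - reset the PF and then rebuild its state
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized
 * @lock_acquired: indicates whether the rtnl lock is already held by
 * the caller
 **/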
9653static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
9654 bool lock_acquired)
9655{
9656 int ret;
9657
9658
9659
9660
9661 ret = i40e_reset(pf);
9662 if (!ret)
9663 i40e_rebuild(pf, reinit, lock_acquired);
9664}
9665
9666
9667
9668
9669
9670
9671
9672
9673
9674
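/**
 * i40e_handle_reset_warning - prepare for, perform and recover from a PF reset
 * @pf: board private structure
 * @lock_acquired: indicates whether the rtnl lock is already held by
 * the caller
 **/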
9675static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
9676{
9677 i40e_prep_for_reset(pf, lock_acquired);
9678 i40e_reset_and_rebuild(pf, false, lock_acquired);
9679}
9680
9681
9682
9683
9684
9685
9686
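/**
 * i40e_handle_mdd_event - handle a Malicious Driver Detection event
 * @pf: pointer to the PF structure
 *
 * Resets the PF on a PF-level event and disables VFs that trip the
 * detection too many times.
 **/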
9687static void i40e_handle_mdd_event(struct i40e_pf *pf)
9688{
9689 struct i40e_hw *hw = &pf->hw;
9690 bool mdd_detected = false;
9691 bool pf_mdd_detected = false;
9692 struct i40e_vf *vf;
9693 u32 reg;
9694 int i;
9695
9696 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
9697 return;
9698
9699
9700 reg = rd32(hw, I40E_GL_MDET_TX);
9701 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
9702 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
9703 I40E_GL_MDET_TX_PF_NUM_SHIFT;
9704 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
9705 I40E_GL_MDET_TX_VF_NUM_SHIFT;
9706 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
9707 I40E_GL_MDET_TX_EVENT_SHIFT;
9708 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
9709 I40E_GL_MDET_TX_QUEUE_SHIFT) -
9710 pf->hw.func_caps.base_queue;
9711 if (netif_msg_tx_err(pf))
9712 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
9713 event, queue, pf_num, vf_num);
9714 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
9715 mdd_detected = true;
9716 }
9717 reg = rd32(hw, I40E_GL_MDET_RX);
9718 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
9719 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
9720 I40E_GL_MDET_RX_FUNCTION_SHIFT;
9721 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
9722 I40E_GL_MDET_RX_EVENT_SHIFT;
9723 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
9724 I40E_GL_MDET_RX_QUEUE_SHIFT) -
9725 pf->hw.func_caps.base_queue;
9726 if (netif_msg_rx_err(pf))
9727 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
9728 event, queue, func);
9729 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
9730 mdd_detected = true;
9731 }
9732
9733 if (mdd_detected) {
9734 reg = rd32(hw, I40E_PF_MDET_TX);
9735 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
9736 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
9737 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
9738 pf_mdd_detected = true;
9739 }
9740 reg = rd32(hw, I40E_PF_MDET_RX);
9741 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
9742 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
9743 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
9744 pf_mdd_detected = true;
9745 }
9746
9747 if (pf_mdd_detected) {
9748 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9749 i40e_service_event_schedule(pf);
9750 }
9751 }
9752
9753
9754 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
9755 vf = &(pf->vf[i]);
9756 reg = rd32(hw, I40E_VP_MDET_TX(i));
9757 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
9758 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
9759 vf->num_mdd_events++;
9760 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
9761 i);
9762 }
9763
9764 reg = rd32(hw, I40E_VP_MDET_RX(i));
9765 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
9766 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
9767 vf->num_mdd_events++;
9768 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
9769 i);
9770 }
9771
9772 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
9773 dev_info(&pf->pdev->dev,
9774 "Too many MDD events on VF %d, disabled\n", i);
9775 dev_info(&pf->pdev->dev,
9776 "Use PF Control I/F to re-enable the VF\n");
9777 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
9778 }
9779 }
9780
9781
9782 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
9783 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
9784 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
9785 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
9786 i40e_flush(hw);
9787}
9788
9789static const char *i40e_tunnel_name(u8 type)
9790{
9791 switch (type) {
9792 case UDP_TUNNEL_TYPE_VXLAN:
9793 return "vxlan";
9794 case UDP_TUNNEL_TYPE_GENEVE:
9795 return "geneve";
9796 default:
9797 return "unknown";
9798 }
9799}
9800
9801
9802
9803
9804
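/**
 * i40e_sync_udp_filters - mark all offloaded UDP tunnel ports for re-sync
 * @pf: board private structure
 *
 * Marks every configured UDP tunnel port as pending so the next run of
 * i40e_sync_udp_filters_subtask() pushes the whole table to hardware.
 **/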
9805static void i40e_sync_udp_filters(struct i40e_pf *pf)
9806{
9807 int i;
9808
9809
9810 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9811 if (pf->udp_ports[i].port)
9812 pf->pending_udp_bitmap |= BIT_ULL(i);
9813 }
9814
9815 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
9816}
9817
9818
9819
9820
9821
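/**
 * i40e_sync_udp_filters_subtask - sync the pending UDP tunnel ports with HW
 * @pf: board private structure
 **/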
9822static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
9823{
9824 struct i40e_hw *hw = &pf->hw;
9825 u8 filter_index, type;
9826 u16 port;
9827 int i;
9828
9829 if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
9830 return;
9831
9832
9833 rtnl_lock();
9834
9835 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9836 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
9837 struct i40e_udp_port_config *udp_port;
9838 i40e_status ret = 0;
9839
9840 udp_port = &pf->udp_ports[i];
9841 pf->pending_udp_bitmap &= ~BIT_ULL(i);
9842
9843 port = READ_ONCE(udp_port->port);
9844 type = READ_ONCE(udp_port->type);
9845 filter_index = READ_ONCE(udp_port->filter_index);
9846
9847
9848 rtnl_unlock();
9849
9850 if (port)
9851 ret = i40e_aq_add_udp_tunnel(hw, port,
9852 type,
9853 &filter_index,
9854 NULL);
9855 else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
9856 ret = i40e_aq_del_udp_tunnel(hw, filter_index,
9857 NULL);
9858
9859
9860 rtnl_lock();
9861
9862 if (ret) {
9863 dev_info(&pf->pdev->dev,
9864 "%s %s port %d, index %d failed, err %s aq_err %s\n",
9865 i40e_tunnel_name(type),
9866 port ? "add" : "delete",
9867 port,
9868 filter_index,
9869 i40e_stat_str(&pf->hw, ret),
9870 i40e_aq_str(&pf->hw,
9871 pf->hw.aq.asq_last_status));
9872 if (port) {
9873
9874
9875
9876 udp_port->port = 0;
9877 pf->pending_udp_bitmap &= ~BIT_ULL(i);
9878 }
9879 } else if (port) {
9880
9881 udp_port->filter_index = filter_index;
9882 }
9883 }
9884 }
9885
9886 rtnl_unlock();
9887}
9888
9889
9890
9891
9892
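/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/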
9893static void i40e_service_task(struct work_struct *work)
9894{
9895 struct i40e_pf *pf = container_of(work,
9896 struct i40e_pf,
9897 service_task);
9898 unsigned long start_time = jiffies;
9899
9900
9901 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
9902 return;
9903
9904 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
9905 return;
9906
9907 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
9908 i40e_sync_filters_subtask(pf);
9909 i40e_reset_subtask(pf);
9910 i40e_handle_mdd_event(pf);
9911 i40e_vc_process_vflr_event(pf);
9912 i40e_watchdog_subtask(pf);
9913 i40e_fdir_reinit_subtask(pf);
9914 if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
9915
9916 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
9917 } else {
9918 i40e_client_subtask(pf);
9919 if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
9920 pf->state))
9921 i40e_notify_client_of_l2_param_changes(
9922 pf->vsi[pf->lan_vsi]);
9923 }
9924 i40e_sync_filters_subtask(pf);
9925 i40e_sync_udp_filters_subtask(pf);
9926 i40e_clean_adminq_subtask(pf);
9927
9928
9929 smp_mb__before_atomic();
9930 clear_bit(__I40E_SERVICE_SCHED, pf->state);
9931
9932
9933
9934
9935
9936 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
9937 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
9938 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
9939 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
9940 i40e_service_event_schedule(pf);
9941}
9942
9943
9944
9945
9946
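/**
 * i40e_service_timer - timer callback that reschedules the service task
 * @t: timer list pointer
 **/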
9947static void i40e_service_timer(struct timer_list *t)
9948{
9949 struct i40e_pf *pf = from_timer(pf, t, service_timer);
9950
9951 mod_timer(&pf->service_timer,
9952 round_jiffies(jiffies + pf->service_timer_period));
9953 i40e_service_event_schedule(pf);
9954}
9955
9956
9957
9958
9959
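/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/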
9960static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
9961{
9962 struct i40e_pf *pf = vsi->back;
9963
9964 switch (vsi->type) {
9965 case I40E_VSI_MAIN:
9966 vsi->alloc_queue_pairs = pf->num_lan_qps;
9967 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
9968 I40E_REQ_DESCRIPTOR_MULTIPLE);
9969 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
9970 vsi->num_q_vectors = pf->num_lan_msix;
9971 else
9972 vsi->num_q_vectors = 1;
9973
9974 break;
9975
9976 case I40E_VSI_FDIR:
9977 vsi->alloc_queue_pairs = 1;
9978 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
9979 I40E_REQ_DESCRIPTOR_MULTIPLE);
9980 vsi->num_q_vectors = pf->num_fdsb_msix;
9981 break;
9982
9983 case I40E_VSI_VMDQ2:
9984 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
9985 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
9986 I40E_REQ_DESCRIPTOR_MULTIPLE);
9987 vsi->num_q_vectors = pf->num_vmdq_msix;
9988 break;
9989
9990 case I40E_VSI_SRIOV:
9991 vsi->alloc_queue_pairs = pf->num_vf_qps;
9992 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
9993 I40E_REQ_DESCRIPTOR_MULTIPLE);
9994 break;
9995
9996 default:
9997 WARN_ON(1);
9998 return -ENODATA;
9999 }
10000
10001 return 0;
10002}
10003
10004
10005
10006
10007
10008
10009
10010
10011
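/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
 * @vsi: pointer to the VSI being configured
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/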
10012static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
10013{
10014 struct i40e_ring **next_rings;
10015 int size;
10016 int ret = 0;
10017
10018
10019 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
10020 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
10021 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
10022 if (!vsi->tx_rings)
10023 return -ENOMEM;
10024 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
10025 if (i40e_enabled_xdp_vsi(vsi)) {
10026 vsi->xdp_rings = next_rings;
10027 next_rings += vsi->alloc_queue_pairs;
10028 }
10029 vsi->rx_rings = next_rings;
10030
10031 if (alloc_qvectors) {
10032
10033 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
10034 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
10035 if (!vsi->q_vectors) {
10036 ret = -ENOMEM;
10037 goto err_vectors;
10038 }
10039 }
10040 return ret;
10041
10042err_vectors:
10043 kfree(vsi->tx_rings);
10044 return ret;
10045}
10046
10047
10048
10049
10050
10051
10052
10053
10054
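/**
 * i40e_vsi_mem_alloc - Allocates the next available struct VSI in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/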
10055static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
10056{
10057 int ret = -ENODEV;
10058 struct i40e_vsi *vsi;
10059 int vsi_idx;
10060 int i;
10061
10062
10063 mutex_lock(&pf->switch_mutex);
10064
10065
10066
10067
10068
10069
10070
10071 i = pf->next_vsi;
10072 while (i < pf->num_alloc_vsi && pf->vsi[i])
10073 i++;
10074 if (i >= pf->num_alloc_vsi) {
10075 i = 0;
10076 while (i < pf->next_vsi && pf->vsi[i])
10077 i++;
10078 }
10079
10080 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
10081 vsi_idx = i;
10082 } else {
10083 ret = -ENODEV;
10084 goto unlock_pf;
10085 }
10086 pf->next_vsi = ++i;
10087
10088 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
10089 if (!vsi) {
10090 ret = -ENOMEM;
10091 goto unlock_pf;
10092 }
10093 vsi->type = type;
10094 vsi->back = pf;
10095 set_bit(__I40E_VSI_DOWN, vsi->state);
10096 vsi->flags = 0;
10097 vsi->idx = vsi_idx;
10098 vsi->int_rate_limit = 0;
10099 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
10100 pf->rss_table_size : 64;
10101 vsi->netdev_registered = false;
10102 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
10103 hash_init(vsi->mac_filter_hash);
10104 vsi->irqs_ready = false;
10105
10106 ret = i40e_set_num_rings_in_vsi(vsi);
10107 if (ret)
10108 goto err_rings;
10109
10110 ret = i40e_vsi_alloc_arrays(vsi, true);
10111 if (ret)
10112 goto err_rings;
10113
10114
10115 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
10116
10117
10118 spin_lock_init(&vsi->mac_filter_hash_lock);
10119 pf->vsi[vsi_idx] = vsi;
10120 ret = vsi_idx;
10121 goto unlock_pf;
10122
10123err_rings:
10124 pf->next_vsi = i - 1;
10125 kfree(vsi);
10126unlock_pf:
10127 mutex_unlock(&pf->switch_mutex);
10128 return ret;
10129}
10130
10131
10132
10133
10134
10135
10136
10137
10138
10139static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
10140{
10141
10142 if (free_qvectors) {
10143 kfree(vsi->q_vectors);
10144 vsi->q_vectors = NULL;
10145 }
10146 kfree(vsi->tx_rings);
10147 vsi->tx_rings = NULL;
10148 vsi->rx_rings = NULL;
10149 vsi->xdp_rings = NULL;
10150}
10151
10152
10153
10154
10155
10156
10157static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
10158{
10159 if (!vsi)
10160 return;
10161
10162 kfree(vsi->rss_hkey_user);
10163 vsi->rss_hkey_user = NULL;
10164
10165 kfree(vsi->rss_lut_user);
10166 vsi->rss_lut_user = NULL;
10167}
10168
10169
10170
10171
10172
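/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 **/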
10173static int i40e_vsi_clear(struct i40e_vsi *vsi)
10174{
10175 struct i40e_pf *pf;
10176
10177 if (!vsi)
10178 return 0;
10179
10180 if (!vsi->back)
10181 goto free_vsi;
10182 pf = vsi->back;
10183
10184 mutex_lock(&pf->switch_mutex);
10185 if (!pf->vsi[vsi->idx]) {
10186 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
10187 vsi->idx, vsi->idx, vsi->type);
10188 goto unlock_vsi;
10189 }
10190
10191 if (pf->vsi[vsi->idx] != vsi) {
10192 dev_err(&pf->pdev->dev,
10193 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
10194 pf->vsi[vsi->idx]->idx,
10195 pf->vsi[vsi->idx]->type,
10196 vsi->idx, vsi->type);
10197 goto unlock_vsi;
10198 }
10199
10200
10201 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
10202 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
10203
10204 i40e_vsi_free_arrays(vsi, true);
10205 i40e_clear_rss_config_user(vsi);
10206
10207 pf->vsi[vsi->idx] = NULL;
10208 if (vsi->idx < pf->next_vsi)
10209 pf->next_vsi = vsi->idx;
10210
10211unlock_vsi:
10212 mutex_unlock(&pf->switch_mutex);
10213free_vsi:
10214 kfree(vsi);
10215
10216 return 0;
10217}
10218
10219
10220
10221
10222
10223static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
10224{
10225 int i;
10226
10227 if (vsi->tx_rings && vsi->tx_rings[0]) {
10228 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10229 kfree_rcu(vsi->tx_rings[i], rcu);
10230 vsi->tx_rings[i] = NULL;
10231 vsi->rx_rings[i] = NULL;
10232 if (vsi->xdp_rings)
10233 vsi->xdp_rings[i] = NULL;
10234 }
10235 }
10236}
10237
10238
10239
10240
10241
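/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 **/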
10242static int i40e_alloc_rings(struct i40e_vsi *vsi)
10243{
10244 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
10245 struct i40e_pf *pf = vsi->back;
10246 struct i40e_ring *ring;
10247
10248
10249 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10250
10251 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
10252 if (!ring)
10253 goto err_out;
10254
10255 ring->queue_index = i;
10256 ring->reg_idx = vsi->base_queue + i;
10257 ring->ring_active = false;
10258 ring->vsi = vsi;
10259 ring->netdev = vsi->netdev;
10260 ring->dev = &pf->pdev->dev;
10261 ring->count = vsi->num_desc;
10262 ring->size = 0;
10263 ring->dcb_tc = 0;
10264 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10265 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10266 ring->itr_setting = pf->tx_itr_default;
10267 vsi->tx_rings[i] = ring++;
10268
10269 if (!i40e_enabled_xdp_vsi(vsi))
10270 goto setup_rx;
10271
10272 ring->queue_index = vsi->alloc_queue_pairs + i;
10273 ring->reg_idx = vsi->base_queue + ring->queue_index;
10274 ring->ring_active = false;
10275 ring->vsi = vsi;
10276 ring->netdev = NULL;
10277 ring->dev = &pf->pdev->dev;
10278 ring->count = vsi->num_desc;
10279 ring->size = 0;
10280 ring->dcb_tc = 0;
10281 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10282 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10283 set_ring_xdp(ring);
10284 ring->itr_setting = pf->tx_itr_default;
10285 vsi->xdp_rings[i] = ring++;
10286
10287setup_rx:
10288 ring->queue_index = i;
10289 ring->reg_idx = vsi->base_queue + i;
10290 ring->ring_active = false;
10291 ring->vsi = vsi;
10292 ring->netdev = vsi->netdev;
10293 ring->dev = &pf->pdev->dev;
10294 ring->count = vsi->num_desc;
10295 ring->size = 0;
10296 ring->dcb_tc = 0;
10297 ring->itr_setting = pf->rx_itr_default;
10298 vsi->rx_rings[i] = ring;
10299 }
10300
10301 return 0;
10302
10303err_out:
10304 i40e_vsi_clear_rings(vsi);
10305 return -ENOMEM;
10306}
10307
10308
10309
10310
10311
10312
10313
10314
10315static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
10316{
10317 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
10318 I40E_MIN_MSIX, vectors);
10319 if (vectors < 0) {
10320 dev_info(&pf->pdev->dev,
10321 "MSI-X vector reservation failed: %d\n", vectors);
10322 vectors = 0;
10323 }
10324
10325 return vectors;
10326}
10327
10328
10329
10330
10331
10332
10333
10334
10335
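/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Works with the OS to reserve MSI-X vectors and distribute them between
 * LAN, Flow Director sideband, iWARP and VMDq uses.
 *
 * Returns the number of vectors reserved or negative on failure
 **/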
10336static int i40e_init_msix(struct i40e_pf *pf)
10337{
10338 struct i40e_hw *hw = &pf->hw;
10339 int cpus, extra_vectors;
10340 int vectors_left;
10341 int v_budget, i;
10342 int v_actual;
10343 int iwarp_requested = 0;
10344
10345 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
10346 return -ENODEV;
10347
10348
10349
10350
10351
10352
10353
10354
10355
10356
10357
10358
10359
10360
10361
10362
10363 vectors_left = hw->func_caps.num_msix_vectors;
10364 v_budget = 0;
10365
10366
10367 if (vectors_left) {
10368 v_budget++;
10369 vectors_left--;
10370 }
10371
10372
10373
10374
10375
10376
10377
10378
10379 cpus = num_online_cpus();
10380 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
10381 vectors_left -= pf->num_lan_msix;
10382
10383
10384 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10385 if (vectors_left) {
10386 pf->num_fdsb_msix = 1;
10387 v_budget++;
10388 vectors_left--;
10389 } else {
10390 pf->num_fdsb_msix = 0;
10391 }
10392 }
10393
10394
10395 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10396 iwarp_requested = pf->num_iwarp_msix;
10397
10398 if (!vectors_left)
10399 pf->num_iwarp_msix = 0;
10400 else if (vectors_left < pf->num_iwarp_msix)
10401 pf->num_iwarp_msix = 1;
10402 v_budget += pf->num_iwarp_msix;
10403 vectors_left -= pf->num_iwarp_msix;
10404 }
10405
10406
10407 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
10408 if (!vectors_left) {
10409 pf->num_vmdq_msix = 0;
10410 pf->num_vmdq_qps = 0;
10411 } else {
10412 int vmdq_vecs_wanted =
10413 pf->num_vmdq_vsis * pf->num_vmdq_qps;
10414 int vmdq_vecs =
10415 min_t(int, vectors_left, vmdq_vecs_wanted);
10416
10417
10418
10419
10420
10421
10422
10423 if (vectors_left < vmdq_vecs_wanted) {
10424 pf->num_vmdq_qps = 1;
10425 vmdq_vecs_wanted = pf->num_vmdq_vsis;
10426 vmdq_vecs = min_t(int,
10427 vectors_left,
10428 vmdq_vecs_wanted);
10429 }
10430 pf->num_vmdq_msix = pf->num_vmdq_qps;
10431
10432 v_budget += vmdq_vecs;
10433 vectors_left -= vmdq_vecs;
10434 }
10435 }
10436
10437
10438
10439
10440
10441
10442
10443
10444
10445
10446 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
10447 pf->num_lan_msix += extra_vectors;
10448 vectors_left -= extra_vectors;
10449
10450 WARN(vectors_left < 0,
10451 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
10452
10453 v_budget += pf->num_lan_msix;
10454 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
10455 GFP_KERNEL);
10456 if (!pf->msix_entries)
10457 return -ENOMEM;
10458
10459 for (i = 0; i < v_budget; i++)
10460 pf->msix_entries[i].entry = i;
10461 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
10462
10463 if (v_actual < I40E_MIN_MSIX) {
10464 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
10465 kfree(pf->msix_entries);
10466 pf->msix_entries = NULL;
10467 pci_disable_msix(pf->pdev);
10468 return -ENODEV;
10469
10470 } else if (v_actual == I40E_MIN_MSIX) {
10471
10472 pf->num_vmdq_vsis = 0;
10473 pf->num_vmdq_qps = 0;
10474 pf->num_lan_qps = 1;
10475 pf->num_lan_msix = 1;
10476
10477 } else if (v_actual != v_budget) {
10478
10479
10480
10481
10482
10483 int vec;
10484
10485 dev_info(&pf->pdev->dev,
10486 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
10487 v_actual, v_budget);
10488
10489 vec = v_actual - 1;
10490
10491
10492 pf->num_vmdq_msix = 1;
10493 pf->num_vmdq_vsis = 1;
10494 pf->num_vmdq_qps = 1;
10495
10496
10497 switch (vec) {
10498 case 2:
10499 pf->num_lan_msix = 1;
10500 break;
10501 case 3:
10502 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10503 pf->num_lan_msix = 1;
10504 pf->num_iwarp_msix = 1;
10505 } else {
10506 pf->num_lan_msix = 2;
10507 }
10508 break;
10509 default:
10510 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10511 pf->num_iwarp_msix = min_t(int, (vec / 3),
10512 iwarp_requested);
10513 pf->num_vmdq_vsis = min_t(int, (vec / 3),
10514 I40E_DEFAULT_NUM_VMDQ_VSI);
10515 } else {
10516 pf->num_vmdq_vsis = min_t(int, (vec / 2),
10517 I40E_DEFAULT_NUM_VMDQ_VSI);
10518 }
10519 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10520 pf->num_fdsb_msix = 1;
10521 vec--;
10522 }
10523 pf->num_lan_msix = min_t(int,
10524 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
10525 pf->num_lan_msix);
10526 pf->num_lan_qps = pf->num_lan_msix;
10527 break;
10528 }
10529 }
10530
10531 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
10532 (pf->num_fdsb_msix == 0)) {
10533 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
10534 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10535 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10536 }
10537 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
10538 (pf->num_vmdq_msix == 0)) {
10539 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
10540 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
10541 }
10542
10543 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
10544 (pf->num_iwarp_msix == 0)) {
10545 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
10546 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
10547 }
10548 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
10549 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
10550 pf->num_lan_msix,
10551 pf->num_vmdq_msix * pf->num_vmdq_vsis,
10552 pf->num_fdsb_msix,
10553 pf->num_iwarp_msix);
10554
10555 return v_actual;
10556}
10557
10558
10559
10560
10561
10562
10563
10564
10565
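/**
 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the vsi struct
 * @cpu: cpu index hint (currently unused in this path)
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/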
10566static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
10567{
10568 struct i40e_q_vector *q_vector;
10569
10570
10571 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
10572 if (!q_vector)
10573 return -ENOMEM;
10574
10575 q_vector->vsi = vsi;
10576 q_vector->v_idx = v_idx;
10577 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
10578
10579 if (vsi->netdev)
10580 netif_napi_add(vsi->netdev, &q_vector->napi,
10581 i40e_napi_poll, NAPI_POLL_WEIGHT);
10582
10583
10584 vsi->q_vectors[v_idx] = q_vector;
10585
10586 return 0;
10587}
10588
10589
10590
10591
10592
10593
10594
10595
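/**
 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/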
10596static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
10597{
10598 struct i40e_pf *pf = vsi->back;
10599 int err, v_idx, num_q_vectors, current_cpu;
10600
10601
10602 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10603 num_q_vectors = vsi->num_q_vectors;
10604 else if (vsi == pf->vsi[pf->lan_vsi])
10605 num_q_vectors = 1;
10606 else
10607 return -EINVAL;
10608
10609 current_cpu = cpumask_first(cpu_online_mask);
10610
10611 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
10612 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
10613 if (err)
10614 goto err_out;
10615 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
10616 if (unlikely(current_cpu >= nr_cpu_ids))
10617 current_cpu = cpumask_first(cpu_online_mask);
10618 }
10619
10620 return 0;
10621
10622err_out:
10623 while (v_idx--)
10624 i40e_free_q_vector(vsi, v_idx);
10625
10626 return err;
10627}
10628
10629
10630
10631
10632
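/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 **/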
10633static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
10634{
10635 int vectors = 0;
10636 ssize_t size;
10637
10638 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
10639 vectors = i40e_init_msix(pf);
10640 if (vectors < 0) {
10641 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
10642 I40E_FLAG_IWARP_ENABLED |
10643 I40E_FLAG_RSS_ENABLED |
10644 I40E_FLAG_DCB_CAPABLE |
10645 I40E_FLAG_DCB_ENABLED |
10646 I40E_FLAG_SRIOV_ENABLED |
10647 I40E_FLAG_FD_SB_ENABLED |
10648 I40E_FLAG_FD_ATR_ENABLED |
10649 I40E_FLAG_VMDQ_ENABLED);
10650 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10651
10652
10653 i40e_determine_queue_usage(pf);
10654 }
10655 }
10656
10657 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10658 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
10659 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
10660 vectors = pci_enable_msi(pf->pdev);
10661 if (vectors < 0) {
10662 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
10663 vectors);
10664 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
10665 }
10666 vectors = 1;
10667 }
10668
10669 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
10670 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
10671
10672
10673 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
10674 pf->irq_pile = kzalloc(size, GFP_KERNEL);
10675 if (!pf->irq_pile)
10676 return -ENOMEM;
10677
10678 pf->irq_pile->num_entries = vectors;
10679 pf->irq_pile->search_hint = 0;
10680
10681
10682 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
10683
10684 return 0;
10685}
10686
10687
10688
10689
10690
10691
10692
10693
10694
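/**
 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
 * @pf: private board data structure
 *
 * Restore the interrupt scheme that was cleared when the device was
 * suspended or reset: re-enable MSI-X/MSI, re-allocate the q_vectors
 * for every VSI and set up the misc vector again.
 **/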
10695static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
10696{
10697 int err, i;
10698
10699
10700
10701
10702
10703 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
10704
10705 err = i40e_init_interrupt_scheme(pf);
10706 if (err)
10707 return err;
10708
10709
10710
10711
10712 for (i = 0; i < pf->num_alloc_vsi; i++) {
10713 if (pf->vsi[i]) {
10714 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
10715 if (err)
10716 goto err_unwind;
10717 i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
10718 }
10719 }
10720
10721 err = i40e_setup_misc_vector(pf);
10722 if (err)
10723 goto err_unwind;
10724
10725 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
10726 i40e_client_update_msix_info(pf);
10727
10728 return 0;
10729
10730err_unwind:
10731 while (i--) {
10732 if (pf->vsi[i])
10733 i40e_vsi_free_q_vectors(pf->vsi[i]);
10734 }
10735
10736 return err;
10737}
10738
10739
10740
10741
10742
10743
10744
10745
10746
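/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors.  This is not used
 * when in MSI or Legacy interrupt mode.
 **/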
10747static int i40e_setup_misc_vector(struct i40e_pf *pf)
10748{
10749 struct i40e_hw *hw = &pf->hw;
10750 int err = 0;
10751
10752
10753 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
10754 err = request_irq(pf->msix_entries[0].vector,
10755 i40e_intr, 0, pf->int_name, pf);
10756 if (err) {
10757 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
10758 dev_info(&pf->pdev->dev,
10759 "request_irq for %s failed: %d\n",
10760 pf->int_name, err);
10761 return -EFAULT;
10762 }
10763 }
10764
10765 i40e_enable_misc_int_causes(pf);
10766
10767
10768 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
10769 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
10770
10771 i40e_flush(hw);
10772
10773 i40e_irq_dynamic_enable_icr0(pf);
10774
10775 return err;
10776}
10777
10778
10779
10780
10781
10782
10783
10784
10785
10786
10787static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
10788 u8 *lut, u16 lut_size)
10789{
10790 struct i40e_pf *pf = vsi->back;
10791 struct i40e_hw *hw = &pf->hw;
10792 int ret = 0;
10793
10794 if (seed) {
10795 ret = i40e_aq_get_rss_key(hw, vsi->id,
10796 (struct i40e_aqc_get_set_rss_key_data *)seed);
10797 if (ret) {
10798 dev_info(&pf->pdev->dev,
10799 "Cannot get RSS key, err %s aq_err %s\n",
10800 i40e_stat_str(&pf->hw, ret),
10801 i40e_aq_str(&pf->hw,
10802 pf->hw.aq.asq_last_status));
10803 return ret;
10804 }
10805 }
10806
10807 if (lut) {
10808 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
10809
10810 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
10811 if (ret) {
10812 dev_info(&pf->pdev->dev,
10813 "Cannot get RSS lut, err %s aq_err %s\n",
10814 i40e_stat_str(&pf->hw, ret),
10815 i40e_aq_str(&pf->hw,
10816 pf->hw.aq.asq_last_status));
10817 return ret;
10818 }
10819 }
10820
10821 return ret;
10822}
10823
10824
10825
10826
10827
10828
10829
10830
10831
10832
10833static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
10834 const u8 *lut, u16 lut_size)
10835{
10836 struct i40e_pf *pf = vsi->back;
10837 struct i40e_hw *hw = &pf->hw;
10838 u16 vf_id = vsi->vf_id;
10839 u8 i;
10840
10841
10842 if (seed) {
10843 u32 *seed_dw = (u32 *)seed;
10844
10845 if (vsi->type == I40E_VSI_MAIN) {
10846 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
10847 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
10848 } else if (vsi->type == I40E_VSI_SRIOV) {
10849 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
10850 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
10851 } else {
10852 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
10853 }
10854 }
10855
10856 if (lut) {
10857 u32 *lut_dw = (u32 *)lut;
10858
10859 if (vsi->type == I40E_VSI_MAIN) {
10860 if (lut_size != I40E_HLUT_ARRAY_SIZE)
10861 return -EINVAL;
10862 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
10863 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
10864 } else if (vsi->type == I40E_VSI_SRIOV) {
10865 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
10866 return -EINVAL;
10867 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
10868 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
10869 } else {
10870 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
10871 }
10872 }
10873 i40e_flush(hw);
10874
10875 return 0;
10876}
10877
10878
10879
10880
10881
10882
10883
10884
10885
10886
10887static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
10888 u8 *lut, u16 lut_size)
10889{
10890 struct i40e_pf *pf = vsi->back;
10891 struct i40e_hw *hw = &pf->hw;
10892 u16 i;
10893
10894 if (seed) {
10895 u32 *seed_dw = (u32 *)seed;
10896
10897 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
10898 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
10899 }
10900 if (lut) {
10901 u32 *lut_dw = (u32 *)lut;
10902
10903 if (lut_size != I40E_HLUT_ARRAY_SIZE)
10904 return -EINVAL;
10905 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
10906 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
10907 }
10908
10909 return 0;
10910}
10911
10912
10913
10914
10915
10916
10917
10918
10919
10920
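/**
 * i40e_config_rss - Configure RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/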
10921int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
10922{
10923 struct i40e_pf *pf = vsi->back;
10924
10925 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
10926 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
10927 else
10928 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
10929}
10930
10931
10932
10933
10934
10935
10936
10937
10938
10939
10940int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
10941{
10942 struct i40e_pf *pf = vsi->back;
10943
10944 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
10945 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
10946 else
10947 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
10948}
10949
10950
10951
10952
10953
10954
10955
10956
10957void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
10958 u16 rss_table_size, u16 rss_size)
10959{
10960 u16 i;
10961
10962 for (i = 0; i < rss_table_size; i++)
10963 lut[i] = i % rss_size;
10964}
10965
10966
10967
10968
10969
10970static int i40e_pf_config_rss(struct i40e_pf *pf)
10971{
10972 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10973 u8 seed[I40E_HKEY_ARRAY_SIZE];
10974 u8 *lut;
10975 struct i40e_hw *hw = &pf->hw;
10976 u32 reg_val;
10977 u64 hena;
10978 int ret;
10979
10980
10981 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
10982 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
10983 hena |= i40e_pf_get_default_rss_hena(pf);
10984
10985 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
10986 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
10987
10988
10989 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
10990 reg_val = (pf->rss_table_size == 512) ?
10991 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
10992 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
10993 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
10994
10995
10996 if (!vsi->rss_size) {
10997 u16 qcount;
10998
10999
11000
11001
11002
11003 qcount = vsi->num_queue_pairs /
11004 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
11005 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11006 }
11007 if (!vsi->rss_size)
11008 return -EINVAL;
11009
11010 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
11011 if (!lut)
11012 return -ENOMEM;
11013
11014
11015 if (vsi->rss_lut_user)
11016 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
11017 else
11018 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
11019
11020
11021
11022
11023 if (vsi->rss_hkey_user)
11024 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
11025 else
11026 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
11027 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
11028 kfree(lut);
11029
11030 return ret;
11031}
11032
11033
11034
11035
11036
11037
11038
11039
11040
11041
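/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: number of queues to configure
 *
 * Returns 0 if RSS is not enabled; otherwise returns the final RSS queue
 * count, which may differ from the requested queue count.
 **/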
11042int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
11043{
11044 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11045 int new_rss_size;
11046
11047 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
11048 return 0;
11049
11050 queue_count = min_t(int, queue_count, num_online_cpus());
11051 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
11052
11053 if (queue_count != vsi->num_queue_pairs) {
11054 u16 qcount;
11055
11056 vsi->req_queue_pairs = queue_count;
11057 i40e_prep_for_reset(pf, true);
11058
11059 pf->alloc_rss_size = new_rss_size;
11060
11061 i40e_reset_and_rebuild(pf, true, true);
11062
11063
11064
11065
11066 if (queue_count < vsi->rss_size) {
11067 i40e_clear_rss_config_user(vsi);
11068 dev_dbg(&pf->pdev->dev,
11069 "discard user configured hash keys and lut\n");
11070 }
11071
11072
11073 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
11074 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11075
11076 i40e_pf_config_rss(pf);
11077 }
11078 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
11079 vsi->req_queue_pairs, pf->rss_size_max);
11080 return pf->alloc_rss_size;
11081}
11082
11083
11084
11085
11086
11087i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
11088{
11089 i40e_status status;
11090 bool min_valid, max_valid;
11091 u32 max_bw, min_bw;
11092
11093 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
11094 &min_valid, &max_valid);
11095
11096 if (!status) {
11097 if (min_valid)
11098 pf->min_bw = min_bw;
11099 if (max_valid)
11100 pf->max_bw = max_bw;
11101 }
11102
11103 return status;
11104}
11105
11106
11107
11108
11109
11110i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
11111{
11112 struct i40e_aqc_configure_partition_bw_data bw_data;
11113 i40e_status status;
11114
11115
11116 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
11117 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
11118 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
11119
11120
11121 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
11122
11123 return status;
11124}
11125
11126
11127
11128
11129
11130i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
11131{
11132
11133 enum i40e_admin_queue_err last_aq_status;
11134 i40e_status ret;
11135 u16 nvm_word;
11136
11137 if (pf->hw.partition_id != 1) {
11138 dev_info(&pf->pdev->dev,
11139 "Commit BW only works on partition 1! This is partition %d",
11140 pf->hw.partition_id);
11141 ret = I40E_NOT_SUPPORTED;
11142 goto bw_commit_out;
11143 }
11144
11145
11146 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
11147 last_aq_status = pf->hw.aq.asq_last_status;
11148 if (ret) {
11149 dev_info(&pf->pdev->dev,
11150 "Cannot acquire NVM for read access, err %s aq_err %s\n",
11151 i40e_stat_str(&pf->hw, ret),
11152 i40e_aq_str(&pf->hw, last_aq_status));
11153 goto bw_commit_out;
11154 }
11155
11156
11157 ret = i40e_aq_read_nvm(&pf->hw,
11158 I40E_SR_NVM_CONTROL_WORD,
11159 0x10, sizeof(nvm_word), &nvm_word,
11160 false, NULL);
11161
11162
11163
11164 last_aq_status = pf->hw.aq.asq_last_status;
11165 i40e_release_nvm(&pf->hw);
11166 if (ret) {
11167 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
11168 i40e_stat_str(&pf->hw, ret),
11169 i40e_aq_str(&pf->hw, last_aq_status));
11170 goto bw_commit_out;
11171 }
11172
11173
11174 msleep(50);
11175
11176
11177 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
11178 last_aq_status = pf->hw.aq.asq_last_status;
11179 if (ret) {
11180 dev_info(&pf->pdev->dev,
11181 "Cannot acquire NVM for write access, err %s aq_err %s\n",
11182 i40e_stat_str(&pf->hw, ret),
11183 i40e_aq_str(&pf->hw, last_aq_status));
11184 goto bw_commit_out;
11185 }
11186
11187
11188
11189
11190 ret = i40e_aq_update_nvm(&pf->hw,
11191 I40E_SR_NVM_CONTROL_WORD,
11192 0x10, sizeof(nvm_word),
11193 &nvm_word, true, 0, NULL);
11194
11195
11196
11197 last_aq_status = pf->hw.aq.asq_last_status;
11198 i40e_release_nvm(&pf->hw);
11199 if (ret)
11200 dev_info(&pf->pdev->dev,
11201 "BW settings NOT SAVED, err %s aq_err %s\n",
11202 i40e_stat_str(&pf->hw, ret),
11203 i40e_aq_str(&pf->hw, last_aq_status));
11204bw_commit_out:
11205
11206 return ret;
11207}
11208
11209
11210
11211
11212
11213
11214
11215
11216
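/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on the function capabilities reported
 * by the hardware and on compile-time options.
 **/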
11217static int i40e_sw_init(struct i40e_pf *pf)
11218{
11219 int err = 0;
11220 int size;
11221
11222
11223 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
11224 I40E_FLAG_MSI_ENABLED |
11225 I40E_FLAG_MSIX_ENABLED;
11226
11227
11228 pf->rx_itr_default = I40E_ITR_RX_DEF;
11229 pf->tx_itr_default = I40E_ITR_TX_DEF;
11230
11231
11232
11233
11234 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
11235 pf->alloc_rss_size = 1;
11236 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
11237 pf->rss_size_max = min_t(int, pf->rss_size_max,
11238 pf->hw.func_caps.num_tx_qp);
11239 if (pf->hw.func_caps.rss) {
11240 pf->flags |= I40E_FLAG_RSS_ENABLED;
11241 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
11242 num_online_cpus());
11243 }
11244
11245
11246 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
11247 pf->flags |= I40E_FLAG_MFP_ENABLED;
11248 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
11249 if (i40e_get_partition_bw_setting(pf)) {
11250 dev_warn(&pf->pdev->dev,
11251 "Could not get partition bw settings\n");
11252 } else {
11253 dev_info(&pf->pdev->dev,
11254 "Partition BW Min = %8.8x, Max = %8.8x\n",
11255 pf->min_bw, pf->max_bw);
11256
11257
11258 i40e_set_partition_bw_setting(pf);
11259 }
11260 }
11261
11262 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
11263 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
11264 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
11265 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
11266 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
11267 pf->hw.num_partitions > 1)
11268 dev_info(&pf->pdev->dev,
11269 "Flow Director Sideband mode Disabled in MFP mode\n");
11270 else
11271 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11272 pf->fdir_pf_filter_count =
11273 pf->hw.func_caps.fd_filters_guaranteed;
11274 pf->hw.fdir_shared_filter_count =
11275 pf->hw.func_caps.fd_filters_best_effort;
11276 }
11277
11278 if (pf->hw.mac.type == I40E_MAC_X722) {
11279 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
11280 I40E_HW_128_QP_RSS_CAPABLE |
11281 I40E_HW_ATR_EVICT_CAPABLE |
11282 I40E_HW_WB_ON_ITR_CAPABLE |
11283 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
11284 I40E_HW_NO_PCI_LINK_CHECK |
11285 I40E_HW_USE_SET_LLDP_MIB |
11286 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
11287 I40E_HW_PTP_L4_CAPABLE |
11288 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
11289 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
11290
11291#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
11292 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
11293 I40E_FDEVICT_PCTYPE_DEFAULT) {
11294 dev_warn(&pf->pdev->dev,
11295 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
11296 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
11297 }
11298 } else if ((pf->hw.aq.api_maj_ver > 1) ||
11299 ((pf->hw.aq.api_maj_ver == 1) &&
11300 (pf->hw.aq.api_min_ver > 4))) {
11301
11302 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
11303 }
11304
11305
11306 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
11307 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
11308
11309 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11310 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
11311 (pf->hw.aq.fw_maj_ver < 4))) {
11312 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
11313
11314 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
11315 }
11316
11317
11318 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11319 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
11320 (pf->hw.aq.fw_maj_ver < 4)))
11321 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
11322
11323
11324 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11325 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
11326 (pf->hw.aq.fw_maj_ver >= 5)))
11327 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
11328
11329
11330 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11331 pf->hw.aq.fw_maj_ver >= 6)
11332 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
11333
11334 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
11335 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
11336 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
11337 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
11338 }
11339
11340 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
11341 pf->flags |= I40E_FLAG_IWARP_ENABLED;
11342
11343 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
11344 }
11345
11346
11347
11348
11349
11350 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11351 pf->hw.func_caps.npar_enable &&
11352 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
11353 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
11354
11355#ifdef CONFIG_PCI_IOV
11356 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
11357 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
11358 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
11359 pf->num_req_vfs = min_t(int,
11360 pf->hw.func_caps.num_vfs,
11361 I40E_MAX_VF_COUNT);
11362 }
11363#endif
11364 pf->eeprom_version = 0xDEAD;
11365 pf->lan_veb = I40E_NO_VEB;
11366 pf->lan_vsi = I40E_NO_VSI;
11367
11368
11369 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
11370
11371
11372 size = sizeof(struct i40e_lump_tracking)
11373 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
11374 pf->qp_pile = kzalloc(size, GFP_KERNEL);
11375 if (!pf->qp_pile) {
11376 err = -ENOMEM;
11377 goto sw_init_done;
11378 }
11379 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
11380 pf->qp_pile->search_hint = 0;
11381
11382 pf->tx_timeout_recovery_level = 1;
11383
11384 mutex_init(&pf->switch_mutex);
11385
11386sw_init_done:
11387 return err;
11388}
11389
11390
11391
11392
11393
11394
11395
11396
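/**
 * i40e_set_ntuple - set the ntuple feature flag and take action
 * @pf: board private structure to initialize
 * @features: the feature set that the stack is suggesting
 *
 * Returns a bool indicating whether a reset needs to happen.
 **/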
11397bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
11398{
11399 bool need_reset = false;
11400
11401
11402
11403
11404 if (features & NETIF_F_NTUPLE) {
11405
11406 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
11407 need_reset = true;
11408
11409
11410
11411 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
11412 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11413 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
11414 }
11415 } else {
11416
11417 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11418 need_reset = true;
11419 i40e_fdir_filter_exit(pf);
11420 }
11421 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11422 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
11423 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11424
11425
11426 pf->fd_add_err = 0;
11427 pf->fd_atr_cnt = 0;
11428
11429 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
11430 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
11431 (I40E_DEBUG_FD & pf->hw.debug_mask))
11432 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
11433 }
11434 return need_reset;
11435}
11436
11437
11438
11439
11440
11441static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
11442{
11443 struct i40e_pf *pf = vsi->back;
11444 struct i40e_hw *hw = &pf->hw;
11445 u16 vf_id = vsi->vf_id;
11446 u8 i;
11447
11448 if (vsi->type == I40E_VSI_MAIN) {
11449 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11450 wr32(hw, I40E_PFQF_HLUT(i), 0);
11451 } else if (vsi->type == I40E_VSI_SRIOV) {
11452 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
11453 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
11454 } else {
11455 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
11456 }
11457}
11458
11459
11460
11461
11462
11463
11464
11465static int i40e_set_features(struct net_device *netdev,
11466 netdev_features_t features)
11467{
11468 struct i40e_netdev_priv *np = netdev_priv(netdev);
11469 struct i40e_vsi *vsi = np->vsi;
11470 struct i40e_pf *pf = vsi->back;
11471 bool need_reset;
11472
11473 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
11474 i40e_pf_config_rss(pf);
11475 else if (!(features & NETIF_F_RXHASH) &&
11476 netdev->features & NETIF_F_RXHASH)
11477 i40e_clear_rss_lut(vsi);
11478
11479 if (features & NETIF_F_HW_VLAN_CTAG_RX)
11480 i40e_vlan_stripping_enable(vsi);
11481 else
11482 i40e_vlan_stripping_disable(vsi);
11483
11484 if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
11485 dev_err(&pf->pdev->dev,
11486 "Offloaded tc filters active, can't turn hw_tc_offload off");
11487 return -EINVAL;
11488 }
11489
11490 need_reset = i40e_set_ntuple(pf, features);
11491
11492 if (need_reset)
11493 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
11494
11495 return 0;
11496}
11497
11498
11499
11500
11501
11502
11503
11504
11505static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
11506{
11507 u8 i;
11508
11509 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
11510
11511
11512
11513 if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
11514 continue;
11515 if (pf->udp_ports[i].port == port)
11516 return i;
11517 }
11518
11519 return i;
11520}
11521
11522
11523
11524
11525
11526
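/**
 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 **/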
11527static void i40e_udp_tunnel_add(struct net_device *netdev,
11528 struct udp_tunnel_info *ti)
11529{
11530 struct i40e_netdev_priv *np = netdev_priv(netdev);
11531 struct i40e_vsi *vsi = np->vsi;
11532 struct i40e_pf *pf = vsi->back;
11533 u16 port = ntohs(ti->port);
11534 u8 next_idx;
11535 u8 idx;
11536
11537 idx = i40e_get_udp_port_idx(pf, port);
11538
11539
11540 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
11541 netdev_info(netdev, "port %d already offloaded\n", port);
11542 return;
11543 }
11544
11545
11546 next_idx = i40e_get_udp_port_idx(pf, 0);
11547
11548 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
11549 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
11550 port);
11551 return;
11552 }
11553
11554 switch (ti->type) {
11555 case UDP_TUNNEL_TYPE_VXLAN:
11556 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
11557 break;
11558 case UDP_TUNNEL_TYPE_GENEVE:
11559 if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
11560 return;
11561 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
11562 break;
11563 default:
11564 return;
11565 }
11566
11567
11568 pf->udp_ports[next_idx].port = port;
11569 pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
11570 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
11571 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
11572}
11573
11574
11575
11576
11577
11578
11579static void i40e_udp_tunnel_del(struct net_device *netdev,
11580 struct udp_tunnel_info *ti)
11581{
11582 struct i40e_netdev_priv *np = netdev_priv(netdev);
11583 struct i40e_vsi *vsi = np->vsi;
11584 struct i40e_pf *pf = vsi->back;
11585 u16 port = ntohs(ti->port);
11586 u8 idx;
11587
11588 idx = i40e_get_udp_port_idx(pf, port);
11589
11590
11591 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
11592 goto not_found;
11593
11594 switch (ti->type) {
11595 case UDP_TUNNEL_TYPE_VXLAN:
11596 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
11597 goto not_found;
11598 break;
11599 case UDP_TUNNEL_TYPE_GENEVE:
11600 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
11601 goto not_found;
11602 break;
11603 default:
11604 goto not_found;
11605 }
11606
11607
11608
11609
11610 pf->udp_ports[idx].port = 0;
11611
11612
11613
11614
11615
11616 pf->pending_udp_bitmap ^= BIT_ULL(idx);
11617 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
11618
11619 return;
11620not_found:
11621 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
11622 port);
11623}
11624
11625static int i40e_get_phys_port_id(struct net_device *netdev,
11626 struct netdev_phys_item_id *ppid)
11627{
11628 struct i40e_netdev_priv *np = netdev_priv(netdev);
11629 struct i40e_pf *pf = np->vsi->back;
11630 struct i40e_hw *hw = &pf->hw;
11631
11632 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
11633 return -EOPNOTSUPP;
11634
11635 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
11636 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
11637
11638 return 0;
11639}
11640
11641
11642
11643
11644
11645
11646
11647
11648
11649
11650static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
11651 struct net_device *dev,
11652 const unsigned char *addr, u16 vid,
11653 u16 flags)
11654{
11655 struct i40e_netdev_priv *np = netdev_priv(dev);
11656 struct i40e_pf *pf = np->vsi->back;
11657 int err = 0;
11658
11659 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
11660 return -EOPNOTSUPP;
11661
11662 if (vid) {
11663 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
11664 return -EINVAL;
11665 }
11666
11667
11668
11669
11670 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
11671 netdev_info(dev, "FDB only supports static addresses\n");
11672 return -EINVAL;
11673 }
11674
11675 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
11676 err = dev_uc_add_excl(dev, addr);
11677 else if (is_multicast_ether_addr(addr))
11678 err = dev_mc_add_excl(dev, addr);
11679 else
11680 err = -EINVAL;
11681
11682
11683 if (err == -EEXIST && !(flags & NLM_F_EXCL))
11684 err = 0;
11685
11686 return err;
11687}
11688
11689
11690
11691
11692
11693
11694
11695
11696
11697
11698
11699
11700
11701
11702
11703
11704static int i40e_ndo_bridge_setlink(struct net_device *dev,
11705 struct nlmsghdr *nlh,
11706 u16 flags)
11707{
11708 struct i40e_netdev_priv *np = netdev_priv(dev);
11709 struct i40e_vsi *vsi = np->vsi;
11710 struct i40e_pf *pf = vsi->back;
11711 struct i40e_veb *veb = NULL;
11712 struct nlattr *attr, *br_spec;
11713 int i, rem;
11714
11715
11716 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
11717 return -EOPNOTSUPP;
11718
11719
11720 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
11721 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
11722 veb = pf->veb[i];
11723 }
11724
11725 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
11726
11727 nla_for_each_nested(attr, br_spec, rem) {
11728 __u16 mode;
11729
11730 if (nla_type(attr) != IFLA_BRIDGE_MODE)
11731 continue;
11732
11733 mode = nla_get_u16(attr);
11734 if ((mode != BRIDGE_MODE_VEPA) &&
11735 (mode != BRIDGE_MODE_VEB))
11736 return -EINVAL;
11737
11738
11739 if (!veb) {
11740 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
11741 vsi->tc_config.enabled_tc);
11742 if (veb) {
11743 veb->bridge_mode = mode;
11744 i40e_config_bridge_mode(veb);
11745 } else {
11746
11747 return -ENOENT;
11748 }
11749 break;
11750 } else if (mode != veb->bridge_mode) {
11751
11752 veb->bridge_mode = mode;
11753
11754 if (mode == BRIDGE_MODE_VEB)
11755 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
11756 else
11757 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
11758 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
11759 break;
11760 }
11761 }
11762
11763 return 0;
11764}
11765
11766
11767
11768
11769
11770
11771
11772
11773
11774
11775
11776
11777
11778static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
11779 struct net_device *dev,
11780 u32 filter_mask, int nlflags)
11781{
11782 struct i40e_netdev_priv *np = netdev_priv(dev);
11783 struct i40e_vsi *vsi = np->vsi;
11784 struct i40e_pf *pf = vsi->back;
11785 struct i40e_veb *veb = NULL;
11786 int i;
11787
11788
11789 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
11790 return -EOPNOTSUPP;
11791
11792
11793 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
11794 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
11795 veb = pf->veb[i];
11796 }
11797
11798 if (!veb)
11799 return 0;
11800
11801 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
11802 0, 0, nlflags, filter_mask, NULL);
11803}
11804
11805
11806
11807
11808
11809
11810
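/**
 * i40e_features_check - Validate packet conforms to hardware offload limits
 * @skb: send buffer
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 **/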
11811static netdev_features_t i40e_features_check(struct sk_buff *skb,
11812 struct net_device *dev,
11813 netdev_features_t features)
11814{
11815 size_t len;
11816
11817
11818
11819
11820
11821 if (skb->ip_summed != CHECKSUM_PARTIAL)
11822 return features;
11823
11824
11825
11826
11827 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
11828 features &= ~NETIF_F_GSO_MASK;
11829
11830
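	/* MAC header length must fit in the 63-word MACLEN field */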
11831 len = skb_network_header(skb) - skb->data;
11832 if (len & ~(63 * 2))
11833 goto out_err;
11834
11835
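	/* IP header length must fit in the 127-dword IPLEN/EIPLEN field */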
11836 len = skb_transport_header(skb) - skb_network_header(skb);
11837 if (len & ~(127 * 4))
11838 goto out_err;
11839
11840 if (skb->encapsulation) {
11841
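		/* tunnel header length must fit in the 127-word L4TUNLEN field */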
11842 len = skb_inner_network_header(skb) - skb_transport_header(skb);
11843 if (len & ~(127 * 2))
11844 goto out_err;
11845
11846
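		/* inner IP header length must fit in the 127-dword IPLEN field */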
11847 len = skb_inner_transport_header(skb) -
11848 skb_inner_network_header(skb);
11849 if (len & ~(127 * 4))
11850 goto out_err;
11851 }
11852
11853
11854
11855
11856
11857
11858 return features;
11859out_err:
11860 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11861}
11862
11863
11864
11865
11866
11867
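/**
 * i40e_xdp_setup - add/remove an XDP program
 * @vsi: VSI to attach the program to
 * @prog: XDP program
 **/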
11868static int i40e_xdp_setup(struct i40e_vsi *vsi,
11869 struct bpf_prog *prog)
11870{
11871 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
11872 struct i40e_pf *pf = vsi->back;
11873 struct bpf_prog *old_prog;
11874 bool need_reset;
11875 int i;
11876
11877
11878 if (frame_size > vsi->rx_buf_len)
11879 return -EINVAL;
11880
11881 if (!i40e_enabled_xdp_vsi(vsi) && !prog)
11882 return 0;
11883
11884
11885 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
11886
11887 if (need_reset)
11888 i40e_prep_for_reset(pf, true);
11889
11890 old_prog = xchg(&vsi->xdp_prog, prog);
11891
11892 if (need_reset)
11893 i40e_reset_and_rebuild(pf, true, true);
11894
11895 for (i = 0; i < vsi->num_queue_pairs; i++)
11896 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
11897
11898 if (old_prog)
11899 bpf_prog_put(old_prog);
11900
11901 return 0;
11902}
11903
11904
11905
11906
11907
11908
11909
11910static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
11911{
11912 struct i40e_pf *pf = vsi->back;
11913 int timeout = 50;
11914
11915 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
11916 timeout--;
11917 if (!timeout)
11918 return -EBUSY;
11919 usleep_range(1000, 2000);
11920 }
11921
11922 return 0;
11923}
11924
11925
11926
11927
11928
11929static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
11930{
11931 struct i40e_pf *pf = vsi->back;
11932
11933 clear_bit(__I40E_CONFIG_BUSY, pf->state);
11934}
11935
11936
11937
11938
11939
11940
11941static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
11942{
11943 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
11944 sizeof(vsi->rx_rings[queue_pair]->rx_stats));
11945 memset(&vsi->tx_rings[queue_pair]->stats, 0,
11946 sizeof(vsi->tx_rings[queue_pair]->stats));
11947 if (i40e_enabled_xdp_vsi(vsi)) {
11948 memset(&vsi->xdp_rings[queue_pair]->stats, 0,
11949 sizeof(vsi->xdp_rings[queue_pair]->stats));
11950 }
11951}
11952
11953
11954
11955
11956
11957
11958static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
11959{
11960 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
11961 if (i40e_enabled_xdp_vsi(vsi))
11962 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
11963 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
11964}
11965
11966
11967
11968
11969
11970
11971
11972static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
11973 bool enable)
11974{
11975 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
11976 struct i40e_q_vector *q_vector = rxr->q_vector;
11977
11978 if (!vsi->netdev)
11979 return;
11980
11981
11982 if (q_vector->rx.ring || q_vector->tx.ring) {
11983 if (enable)
11984 napi_enable(&q_vector->napi);
11985 else
11986 napi_disable(&q_vector->napi);
11987 }
11988}
11989
11990
11991
11992
11993
11994
11995
11996
11997
11998static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
11999 bool enable)
12000{
12001 struct i40e_pf *pf = vsi->back;
12002 int pf_q, ret = 0;
12003
12004 pf_q = vsi->base_queue + queue_pair;
12005 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
				     false /* not the XDP Tx ring */, enable);
12007 if (ret) {
12008 dev_info(&pf->pdev->dev,
12009 "VSI seid %d Tx ring %d %sable timeout\n",
12010 vsi->seid, pf_q, (enable ? "en" : "dis"));
12011 return ret;
12012 }
12013
12014 i40e_control_rx_q(pf, pf_q, enable);
12015 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
12016 if (ret) {
12017 dev_info(&pf->pdev->dev,
12018 "VSI seid %d Rx ring %d %sable timeout\n",
12019 vsi->seid, pf_q, (enable ? "en" : "dis"));
12020 return ret;
12021 }
12022
12023
12024
12025
12026 if (!enable)
12027 mdelay(50);
12028
12029 if (!i40e_enabled_xdp_vsi(vsi))
12030 return ret;
12031
12032 ret = i40e_control_wait_tx_q(vsi->seid, pf,
12033 pf_q + vsi->alloc_queue_pairs,
				     true /* XDP Tx ring */, enable);
12035 if (ret) {
12036 dev_info(&pf->pdev->dev,
12037 "VSI seid %d XDP Tx ring %d %sable timeout\n",
12038 vsi->seid, pf_q, (enable ? "en" : "dis"));
12039 }
12040
12041 return ret;
12042}
12043
12044
12045
12046
12047
12048
12049static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
12050{
12051 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12052 struct i40e_pf *pf = vsi->back;
12053 struct i40e_hw *hw = &pf->hw;
12054
12055
12056 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
12057 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
12058 else
12059 i40e_irq_dynamic_enable_icr0(pf);
12060
12061 i40e_flush(hw);
12062}
12063
12064
12065
12066
12067
12068
12069static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
12070{
12071 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12072 struct i40e_pf *pf = vsi->back;
12073 struct i40e_hw *hw = &pf->hw;
12074
12075
12076
12077
12078
12079
12080
12081 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
12082 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
12083
12084 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
12085 i40e_flush(hw);
12086 synchronize_irq(pf->msix_entries[intpf].vector);
12087 } else {
12088
12089 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
12090 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
12091 i40e_flush(hw);
12092 synchronize_irq(pf->pdev->irq);
12093 }
12094}
12095
12096
12097
12098
12099
12100
12101
12102
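/**
 * i40e_queue_pair_disable - Disables a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 *
 * Returns 0 on success, <0 on failure.
 **/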
12103int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
12104{
12105 int err;
12106
12107 err = i40e_enter_busy_conf(vsi);
12108 if (err)
12109 return err;
12110
12111 i40e_queue_pair_disable_irq(vsi, queue_pair);
12112 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
12113 i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
12114 i40e_queue_pair_clean_rings(vsi, queue_pair);
12115 i40e_queue_pair_reset_stats(vsi, queue_pair);
12116
12117 return err;
12118}
12119
12120
12121
12122
12123
12124
12125
12126
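/**
 * i40e_queue_pair_enable - Enable a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair index
 *
 * Returns 0 on success, <0 on failure.
 **/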
12127int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
12128{
12129 int err;
12130
12131 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
12132 if (err)
12133 return err;
12134
12135 if (i40e_enabled_xdp_vsi(vsi)) {
12136 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
12137 if (err)
12138 return err;
12139 }
12140
12141 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
12142 if (err)
12143 return err;
12144
12145 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
12146 i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
12147 i40e_queue_pair_enable_irq(vsi, queue_pair);
12148
12149 i40e_exit_busy_conf(vsi);
12150
12151 return err;
12152}
12153
12154
12155
12156
12157
12158
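/**
 * i40e_xdp - XDP command handler for the main VSI
 * @dev: netdevice
 * @xdp: XDP command
 **/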
12159static int __maybe_unused i40e_xdp(struct net_device *dev,
12160 struct netdev_xdp *xdp)
12161{
12162 struct i40e_netdev_priv *np = netdev_priv(dev);
12163 struct i40e_vsi *vsi = np->vsi;
12164
12165 if (vsi->type != I40E_VSI_MAIN)
12166 return -EINVAL;
12167
12168 switch (xdp->command) {
12169 case XDP_SETUP_PROG:
12170 return i40e_xdp_setup(vsi, xdp->prog);
12171 case XDP_QUERY_PROG:
12172 xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
12173 return 0;
12174 case XDP_QUERY_XSK_UMEM:
12175 return i40e_xsk_umem_query(vsi, &xdp->xsk.umem,
12176 xdp->xsk.queue_id);
12177 case XDP_SETUP_XSK_UMEM:
12178 return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
12179 xdp->xsk.queue_id);
12180 default:
12181 return -EINVAL;
12182 }
12183}
12184
12185static const struct net_device_ops i40e_netdev_ops = {
12186 .ndo_size = sizeof(struct net_device_ops),
12187 .ndo_open = i40e_open,
12188 .ndo_stop = i40e_close,
12189 .ndo_start_xmit = i40e_lan_xmit_frame,
12190 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
12191 .ndo_set_rx_mode = i40e_set_rx_mode,
12192 .ndo_validate_addr = eth_validate_addr,
12193 .ndo_set_mac_address = i40e_set_mac,
12194 .extended.ndo_change_mtu = i40e_change_mtu,
12195 .ndo_do_ioctl = i40e_ioctl,
12196 .ndo_tx_timeout = i40e_tx_timeout,
12197 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
12198 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
12199#ifdef CONFIG_NET_POLL_CONTROLLER
12200 .ndo_poll_controller = i40e_netpoll,
12201#endif
12202 .extended.ndo_setup_tc_rh = __i40e_setup_tc,
12203 .ndo_set_features = i40e_set_features,
12204 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
12205 .extended.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
12206 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
12207 .ndo_get_vf_config = i40e_ndo_get_vf_config,
12208 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
12209 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
12210 .extended.ndo_set_vf_trust = i40e_ndo_set_vf_trust,
12211 .extended.ndo_udp_tunnel_add = i40e_udp_tunnel_add,
12212 .extended.ndo_udp_tunnel_del = i40e_udp_tunnel_del,
12213 .ndo_get_phys_port_id = i40e_get_phys_port_id,
12214 .ndo_fdb_add = i40e_ndo_fdb_add,
12215 .ndo_features_check = i40e_features_check,
12216 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
12217 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
12218#if 0
12219 .extended.ndo_xdp = i40e_xdp,
12220 .extended.ndo_xdp_xmit = i40e_xdp_xmit,
12221 .ndo_xsk_async_xmit = i40e_xsk_async_xmit,
12222#endif
12223};
12224
12225
12226
12227
12228
12229
12230
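/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative value on failure
 **/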
12231static int i40e_config_netdev(struct i40e_vsi *vsi)
12232{
12233 struct i40e_pf *pf = vsi->back;
12234 struct i40e_hw *hw = &pf->hw;
12235 struct i40e_netdev_priv *np;
12236 struct net_device *netdev;
12237 u8 broadcast[ETH_ALEN];
12238 u8 mac_addr[ETH_ALEN];
12239 int etherdev_size;
12240 netdev_features_t hw_enc_features;
12241 netdev_features_t hw_features;
12242
12243 etherdev_size = sizeof(struct i40e_netdev_priv);
12244 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
12245 if (!netdev)
12246 return -ENOMEM;
12247
12248 vsi->netdev = netdev;
12249 np = netdev_priv(netdev);
12250 np->vsi = vsi;
12251
12252 hw_enc_features = NETIF_F_SG |
12253 NETIF_F_IP_CSUM |
12254 NETIF_F_IPV6_CSUM |
12255 NETIF_F_HIGHDMA |
12256 NETIF_F_SOFT_FEATURES |
12257 NETIF_F_TSO |
12258 NETIF_F_TSO_ECN |
12259 NETIF_F_TSO6 |
12260 NETIF_F_GSO_GRE |
12261 NETIF_F_GSO_GRE_CSUM |
12262 NETIF_F_GSO_IPIP |
12263 NETIF_F_GSO_SIT |
12264 NETIF_F_GSO_PARTIAL |
12265 NETIF_F_GSO_UDP_TUNNEL |
12266 NETIF_F_GSO_UDP_TUNNEL_CSUM |
12267 NETIF_F_SCTP_CRC |
12268 NETIF_F_RXHASH |
12269 NETIF_F_RXCSUM |
12270 0;
12271
12272 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
12273 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
12274
12275 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
12276
12277 netdev->hw_enc_features |= hw_enc_features;
12278
12279
12280 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
12281
12282 hw_features = hw_enc_features |
12283 NETIF_F_HW_VLAN_CTAG_TX |
12284 NETIF_F_HW_VLAN_CTAG_RX;
12285
12286 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
12287 hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
12288
12289 netdev->hw_features |= hw_features;
12290
12291 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
12292 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
12293
12294 if (vsi->type == I40E_VSI_MAIN) {
12295 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
12296 ether_addr_copy(mac_addr, hw->mac.perm_addr);
12297
12298
12299
12300
12301
12302
12303
12304
12305
12306
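		/* Drop the firmware's default MAC/VLAN filter and install a
		 * filter for the port's permanent MAC address instead.
		 */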
12307 i40e_rm_default_mac_filter(vsi, mac_addr);
12308 spin_lock_bh(&vsi->mac_filter_hash_lock);
12309 i40e_add_mac_filter(vsi, mac_addr);
12310 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12311 } else {
12312
12313
12314
12315
12316
12317 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
12318 IFNAMSIZ - 4,
12319 pf->vsi[pf->lan_vsi]->netdev->name);
12320 eth_random_addr(mac_addr);
12321
12322 spin_lock_bh(&vsi->mac_filter_hash_lock);
12323 i40e_add_mac_filter(vsi, mac_addr);
12324 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12325 }
12326
12327
12328
12329
12330
12331
12332
12333
12334
12335
12336
12337
12338
12339
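	/* Add a filter for the broadcast address so the VSI receives
	 * broadcast frames as soon as it is brought up.
	 */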
12340 eth_broadcast_addr(broadcast);
12341 spin_lock_bh(&vsi->mac_filter_hash_lock);
12342 i40e_add_mac_filter(vsi, broadcast);
12343 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12344
12345 ether_addr_copy(netdev->dev_addr, mac_addr);
12346 ether_addr_copy(netdev->perm_addr, mac_addr);
12347
12348
12349 netdev->neigh_priv_len = sizeof(u32) * 4;
12350
12351 netdev->priv_flags |= IFF_UNICAST_FLT;
12352 netdev->priv_flags |= IFF_SUPP_NOFCS;
12353
12354 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
12355
12356 netdev->netdev_ops = &i40e_netdev_ops;
12357 netdev->watchdog_timeo = 5 * HZ;
12358 i40e_set_ethtool_ops(netdev);
12359
12360
12361 netdev->extended->min_mtu = ETH_MIN_MTU;
12362 netdev->extended->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
12363
12364 return 0;
12365}
12366
12367
12368
12369
12370
12371
12372
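/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 **/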
12373static void i40e_vsi_delete(struct i40e_vsi *vsi)
12374{
12375
12376 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
12377 return;
12378
12379 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
12380}
12381
12382
12383
12384
12385
12386
12387
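/**
 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
 * @vsi: the VSI being queried
 *
 * Returns 1 if the uplink is in VEB mode, 0 for VEPA mode, and a negative
 * value if the associated VEB cannot be found.
 **/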
12388int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
12389{
12390 struct i40e_veb *veb;
12391 struct i40e_pf *pf = vsi->back;
12392
12393
12394 if (vsi->veb_idx == I40E_NO_VEB)
12395 return 1;
12396
12397 veb = pf->veb[vsi->veb_idx];
12398 if (!veb) {
12399 dev_info(&pf->pdev->dev,
12400 "There is no veb associated with the bridge\n");
12401 return -ENOENT;
12402 }
12403
12404
12405 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
12406 return 0;
12407 } else {
12408
12409 return 1;
12410 }
12411
12412
12413 return 0;
12414}
12415
12416
12417
12418
12419
12420
12421
12422
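/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.
 **/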
12423static int i40e_add_vsi(struct i40e_vsi *vsi)
12424{
12425 int ret = -ENODEV;
12426 struct i40e_pf *pf = vsi->back;
12427 struct i40e_hw *hw = &pf->hw;
12428 struct i40e_vsi_context ctxt;
12429 struct i40e_mac_filter *f;
12430 struct hlist_node *h;
12431 int bkt;
12432
12433 u8 enabled_tc = 0x1;
12434 int f_count = 0;
12435
12436 memset(&ctxt, 0, sizeof(ctxt));
12437 switch (vsi->type) {
12438 case I40E_VSI_MAIN:
12439
12440
12441
12442
12443
12444 ctxt.seid = pf->main_vsi_seid;
12445 ctxt.pf_num = pf->hw.pf_id;
12446 ctxt.vf_num = 0;
12447 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
12448 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
12449 if (ret) {
12450 dev_info(&pf->pdev->dev,
12451 "couldn't get PF vsi config, err %s aq_err %s\n",
12452 i40e_stat_str(&pf->hw, ret),
12453 i40e_aq_str(&pf->hw,
12454 pf->hw.aq.asq_last_status));
12455 return -ENOENT;
12456 }
12457 vsi->info = ctxt.info;
12458 vsi->info.valid_sections = 0;
12459
12460 vsi->seid = ctxt.seid;
12461 vsi->id = ctxt.vsi_number;
12462
12463 enabled_tc = i40e_pf_get_tc_map(pf);
12464
12465
12466
12467
12468
12469 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
12470 memset(&ctxt, 0, sizeof(ctxt));
12471 ctxt.seid = pf->main_vsi_seid;
12472 ctxt.pf_num = pf->hw.pf_id;
12473 ctxt.vf_num = 0;
12474 ctxt.info.valid_sections |=
12475 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12476 ctxt.info.switch_id =
12477 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
12478 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
12479 if (ret) {
12480 dev_info(&pf->pdev->dev,
12481 "update vsi failed, err %s aq_err %s\n",
12482 i40e_stat_str(&pf->hw, ret),
12483 i40e_aq_str(&pf->hw,
12484 pf->hw.aq.asq_last_status));
12485 ret = -ENOENT;
12486 goto err;
12487 }
12488 }
12489
12490
12491 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
12492 !(pf->hw.func_caps.iscsi)) {
12493 memset(&ctxt, 0, sizeof(ctxt));
12494 ctxt.seid = pf->main_vsi_seid;
12495 ctxt.pf_num = pf->hw.pf_id;
12496 ctxt.vf_num = 0;
12497 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
12498 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
12499 if (ret) {
12500 dev_info(&pf->pdev->dev,
12501 "update vsi failed, err %s aq_err %s\n",
12502 i40e_stat_str(&pf->hw, ret),
12503 i40e_aq_str(&pf->hw,
12504 pf->hw.aq.asq_last_status));
12505 ret = -ENOENT;
12506 goto err;
12507 }
12508
12509 i40e_vsi_update_queue_map(vsi, &ctxt);
12510 vsi->info.valid_sections = 0;
12511 } else {
12512
12513
12514
12515
12516
12517
12518 ret = i40e_vsi_config_tc(vsi, enabled_tc);
12519 if (ret) {
12520
12521
12522
12523 dev_info(&pf->pdev->dev,
12524 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
12525 enabled_tc,
12526 i40e_stat_str(&pf->hw, ret),
12527 i40e_aq_str(&pf->hw,
12528 pf->hw.aq.asq_last_status));
12529 }
12530 }
12531 break;
12532
12533 case I40E_VSI_FDIR:
12534 ctxt.pf_num = hw->pf_id;
12535 ctxt.vf_num = 0;
12536 ctxt.uplink_seid = vsi->uplink_seid;
12537 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12538 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
12539 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
12540 (i40e_is_vsi_uplink_mode_veb(vsi))) {
12541 ctxt.info.valid_sections |=
12542 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12543 ctxt.info.switch_id =
12544 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12545 }
12546 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12547 break;
12548
12549 case I40E_VSI_VMDQ2:
12550 ctxt.pf_num = hw->pf_id;
12551 ctxt.vf_num = 0;
12552 ctxt.uplink_seid = vsi->uplink_seid;
12553 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12554 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
12555
12556
12557
12558
12559 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
12560 ctxt.info.valid_sections |=
12561 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12562 ctxt.info.switch_id =
12563 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12564 }
12565
12566
12567 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12568 break;
12569
12570 case I40E_VSI_SRIOV:
12571 ctxt.pf_num = hw->pf_id;
12572 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
12573 ctxt.uplink_seid = vsi->uplink_seid;
12574 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12575 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
12576
12577
12578
12579
12580 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
12581 ctxt.info.valid_sections |=
12582 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12583 ctxt.info.switch_id =
12584 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12585 }
12586
12587 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
12588 ctxt.info.valid_sections |=
12589 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
12590 ctxt.info.queueing_opt_flags |=
12591 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
12592 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
12593 }
12594
12595 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
12596 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
12597 if (pf->vf[vsi->vf_id].spoofchk) {
12598 ctxt.info.valid_sections |=
12599 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
12600 ctxt.info.sec_flags |=
12601 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
12602 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
12603 }
12604
12605 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12606 break;
12607
12608 case I40E_VSI_IWARP:
12609
12610 break;
12611
12612 default:
12613 return -ENODEV;
12614 }
12615
12616 if (vsi->type != I40E_VSI_MAIN) {
12617 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
12618 if (ret) {
12619 dev_info(&vsi->back->pdev->dev,
12620 "add vsi failed, err %s aq_err %s\n",
12621 i40e_stat_str(&pf->hw, ret),
12622 i40e_aq_str(&pf->hw,
12623 pf->hw.aq.asq_last_status));
12624 ret = -ENOENT;
12625 goto err;
12626 }
12627 vsi->info = ctxt.info;
12628 vsi->info.valid_sections = 0;
12629 vsi->seid = ctxt.seid;
12630 vsi->id = ctxt.vsi_number;
12631 }
12632
12633 vsi->active_filters = 0;
12634 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
12635 spin_lock_bh(&vsi->mac_filter_hash_lock);
12636
12637 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
12638 f->state = I40E_FILTER_NEW;
12639 f_count++;
12640 }
12641 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12642
12643 if (f_count) {
12644 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
12645 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
12646 }
12647
12648
12649 ret = i40e_vsi_get_bw_info(vsi);
12650 if (ret) {
12651 dev_info(&pf->pdev->dev,
12652 "couldn't get vsi bw info, err %s aq_err %s\n",
12653 i40e_stat_str(&pf->hw, ret),
12654 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
12655
12656 ret = 0;
12657 }
12658
12659err:
12660 return ret;
12661}
12662
12663
12664
12665
12666
12667
12668
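/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 **/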
12669int i40e_vsi_release(struct i40e_vsi *vsi)
12670{
12671 struct i40e_mac_filter *f;
12672 struct hlist_node *h;
12673 struct i40e_veb *veb = NULL;
12674 struct i40e_pf *pf;
12675 u16 uplink_seid;
12676 int i, n, bkt;
12677
12678 pf = vsi->back;
12679
12680
12681 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
12682 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
12683 vsi->seid, vsi->uplink_seid);
12684 return -ENODEV;
12685 }
12686 if (vsi == pf->vsi[pf->lan_vsi] &&
12687 !test_bit(__I40E_DOWN, pf->state)) {
12688 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
12689 return -ENODEV;
12690 }
12691
12692 uplink_seid = vsi->uplink_seid;
12693 if (vsi->type != I40E_VSI_SRIOV) {
12694 if (vsi->netdev_registered) {
12695 vsi->netdev_registered = false;
12696 if (vsi->netdev) {
12697
12698 unregister_netdev(vsi->netdev);
12699 }
12700 } else {
12701 i40e_vsi_close(vsi);
12702 }
12703 i40e_vsi_disable_irq(vsi);
12704 }
12705
12706 spin_lock_bh(&vsi->mac_filter_hash_lock);
12707
12708
12709 if (vsi->netdev) {
12710 __dev_uc_unsync(vsi->netdev, NULL);
12711 __dev_mc_unsync(vsi->netdev, NULL);
12712 }
12713
12714
12715 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
12716 __i40e_del_filter(vsi, f);
12717
12718 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12719
12720 i40e_sync_vsi_filters(vsi);
12721
12722 i40e_vsi_delete(vsi);
12723 i40e_vsi_free_q_vectors(vsi);
12724 if (vsi->netdev) {
12725 free_netdev(vsi->netdev);
12726 vsi->netdev = NULL;
12727 }
12728 i40e_vsi_clear_rings(vsi);
12729 i40e_vsi_clear(vsi);
12730
12731
12732
12733
12734
12735
12736
12737
12738
12739 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
12740 if (pf->vsi[i] &&
12741 pf->vsi[i]->uplink_seid == uplink_seid &&
12742 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
12743 n++;
12744 }
12745 }
12746 for (i = 0; i < I40E_MAX_VEB; i++) {
12747 if (!pf->veb[i])
12748 continue;
12749 if (pf->veb[i]->uplink_seid == uplink_seid)
12750 n++;
12751 if (pf->veb[i]->seid == uplink_seid)
12752 veb = pf->veb[i];
12753 }
12754 if (n == 0 && veb && veb->uplink_seid != 0)
12755 i40e_veb_release(veb);
12756
12757 return 0;
12758}
12759
12760
12761
12762
12763
12764
12765
12766
12767
12768
12769
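/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: ptr to the VSI
 *
 * Allocates the VSI's queue vectors and, in MSI-X mode, reserves a block of
 * interrupt vectors for it.
 *
 * Returns 0 on success or a negative value on failure
 **/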
12770static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
12771{
12772 int ret = -ENOENT;
12773 struct i40e_pf *pf = vsi->back;
12774
12775 if (vsi->q_vectors[0]) {
12776 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
12777 vsi->seid);
12778 return -EEXIST;
12779 }
12780
12781 if (vsi->base_vector) {
12782 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
12783 vsi->seid, vsi->base_vector);
12784 return -EEXIST;
12785 }
12786
12787 ret = i40e_vsi_alloc_q_vectors(vsi);
12788 if (ret) {
12789 dev_info(&pf->pdev->dev,
12790 "failed to allocate %d q_vectors for VSI %d, ret=%d\n",
12791 vsi->num_q_vectors, vsi->seid, ret);
12792 vsi->num_q_vectors = 0;
12793 goto vector_setup_out;
12794 }
12795
12796
12797
12798
12799 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
12800 return ret;
12801 if (vsi->num_q_vectors)
12802 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
12803 vsi->num_q_vectors, vsi->idx);
12804 if (vsi->base_vector < 0) {
12805 dev_info(&pf->pdev->dev,
12806 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
12807 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
12808 i40e_vsi_free_q_vectors(vsi);
12809 ret = -ENOENT;
12810 goto vector_setup_out;
12811 }
12812
12813vector_setup_out:
12814 return ret;
12815}
12816
12817
12818
12819
12820
12821
12822
12823
12824
12825
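/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi
 *
 * This re-allocates a vsi's queue resources.
 *
 * Returns pointer to the successfully reconfigured VSI sw struct on
 * success, otherwise returns NULL on failure.
 **/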
12826static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
12827{
12828 u16 alloc_queue_pairs;
12829 struct i40e_pf *pf;
12830 u8 enabled_tc;
12831 int ret;
12832
12833 if (!vsi)
12834 return NULL;
12835
12836 pf = vsi->back;
12837
12838 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
12839 i40e_vsi_clear_rings(vsi);
12840
12841 i40e_vsi_free_arrays(vsi, false);
12842 i40e_set_num_rings_in_vsi(vsi);
12843 ret = i40e_vsi_alloc_arrays(vsi, false);
12844 if (ret)
12845 goto err_vsi;
12846
12847 alloc_queue_pairs = vsi->alloc_queue_pairs *
12848 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
12849
12850 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
12851 if (ret < 0) {
12852 dev_info(&pf->pdev->dev,
12853 "failed to get tracking for %d queues for VSI %d err %d\n",
12854 alloc_queue_pairs, vsi->seid, ret);
12855 goto err_vsi;
12856 }
12857 vsi->base_queue = ret;
12858
12859
12860
12861
12862 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
12863 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
12864 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
12865 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
12866 if (vsi->type == I40E_VSI_MAIN)
12867 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
12868
12869
12870 ret = i40e_alloc_rings(vsi);
12871 if (ret)
12872 goto err_rings;
12873
12874
12875 i40e_vsi_map_rings_to_vectors(vsi);
12876 return vsi;
12877
12878err_rings:
12879 i40e_vsi_free_q_vectors(vsi);
12880 if (vsi->netdev_registered) {
12881 vsi->netdev_registered = false;
12882 unregister_netdev(vsi->netdev);
12883 free_netdev(vsi->netdev);
12884 vsi->netdev = NULL;
12885 }
12886 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
12887err_vsi:
12888 i40e_vsi_clear(vsi);
12889 return NULL;
12890}
12891
12892
12893
12894
12895
12896
12897
12898
12899
12900
12901
12902
12903
12904
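/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type; for a VF VSI it is the VF id
 *
 * This allocates the sw VSI structure and its queue resources, then adds a
 * VSI to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/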
12905struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
12906 u16 uplink_seid, u32 param1)
12907{
12908 struct i40e_vsi *vsi = NULL;
12909 struct i40e_veb *veb = NULL;
12910 u16 alloc_queue_pairs;
12911 int ret, i;
12912 int v_idx;
12913
12914
12915
12916
12917
12918
12919
12920
12921
12922
12923
12924
12925
12926
12927 for (i = 0; i < I40E_MAX_VEB; i++) {
12928 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
12929 veb = pf->veb[i];
12930 break;
12931 }
12932 }
12933
12934 if (!veb && uplink_seid != pf->mac_seid) {
12935
12936 for (i = 0; i < pf->num_alloc_vsi; i++) {
12937 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
12938 vsi = pf->vsi[i];
12939 break;
12940 }
12941 }
12942 if (!vsi) {
12943 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
12944 uplink_seid);
12945 return NULL;
12946 }
12947
12948 if (vsi->uplink_seid == pf->mac_seid)
12949 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
12950 vsi->tc_config.enabled_tc);
12951 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
12952 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
12953 vsi->tc_config.enabled_tc);
12954 if (veb) {
12955 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
12956 dev_info(&vsi->back->pdev->dev,
12957 "New VSI creation error, uplink seid of LAN VSI expected.\n");
12958 return NULL;
12959 }
12960
12961
12962
12963
12964 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
12965 veb->bridge_mode = BRIDGE_MODE_VEPA;
12966 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
12967 }
12968 i40e_config_bridge_mode(veb);
12969 }
12970 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12971 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12972 veb = pf->veb[i];
12973 }
12974 if (!veb) {
12975 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
12976 return NULL;
12977 }
12978
12979 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
12980 uplink_seid = veb->seid;
12981 }
12982
12983
12984 v_idx = i40e_vsi_mem_alloc(pf, type);
12985 if (v_idx < 0)
12986 goto err_alloc;
12987 vsi = pf->vsi[v_idx];
12988 if (!vsi)
12989 goto err_alloc;
12990 vsi->type = type;
12991 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
12992
12993 if (type == I40E_VSI_MAIN)
12994 pf->lan_vsi = v_idx;
12995 else if (type == I40E_VSI_SRIOV)
12996 vsi->vf_id = param1;
12997
12998 alloc_queue_pairs = vsi->alloc_queue_pairs *
12999 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
13000
13001 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
13002 if (ret < 0) {
13003 dev_info(&pf->pdev->dev,
13004 "failed to get tracking for %d queues for VSI %d err=%d\n",
13005 alloc_queue_pairs, vsi->seid, ret);
13006 goto err_vsi;
13007 }
13008 vsi->base_queue = ret;
13009
13010
13011 vsi->uplink_seid = uplink_seid;
13012 ret = i40e_add_vsi(vsi);
13013 if (ret)
13014 goto err_vsi;
13015
13016 switch (vsi->type) {
13017
13018 case I40E_VSI_MAIN:
13019 case I40E_VSI_VMDQ2:
13020 ret = i40e_config_netdev(vsi);
13021 if (ret)
13022 goto err_netdev;
13023 ret = register_netdev(vsi->netdev);
13024 if (ret)
13025 goto err_netdev;
13026 vsi->netdev_registered = true;
13027 netif_carrier_off(vsi->netdev);
13028#ifdef CONFIG_I40E_DCB
13029
13030 i40e_dcbnl_setup(vsi);
13031#endif
13032
13033
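		/* fall through */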
13034 case I40E_VSI_FDIR:
13035
13036 ret = i40e_vsi_setup_vectors(vsi);
13037 if (ret)
13038 goto err_msix;
13039
13040 ret = i40e_alloc_rings(vsi);
13041 if (ret)
13042 goto err_rings;
13043
13044
13045 i40e_vsi_map_rings_to_vectors(vsi);
13046
13047 i40e_vsi_reset_stats(vsi);
13048 break;
13049
13050 default:
13051
13052 break;
13053 }
13054
13055 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
13056 (vsi->type == I40E_VSI_VMDQ2)) {
13057 ret = i40e_vsi_config_rss(vsi);
13058 }
13059 return vsi;
13060
13061err_rings:
13062 i40e_vsi_free_q_vectors(vsi);
13063err_msix:
13064 if (vsi->netdev_registered) {
13065 vsi->netdev_registered = false;
13066 unregister_netdev(vsi->netdev);
13067 free_netdev(vsi->netdev);
13068 vsi->netdev = NULL;
13069 }
13070err_netdev:
13071 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
13072err_vsi:
13073 i40e_vsi_clear(vsi);
13074err_alloc:
13075 return NULL;
13076}
13077
13078
13079
13080
13081
13082
13083
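/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for the given VEB.
 **/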
13084static int i40e_veb_get_bw_info(struct i40e_veb *veb)
13085{
13086 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
13087 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
13088 struct i40e_pf *pf = veb->pf;
13089 struct i40e_hw *hw = &pf->hw;
13090 u32 tc_bw_max;
13091 int ret = 0;
13092 int i;
13093
13094 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
13095 &bw_data, NULL);
13096 if (ret) {
13097 dev_info(&pf->pdev->dev,
13098 "query veb bw config failed, err %s aq_err %s\n",
13099 i40e_stat_str(&pf->hw, ret),
13100 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
13101 goto out;
13102 }
13103
13104 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
13105 &ets_data, NULL);
13106 if (ret) {
13107 dev_info(&pf->pdev->dev,
13108 "query veb bw ets config failed, err %s aq_err %s\n",
13109 i40e_stat_str(&pf->hw, ret),
13110 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
13111 goto out;
13112 }
13113
13114 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
13115 veb->bw_max_quanta = ets_data.tc_bw_max;
13116 veb->is_abs_credits = bw_data.absolute_credits_enable;
13117 veb->enabled_tc = ets_data.tc_valid_bits;
13118 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
13119 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
13120 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
13121 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
13122 veb->bw_tc_limit_credits[i] =
13123 le16_to_cpu(bw_data.tc_bw_limits[i]);
13124 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
13125 }
13126
13127out:
13128 return ret;
13129}
13130
13131
13132
13133
13134
13135
13136
13137
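/**
 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On success returns the index of the newly allocated VEB, on failure
 * returns a negative error code.
 **/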
13138static int i40e_veb_mem_alloc(struct i40e_pf *pf)
13139{
13140 int ret = -ENOENT;
13141 struct i40e_veb *veb;
13142 int i;
13143
13144
13145 mutex_lock(&pf->switch_mutex);
13146
13147
13148
13149
13150
13151
13152
13153 i = 0;
13154 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
13155 i++;
13156 if (i >= I40E_MAX_VEB) {
13157 ret = -ENOMEM;
13158 goto err_alloc_veb;
13159 }
13160
13161 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
13162 if (!veb) {
13163 ret = -ENOMEM;
13164 goto err_alloc_veb;
13165 }
13166 veb->pf = pf;
13167 veb->idx = i;
13168 veb->enabled_tc = 1;
13169
13170 pf->veb[i] = veb;
13171 ret = i;
13172err_alloc_veb:
13173 mutex_unlock(&pf->switch_mutex);
13174 return ret;
13175}
13176
13177
13178
13179
13180
13181
13182
13183
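/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/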
13184static void i40e_switch_branch_release(struct i40e_veb *branch)
13185{
13186 struct i40e_pf *pf = branch->pf;
13187 u16 branch_seid = branch->seid;
13188 u16 veb_idx = branch->idx;
13189 int i;
13190
13191
13192 for (i = 0; i < I40E_MAX_VEB; i++) {
13193 if (!pf->veb[i])
13194 continue;
13195 if (pf->veb[i]->uplink_seid == branch->seid)
13196 i40e_switch_branch_release(pf->veb[i]);
13197 }
13198
13199
13200
13201
13202
13203
13204 for (i = 0; i < pf->num_alloc_vsi; i++) {
13205 if (!pf->vsi[i])
13206 continue;
13207 if (pf->vsi[i]->uplink_seid == branch_seid &&
13208 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13209 i40e_vsi_release(pf->vsi[i]);
13210 }
13211 }
13212
13213
13214
13215
13216
13217
13218 if (pf->veb[veb_idx])
13219 i40e_veb_release(pf->veb[veb_idx]);
13220}
13221
13222
13223
13224
13225
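/**
 * i40e_veb_clear - remove veb struct
 * @veb: the veb to remove
 **/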
13226static void i40e_veb_clear(struct i40e_veb *veb)
13227{
13228 if (!veb)
13229 return;
13230
13231 if (veb->pf) {
13232 struct i40e_pf *pf = veb->pf;
13233
13234 mutex_lock(&pf->switch_mutex);
13235 if (pf->veb[veb->idx] == veb)
13236 pf->veb[veb->idx] = NULL;
13237 mutex_unlock(&pf->switch_mutex);
13238 }
13239
13240 kfree(veb);
13241}
13242
13243
13244
13245
13246
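/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 **/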
13247void i40e_veb_release(struct i40e_veb *veb)
13248{
13249 struct i40e_vsi *vsi = NULL;
13250 struct i40e_pf *pf;
13251 int i, n = 0;
13252
13253 pf = veb->pf;
13254
13255
13256 for (i = 0; i < pf->num_alloc_vsi; i++) {
13257 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
13258 n++;
13259 vsi = pf->vsi[i];
13260 }
13261 }
13262 if (n != 1) {
13263 dev_info(&pf->pdev->dev,
13264 "can't remove VEB %d with %d VSIs left\n",
13265 veb->seid, n);
13266 return;
13267 }
13268
13269
13270 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
13271 if (veb->uplink_seid) {
13272 vsi->uplink_seid = veb->uplink_seid;
13273 if (veb->uplink_seid == pf->mac_seid)
13274 vsi->veb_idx = I40E_NO_VEB;
13275 else
13276 vsi->veb_idx = veb->veb_idx;
13277 } else {
13278
13279 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
13280 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
13281 }
13282
13283 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
13284 i40e_veb_clear(veb);
13285}
13286
13287
13288
13289
13290
13291
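/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 **/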
13292static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
13293{
13294 struct i40e_pf *pf = veb->pf;
13295 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
13296 int ret;
13297
13298 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
13299 veb->enabled_tc, false,
13300 &veb->seid, enable_stats, NULL);
13301
13302
13303 if (ret) {
13304 dev_info(&pf->pdev->dev,
13305 "couldn't add VEB, err %s aq_err %s\n",
13306 i40e_stat_str(&pf->hw, ret),
13307 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13308 return -EPERM;
13309 }
13310
13311
13312 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
13313 &veb->stats_idx, NULL, NULL, NULL);
13314 if (ret) {
13315 dev_info(&pf->pdev->dev,
13316 "couldn't get VEB statistics idx, err %s aq_err %s\n",
13317 i40e_stat_str(&pf->hw, ret),
13318 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13319 return -EPERM;
13320 }
13321 ret = i40e_veb_get_bw_info(veb);
13322 if (ret) {
13323 dev_info(&pf->pdev->dev,
13324 "couldn't get VEB bw info, err %s aq_err %s\n",
13325 i40e_stat_str(&pf->hw, ret),
13326 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13327 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
13328 return -ENOENT;
13329 }
13330
13331 vsi->uplink_seid = veb->seid;
13332 vsi->veb_idx = veb->idx;
13333 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
13334
13335 return 0;
13336}
13337
13338
13339
13340
13341
13342
13343
13344
13345
13346
13347
13348
13349
13350
13351
13352
13353
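/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/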
13354struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
13355 u16 uplink_seid, u16 vsi_seid,
13356 u8 enabled_tc)
13357{
13358 struct i40e_veb *veb, *uplink_veb = NULL;
13359 int vsi_idx, veb_idx;
13360 int ret;
13361
13362
13363 if ((uplink_seid == 0 || vsi_seid == 0) &&
13364 (uplink_seid + vsi_seid != 0)) {
13365 dev_info(&pf->pdev->dev,
13366 "one, not both seids are 0: uplink=%d vsi=%d\n",
13367 uplink_seid, vsi_seid);
13368 return NULL;
13369 }
13370
13371
13372 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
13373 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
13374 break;
13375 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
13376 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
13377 vsi_seid);
13378 return NULL;
13379 }
13380
13381 if (uplink_seid && uplink_seid != pf->mac_seid) {
13382 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
13383 if (pf->veb[veb_idx] &&
13384 pf->veb[veb_idx]->seid == uplink_seid) {
13385 uplink_veb = pf->veb[veb_idx];
13386 break;
13387 }
13388 }
13389 if (!uplink_veb) {
13390 dev_info(&pf->pdev->dev,
13391 "uplink seid %d not found\n", uplink_seid);
13392 return NULL;
13393 }
13394 }
13395
13396
13397 veb_idx = i40e_veb_mem_alloc(pf);
13398 if (veb_idx < 0)
13399 goto err_alloc;
13400 veb = pf->veb[veb_idx];
13401 veb->flags = flags;
13402 veb->uplink_seid = uplink_seid;
13403 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
13404 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
13405
13406
13407 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
13408 if (ret)
13409 goto err_veb;
13410 if (vsi_idx == pf->lan_vsi)
13411 pf->lan_veb = veb->idx;
13412
13413 return veb;
13414
13415err_veb:
13416 i40e_veb_clear(veb);
13417err_alloc:
13418 return NULL;
13419}
13420
13421
13422
13423
13424
13425
13426
13427
13428
13429
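/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/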
13430static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
13431 struct i40e_aqc_switch_config_element_resp *ele,
13432 u16 num_reported, bool printconfig)
13433{
13434 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
13435 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
13436 u8 element_type = ele->element_type;
13437 u16 seid = le16_to_cpu(ele->seid);
13438
13439 if (printconfig)
13440 dev_info(&pf->pdev->dev,
13441 "type=%d seid=%d uplink=%d downlink=%d\n",
13442 element_type, seid, uplink_seid, downlink_seid);
13443
13444 switch (element_type) {
13445 case I40E_SWITCH_ELEMENT_TYPE_MAC:
13446 pf->mac_seid = seid;
13447 break;
13448 case I40E_SWITCH_ELEMENT_TYPE_VEB:
13449
13450 if (uplink_seid != pf->mac_seid)
13451 break;
13452 if (pf->lan_veb == I40E_NO_VEB) {
13453 int v;
13454
13455
13456 for (v = 0; v < I40E_MAX_VEB; v++) {
13457 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
13458 pf->lan_veb = v;
13459 break;
13460 }
13461 }
13462 if (pf->lan_veb == I40E_NO_VEB) {
13463 v = i40e_veb_mem_alloc(pf);
13464 if (v < 0)
13465 break;
13466 pf->lan_veb = v;
13467 }
13468 }
13469
13470 pf->veb[pf->lan_veb]->seid = seid;
13471 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
13472 pf->veb[pf->lan_veb]->pf = pf;
13473 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
13474 break;
13475 case I40E_SWITCH_ELEMENT_TYPE_VSI:
13476 if (num_reported != 1)
13477 break;
13478
13479
13480
13481 pf->mac_seid = uplink_seid;
13482 pf->pf_seid = downlink_seid;
13483 pf->main_vsi_seid = seid;
13484 if (printconfig)
13485 dev_info(&pf->pdev->dev,
13486 "pf_seid=%d main_vsi_seid=%d\n",
13487 pf->pf_seid, pf->main_vsi_seid);
13488 break;
13489 case I40E_SWITCH_ELEMENT_TYPE_PF:
13490 case I40E_SWITCH_ELEMENT_TYPE_VF:
13491 case I40E_SWITCH_ELEMENT_TYPE_EMP:
13492 case I40E_SWITCH_ELEMENT_TYPE_BMC:
13493 case I40E_SWITCH_ELEMENT_TYPE_PE:
13494 case I40E_SWITCH_ELEMENT_TYPE_PA:
13495
13496 break;
13497 default:
13498 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
13499 element_type, seid);
13500 break;
13501 }
13502}
13503
13504
13505
13506
13507
13508
13509
13510
13511
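/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/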
13512int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
13513{
13514 struct i40e_aqc_get_switch_config_resp *sw_config;
13515 u16 next_seid = 0;
13516 int ret = 0;
13517 u8 *aq_buf;
13518 int i;
13519
13520 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
13521 if (!aq_buf)
13522 return -ENOMEM;
13523
13524 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
13525 do {
13526 u16 num_reported, num_total;
13527
13528 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
13529 I40E_AQ_LARGE_BUF,
13530 &next_seid, NULL);
13531 if (ret) {
13532 dev_info(&pf->pdev->dev,
13533 "get switch config failed err %s aq_err %s\n",
13534 i40e_stat_str(&pf->hw, ret),
13535 i40e_aq_str(&pf->hw,
13536 pf->hw.aq.asq_last_status));
13537 kfree(aq_buf);
13538 return -ENOENT;
13539 }
13540
13541 num_reported = le16_to_cpu(sw_config->header.num_reported);
13542 num_total = le16_to_cpu(sw_config->header.num_total);
13543
13544 if (printconfig)
13545 dev_info(&pf->pdev->dev,
13546 "header: %d reported %d total\n",
13547 num_reported, num_total);
13548
13549 for (i = 0; i < num_reported; i++) {
13550 struct i40e_aqc_switch_config_element_resp *ele =
13551 &sw_config->element[i];
13552
13553 i40e_setup_pf_switch_element(pf, ele, num_reported,
13554 printconfig);
13555 }
13556 } while (next_seid != 0);
13557
13558 kfree(aq_buf);
13559 return ret;
13560}
13561
13562
13563
13564
13565
13566
13567
13568
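/**
 * i40e_setup_pf_switch - Setup the HW switch on this PF for later use
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized
 *
 * Returns 0 on success, negative value on failure
 **/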
13569static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
13570{
13571 u16 flags = 0;
13572 int ret;
13573
13574
13575 ret = i40e_fetch_switch_configuration(pf, false);
13576 if (ret) {
13577 dev_info(&pf->pdev->dev,
13578 "couldn't fetch switch config, err %s aq_err %s\n",
13579 i40e_stat_str(&pf->hw, ret),
13580 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13581 return ret;
13582 }
13583 i40e_pf_reset_stats(pf);
13584
13585
13586
13587
13588
13589
13590
13591 if ((pf->hw.pf_id == 0) &&
13592 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
13593 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
13594 pf->last_sw_conf_flags = flags;
13595 }
13596
13597 if (pf->hw.pf_id == 0) {
13598 u16 valid_flags;
13599
13600 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
13601 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
13602 NULL);
13603 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
13604 dev_info(&pf->pdev->dev,
13605 "couldn't set switch config bits, err %s aq_err %s\n",
13606 i40e_stat_str(&pf->hw, ret),
13607 i40e_aq_str(&pf->hw,
13608 pf->hw.aq.asq_last_status));
13609
13610 }
13611 pf->last_sw_conf_valid_flags = valid_flags;
13612 }
13613
13614
13615 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
13616 struct i40e_vsi *vsi = NULL;
13617 u16 uplink_seid;
13618
13619
13620
13621
13622 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
13623 uplink_seid = pf->veb[pf->lan_veb]->seid;
13624 else
13625 uplink_seid = pf->mac_seid;
13626 if (pf->lan_vsi == I40E_NO_VSI)
13627 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
13628 else if (reinit)
13629 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
13630 if (!vsi) {
13631 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
13632 i40e_cloud_filter_exit(pf);
13633 i40e_fdir_teardown(pf);
13634 return -EAGAIN;
13635 }
13636 } else {
13637
13638 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
13639
13640 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
13641 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
13642 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
13643 }
13644 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
13645
13646 i40e_fdir_sb_setup(pf);
13647
13648
13649 ret = i40e_setup_pf_filter_control(pf);
13650 if (ret) {
13651 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
13652 ret);
13653
13654 }
13655
13656
13657
13658
13659 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
13660 i40e_pf_config_rss(pf);
13661
13662
13663 i40e_link_event(pf);
13664
13665
13666 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
13667 I40E_AQ_AN_COMPLETED) ? true : false);
13668
13669 i40e_ptp_init(pf);
13670
13671
13672 i40e_sync_udp_filters(pf);
13673
13674 return ret;
13675}
13676
13677
13678
13679
13680
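/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/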
13681static void i40e_determine_queue_usage(struct i40e_pf *pf)
13682{
13683 int queues_left;
13684 int q_max;
13685
13686 pf->num_lan_qps = 0;
13687
13688
13689
13690
13691
13692 queues_left = pf->hw.func_caps.num_tx_qp;
13693
13694 if ((queues_left == 1) ||
13695 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
13696
13697 queues_left = 0;
13698 pf->alloc_rss_size = pf->num_lan_qps = 1;
13699
13700
13701 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
13702 I40E_FLAG_IWARP_ENABLED |
13703 I40E_FLAG_FD_SB_ENABLED |
13704 I40E_FLAG_FD_ATR_ENABLED |
13705 I40E_FLAG_DCB_CAPABLE |
13706 I40E_FLAG_DCB_ENABLED |
13707 I40E_FLAG_SRIOV_ENABLED |
13708 I40E_FLAG_VMDQ_ENABLED);
13709 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
13710 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
13711 I40E_FLAG_FD_SB_ENABLED |
13712 I40E_FLAG_FD_ATR_ENABLED |
13713 I40E_FLAG_DCB_CAPABLE))) {
13714
13715 pf->alloc_rss_size = pf->num_lan_qps = 1;
13716 queues_left -= pf->num_lan_qps;
13717
13718 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
13719 I40E_FLAG_IWARP_ENABLED |
13720 I40E_FLAG_FD_SB_ENABLED |
13721 I40E_FLAG_FD_ATR_ENABLED |
13722 I40E_FLAG_DCB_ENABLED |
13723 I40E_FLAG_VMDQ_ENABLED);
13724 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
13725 } else {
13726
13727 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
13728 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
13729 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
13730 I40E_FLAG_DCB_ENABLED);
13731 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
13732 }
13733
13734
13735 q_max = max_t(int, pf->rss_size_max, num_online_cpus());
13736 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
13737 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
13738 pf->num_lan_qps = q_max;
13739
13740 queues_left -= pf->num_lan_qps;
13741 }
13742
13743 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
13744 if (queues_left > 1) {
13745 queues_left -= 1;
13746 } else {
13747 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
13748 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
13749 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
13750 }
13751 }
13752
13753 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
13754 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
13755 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
13756 (queues_left / pf->num_vf_qps));
13757 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
13758 }
13759
13760 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
13761 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
13762 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
13763 (queues_left / pf->num_vmdq_qps));
13764 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
13765 }
13766
13767 pf->queues_left = queues_left;
13768 dev_dbg(&pf->pdev->dev,
13769 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
13770 pf->hw.func_caps.num_tx_qp,
13771 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
13772 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
13773 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
13774 queues_left);
13775}
13776
13777
13778
13779
13780
13781
13782
13783
13784
13785
13786
13787
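/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF structure
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings in the HW struct, such as FDIR, ethertype and macvlan type
 * enablement and the hash lookup table size.
 *
 * Returns 0 on success, negative on failure
 **/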
13788static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
13789{
13790 struct i40e_filter_control_settings *settings = &pf->filter_settings;
13791
13792 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
13793
13794
13795 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
13796 settings->enable_fdir = true;
13797
13798
13799 settings->enable_ethtype = true;
13800 settings->enable_macvlan = true;
13801
13802 if (i40e_set_filter_control(&pf->hw, settings))
13803 return -ENOENT;
13804
13805 return 0;
13806}
13807
13808#define INFO_STRING_LEN 255
13809#define REMAIN(__x) (INFO_STRING_LEN - (__x))
13810static void i40e_print_features(struct i40e_pf *pf)
13811{
13812 struct i40e_hw *hw = &pf->hw;
13813 char *buf;
13814 int i;
13815
13816 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
13817 if (!buf)
13818 return;
13819
13820 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
13821#ifdef CONFIG_PCI_IOV
13822 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
13823#endif
13824 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
13825 pf->hw.func_caps.num_vsis,
13826 pf->vsi[pf->lan_vsi]->num_queue_pairs);
13827 if (pf->flags & I40E_FLAG_RSS_ENABLED)
13828 i += snprintf(&buf[i], REMAIN(i), " RSS");
13829 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
13830 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
13831 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
13832 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
13833 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
13834 }
13835 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
13836 i += snprintf(&buf[i], REMAIN(i), " DCB");
13837 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
13838 i += snprintf(&buf[i], REMAIN(i), " Geneve");
13839 if (pf->flags & I40E_FLAG_PTP)
13840 i += snprintf(&buf[i], REMAIN(i), " PTP");
13841 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
13842 i += snprintf(&buf[i], REMAIN(i), " VEB");
13843 else
13844 i += snprintf(&buf[i], REMAIN(i), " VEPA");
13845
13846 dev_info(&pf->pdev->dev, "%s\n", buf);
13847 kfree(buf);
13848 WARN_ON(i > INFO_STRING_LEN);
13849}
13850
13851
13852
13853
13854
13855
13856
13857
13858
13859
13860
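/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address from the platform first; if that fails, read
 * it from the hardware.
 **/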
13861static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
13862{
13863 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
13864 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
13865}
13866
13867
13868
13869
13870
13871
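/**
 * i40e_set_fec_in_flags - helper function for setting FEC options in flags
 * @fec_cfg: FEC option to set in flags
 * @flags: ptr to flags in which we set FEC option
 **/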
13872void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
13873{
13874 if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
13875 *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
13876 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
13877 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
13878 *flags |= I40E_FLAG_RS_FEC;
13879 *flags &= ~I40E_FLAG_BASE_R_FEC;
13880 }
13881 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
13882 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
13883 *flags |= I40E_FLAG_BASE_R_FEC;
13884 *flags &= ~I40E_FLAG_RS_FEC;
13885 }
13886 if (fec_cfg == 0)
13887 *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
13888}
13889
13890
13891
13892
13893
13894
13895
13896
13897
13898
13899
13900
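/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/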
13901static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
13902{
13903 struct i40e_aq_get_phy_abilities_resp abilities;
13904 struct i40e_pf *pf;
13905 struct i40e_hw *hw;
13906 static u16 pfs_found;
13907 u16 wol_nvm_bits;
13908 u16 link_status;
13909 int err;
13910 u32 val;
13911 u32 i;
13912 u8 set_fc_aq_fail;
13913
13914 err = pci_enable_device_mem(pdev);
13915 if (err)
13916 return err;
13917
13918
13919 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
13920 if (err) {
13921 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
13922 if (err) {
13923 dev_err(&pdev->dev,
13924 "DMA configuration failed: 0x%x\n", err);
13925 goto err_dma;
13926 }
13927 }
13928
13929
13930 err = pci_request_mem_regions(pdev, i40e_driver_name);
13931 if (err) {
13932 dev_info(&pdev->dev,
13933 "pci_request_selected_regions failed %d\n", err);
13934 goto err_pci_reg;
13935 }
13936
13937 pci_enable_pcie_error_reporting(pdev);
13938 pci_set_master(pdev);
13939
13940
13941
13942
13943
13944
13945 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
13946 if (!pf) {
13947 err = -ENOMEM;
13948 goto err_pf_alloc;
13949 }
13950 pf->next_vsi = 0;
13951 pf->pdev = pdev;
13952 set_bit(__I40E_DOWN, pf->state);
13953
13954 hw = &pf->hw;
13955 hw->back = pf;
13956
13957 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
13958 I40E_MAX_CSR_SPACE);
13959
13960 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
13961 if (!hw->hw_addr) {
13962 err = -EIO;
13963 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
13964 (unsigned int)pci_resource_start(pdev, 0),
13965 pf->ioremap_len, err);
13966 goto err_ioremap;
13967 }
13968 hw->vendor_id = pdev->vendor;
13969 hw->device_id = pdev->device;
13970 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
13971 hw->subsystem_vendor_id = pdev->subsystem_vendor;
13972 hw->subsystem_device_id = pdev->subsystem_device;
13973 hw->bus.device = PCI_SLOT(pdev->devfn);
13974 hw->bus.func = PCI_FUNC(pdev->devfn);
13975 hw->bus.bus_id = pdev->bus->number;
13976 pf->instance = pfs_found;
13977
13978
13979
13980
13981 hw->switch_tag = 0xffff;
13982 hw->first_tag = ETH_P_8021AD;
13983 hw->second_tag = ETH_P_8021Q;
13984
13985 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
13986 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
13987
13988
13989
13990
13991 mutex_init(&hw->aq.asq_mutex);
13992 mutex_init(&hw->aq.arq_mutex);
13993
13994 pf->msg_enable = netif_msg_init(debug,
13995 NETIF_MSG_DRV |
13996 NETIF_MSG_PROBE |
13997 NETIF_MSG_LINK);
13998 if (debug < -1)
13999 pf->hw.debug_mask = debug;
14000
14001
14002 if (hw->revision_id == 0 &&
14003 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
14004 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
14005 i40e_flush(hw);
14006 msleep(200);
14007 pf->corer_count++;
14008
14009 i40e_clear_pxe_mode(hw);
14010 }
14011
14012
14013 i40e_clear_hw(hw);
14014 err = i40e_pf_reset(hw);
14015 if (err) {
14016 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
14017 goto err_pf_reset;
14018 }
14019 pf->pfr_count++;
14020
14021 hw->aq.num_arq_entries = I40E_AQ_LEN;
14022 hw->aq.num_asq_entries = I40E_AQ_LEN;
14023 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
14024 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
14025 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
14026
14027 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
14028 "%s-%s:misc",
14029 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
14030
14031 err = i40e_init_shared_code(hw);
14032 if (err) {
14033 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
14034 err);
14035 goto err_pf_reset;
14036 }
14037
14038
14039 pf->hw.fc.requested_mode = I40E_FC_NONE;
14040
14041 err = i40e_init_adminq(hw);
14042 if (err) {
14043 if (err == I40E_ERR_FIRMWARE_API_VERSION)
14044 dev_info(&pdev->dev,
14045 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
14046 else
14047 dev_info(&pdev->dev,
14048 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
14049
14050 goto err_pf_reset;
14051 }
14052 i40e_get_oem_version(hw);
14053
14054
14055 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
14056 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
14057 hw->aq.api_maj_ver, hw->aq.api_min_ver,
14058 i40e_nvm_version_str(hw));
14059
14060 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
14061 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
14062 dev_info(&pdev->dev,
14063 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
14064 else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
14065 dev_info(&pdev->dev,
14066 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
14067
14068 i40e_verify_eeprom(pf);
14069
14070
14071 if (hw->revision_id < 1)
14072 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
14073
14074 i40e_clear_pxe_mode(hw);
14075 err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
14076 if (err)
14077 goto err_adminq_setup;
14078
14079 err = i40e_sw_init(pf);
14080 if (err) {
14081 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
14082 goto err_sw_init;
14083 }
14084
14085 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
14086 hw->func_caps.num_rx_qp, 0, 0);
14087 if (err) {
14088 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
14089 goto err_init_lan_hmc;
14090 }
14091
14092 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
14093 if (err) {
14094 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
14095 err = -ENOENT;
14096 goto err_configure_lan_hmc;
14097 }
14098
14099
14100
14101
14102
14103 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
14104 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
14105 i40e_aq_stop_lldp(hw, true, NULL);
14106 }
14107
14108
14109 i40e_get_platform_mac_addr(pdev, pf);
14110
14111 if (!is_valid_ether_addr(hw->mac.addr)) {
14112 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
14113 err = -EIO;
14114 goto err_mac_addr;
14115 }
14116 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
14117 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
14118 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
14119 if (is_valid_ether_addr(hw->mac.port_addr))
14120 pf->hw_features |= I40E_HW_PORT_ID_VALID;
14121
14122 pci_set_drvdata(pdev, pf);
14123 pci_save_state(pdev);
14124
14125
14126 i40e_aq_set_dcb_parameters(hw, true, NULL);
14127
14128#ifdef CONFIG_I40E_DCB
14129 err = i40e_init_pf_dcb(pf);
14130 if (err) {
14131 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
14132 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
14133
14134 }
14135#endif
14136
14137
14138 timer_setup(&pf->service_timer, i40e_service_timer, 0);
14139 pf->service_timer_period = HZ;
14140
14141 INIT_WORK(&pf->service_task, i40e_service_task);
14142 clear_bit(__I40E_SERVICE_SCHED, pf->state);
14143
14144
14145 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
14146 if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1)
14147 pf->wol_en = false;
14148 else
14149 pf->wol_en = true;
14150 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
14151
14152
14153 i40e_determine_queue_usage(pf);
14154 err = i40e_init_interrupt_scheme(pf);
14155 if (err)
14156 goto err_switch_setup;
14157
14158
14159
14160
14161
14162
14163 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
14164 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
14165 else
14166 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
14167
14168
14169 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
14170 GFP_KERNEL);
14171 if (!pf->vsi) {
14172 err = -ENOMEM;
14173 goto err_switch_setup;
14174 }
14175
14176#ifdef CONFIG_PCI_IOV
14177
14178 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
14179 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
14180 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
14181 if (pci_num_vf(pdev))
14182 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
14183 }
14184#endif
14185 err = i40e_setup_pf_switch(pf, false);
14186 if (err) {
14187 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
14188 goto err_vsis;
14189 }
14190 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
14191
14192
14193 err = i40e_set_fc(hw, &set_fc_aq_fail, true);
14194 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
14195 dev_dbg(&pf->pdev->dev,
14196 "Set fc with err %s aq_err %s on get_phy_cap\n",
14197 i40e_stat_str(hw, err),
14198 i40e_aq_str(hw, hw->aq.asq_last_status));
14199 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
14200 dev_dbg(&pf->pdev->dev,
14201 "Set fc with err %s aq_err %s on set_phy_config\n",
14202 i40e_stat_str(hw, err),
14203 i40e_aq_str(hw, hw->aq.asq_last_status));
14204 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
14205 dev_dbg(&pf->pdev->dev,
14206 "Set fc with err %s aq_err %s on get_link_info\n",
14207 i40e_stat_str(hw, err),
14208 i40e_aq_str(hw, hw->aq.asq_last_status));
14209
14210
14211 for (i = 0; i < pf->num_alloc_vsi; i++) {
14212 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
14213 i40e_vsi_open(pf->vsi[i]);
14214 break;
14215 }
14216 }
14217
14218
14219
14220
14221 err = i40e_aq_set_phy_int_mask(&pf->hw,
14222 ~(I40E_AQ_EVENT_LINK_UPDOWN |
14223 I40E_AQ_EVENT_MEDIA_NA |
14224 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
14225 if (err)
14226 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
14227 i40e_stat_str(&pf->hw, err),
14228 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14229
14230
14231
14232
14233
14234 val = rd32(hw, I40E_REG_MSS);
14235 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
14236 val &= ~I40E_REG_MSS_MIN_MASK;
14237 val |= I40E_64BYTE_MSS;
14238 wr32(hw, I40E_REG_MSS, val);
14239 }
14240
14241 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
14242 msleep(75);
14243 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
14244 if (err)
14245 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
14246 i40e_stat_str(&pf->hw, err),
14247 i40e_aq_str(&pf->hw,
14248 pf->hw.aq.asq_last_status));
14249 }
14250
14251
14252
14253
14254 clear_bit(__I40E_DOWN, pf->state);
14255
14256
14257
14258
14259
14260
14261 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
14262 err = i40e_setup_misc_vector(pf);
14263 if (err) {
14264 dev_info(&pdev->dev,
14265 "setup of misc vector failed: %d\n", err);
14266 goto err_vsis;
14267 }
14268 }
14269
14270#ifdef CONFIG_PCI_IOV
14271
14272 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
14273 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
14274 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
14275
14276 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
14277 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
14278 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
14279 i40e_flush(hw);
14280
14281 if (pci_num_vf(pdev)) {
14282 dev_info(&pdev->dev,
14283 "Active VFs found, allocating resources.\n");
14284 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
14285 if (err)
14286 dev_info(&pdev->dev,
14287 "Error %d allocating resources for existing VFs\n",
14288 err);
14289 }
14290 }
14291#endif
14292
14293 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
14294 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
14295 pf->num_iwarp_msix,
14296 I40E_IWARP_IRQ_PILE_ID);
14297 if (pf->iwarp_base_vector < 0) {
14298 dev_info(&pdev->dev,
14299 "failed to get tracking for %d vectors for IWARP err=%d\n",
14300 pf->num_iwarp_msix, pf->iwarp_base_vector);
14301 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
14302 }
14303 }
14304
14305 i40e_dbg_pf_init(pf);
14306
14307
14308 i40e_send_version(pf);
14309
14310
14311 mod_timer(&pf->service_timer,
14312 round_jiffies(jiffies + pf->service_timer_period));
14313
14314
14315 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
14316 err = i40e_lan_add_device(pf);
14317 if (err)
14318 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
14319 err);
14320 }
14321
14322#define PCI_SPEED_SIZE 8
14323#define PCI_WIDTH_SIZE 8
14324
14325
14326
14327
14328 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
14329 char speed[PCI_SPEED_SIZE] = "Unknown";
14330 char width[PCI_WIDTH_SIZE] = "Unknown";
14331
14332
14333
14334
14335 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
14336 &link_status);
14337
14338 i40e_set_pci_config_data(hw, link_status);
14339
14340 switch (hw->bus.speed) {
14341 case i40e_bus_speed_8000:
14342 strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
14343 case i40e_bus_speed_5000:
14344 strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
14345 case i40e_bus_speed_2500:
14346 strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
14347 default:
14348 break;
14349 }
14350 switch (hw->bus.width) {
14351 case i40e_bus_width_pcie_x8:
14352 strlcpy(width, "8", PCI_WIDTH_SIZE); break;
14353 case i40e_bus_width_pcie_x4:
14354 strlcpy(width, "4", PCI_WIDTH_SIZE); break;
14355 case i40e_bus_width_pcie_x2:
14356 strlcpy(width, "2", PCI_WIDTH_SIZE); break;
14357 case i40e_bus_width_pcie_x1:
14358 strlcpy(width, "1", PCI_WIDTH_SIZE); break;
14359 default:
14360 break;
14361 }
14362
14363 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
14364 speed, width);
14365
14366 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
14367 hw->bus.speed < i40e_bus_speed_8000) {
14368 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
14369 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
14370 }
14371 }
14372
14373
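	/* get the requested speeds from the fw */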
14374 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
14375 if (err)
14376 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
14377 i40e_stat_str(&pf->hw, err),
14378 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14379 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
14380
14381
14382 i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
14383
14384
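	/* get the supported phy types from the fw */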
14385 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
14386 if (err)
14387 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
14388 i40e_stat_str(&pf->hw, err),
14389 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14390
14391
14392
14393
14394
14395
14396
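	/* Add a filter that drops flow control frames transmitted from any
	 * VSI, so a VF cannot inject PAUSE/PFC frames and control traffic
	 * for other functions on the port.
	 */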
14397 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
14398 pf->main_vsi_seid);
14399
14400 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
14401 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
14402 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
14403 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
14404 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
14405
14406 i40e_print_features(pf);
14407
14408 return 0;
14409
14410
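	/* Unwind what we've done if something failed in the setup */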
14411err_vsis:
14412 set_bit(__I40E_DOWN, pf->state);
14413 i40e_clear_interrupt_scheme(pf);
14414 kfree(pf->vsi);
14415err_switch_setup:
14416 i40e_reset_interrupt_capability(pf);
14417 del_timer_sync(&pf->service_timer);
14418err_mac_addr:
14419err_configure_lan_hmc:
14420 (void)i40e_shutdown_lan_hmc(hw);
14421err_init_lan_hmc:
14422 kfree(pf->qp_pile);
14423err_sw_init:
14424err_adminq_setup:
14425err_pf_reset:
14426 iounmap(hw->hw_addr);
14427err_ioremap:
14428 kfree(pf);
14429err_pf_alloc:
14430 pci_disable_pcie_error_reporting(pdev);
14431 pci_release_mem_regions(pdev);
14432err_pci_reg:
14433err_dma:
14434 pci_disable_device(pdev);
14435 return err;
14436}
14437
14438
14439
14440
14441
14442
14443
14444
14445
14446
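/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/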
14447static void i40e_remove(struct pci_dev *pdev)
14448{
14449 struct i40e_pf *pf = pci_get_drvdata(pdev);
14450 struct i40e_hw *hw = &pf->hw;
14451 i40e_status ret_code;
14452 int i;
14453
14454 i40e_dbg_pf_exit(pf);
14455
14456 i40e_ptp_stop(pf);
14457
14458
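	/* Disable RSS in hw */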
14459 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
14460 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
14461
14462
14463 set_bit(__I40E_SUSPENDED, pf->state);
14464 set_bit(__I40E_DOWN, pf->state);
14465 if (pf->service_timer.function)
14466 del_timer_sync(&pf->service_timer);
14467 if (pf->service_task.func)
14468 cancel_work_sync(&pf->service_task);
14469
14470
14471
14472
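	/* Client close must be called explicitly here because the service
	 * timer that would normally notify the client has been stopped.
	 */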
14473 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
14474
14475 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
14476 i40e_free_vfs(pf);
14477 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
14478 }
14479
14480 i40e_fdir_teardown(pf);
14481
14482
14483
14484
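	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */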
14485 for (i = 0; i < I40E_MAX_VEB; i++) {
14486 if (!pf->veb[i])
14487 continue;
14488
14489 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
14490 pf->veb[i]->uplink_seid == 0)
14491 i40e_switch_branch_release(pf->veb[i]);
14492 }
14493
14494
14495
14496
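	/* Now we can shut down the PF's VSI, just before we kill
	 * the adminq and HMC.
	 */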
14497 if (pf->vsi[pf->lan_vsi])
14498 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
14499
14500 i40e_cloud_filter_exit(pf);
14501
14502
14503 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
14504 ret_code = i40e_lan_del_device(pf);
14505 if (ret_code)
14506 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
14507 ret_code);
14508 }
14509
14510
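	/* shutdown and destroy the HMC */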
14511 if (hw->hmc.hmc_obj) {
14512 ret_code = i40e_shutdown_lan_hmc(hw);
14513 if (ret_code)
14514 dev_warn(&pdev->dev,
14515 "Failed to destroy the HMC resources: %d\n",
14516 ret_code);
14517 }
14518
14519
14520 i40e_shutdown_adminq(hw);
14521
14522
14523 mutex_destroy(&hw->aq.arq_mutex);
14524 mutex_destroy(&hw->aq.asq_mutex);
14525
14526
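	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */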
14527 rtnl_lock();
14528 i40e_clear_interrupt_scheme(pf);
14529 for (i = 0; i < pf->num_alloc_vsi; i++) {
14530 if (pf->vsi[i]) {
14531 i40e_vsi_clear_rings(pf->vsi[i]);
14532 i40e_vsi_clear(pf->vsi[i]);
14533 pf->vsi[i] = NULL;
14534 }
14535 }
14536 rtnl_unlock();
14537
14538 for (i = 0; i < I40E_MAX_VEB; i++) {
14539 kfree(pf->veb[i]);
14540 pf->veb[i] = NULL;
14541 }
14542
14543 kfree(pf->qp_pile);
14544 kfree(pf->vsi);
14545
14546 iounmap(hw->hw_addr);
14547 kfree(pf);
14548 pci_release_mem_regions(pdev);
14549
14550 pci_disable_pcie_error_reporting(pdev);
14551 pci_disable_device(pdev);
14552}
14553
14554
14555
14556
14557
14558
14559
14560
14561
14562
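/**
 * i40e_pci_error_detected - notification that a PCI error occurred
 * @pdev: PCI device information struct
 * @error: the type of PCI error
 *
 * Called when an error is reported on the PCI bus; gives the driver a
 * chance to prepare for the upcoming reset.
 **/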
14563static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
14564 enum pci_channel_state error)
14565{
14566 struct i40e_pf *pf = pci_get_drvdata(pdev);
14567
14568 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
14569
14570 if (!pf) {
14571 dev_info(&pdev->dev,
14572 "Cannot recover - error happened during device probe\n");
14573 return PCI_ERS_RESULT_DISCONNECT;
14574 }
14575
14576
14577 if (!test_bit(__I40E_SUSPENDED, pf->state))
14578 i40e_prep_for_reset(pf, false);
14579
14580
14581 return PCI_ERS_RESULT_NEED_RESET;
14582}
14583
14584
14585
14586
14587
14588
14589
14590
14591
14592
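/**
 * i40e_pci_error_slot_reset - a PCI slot reset has just happened
 * @pdev: PCI device information struct
 *
 * Called to determine whether the driver can recover now that the PCI
 * slot has been reset.  Returns PCI_ERS_RESULT_RECOVERED if the device
 * looks healthy again, PCI_ERS_RESULT_DISCONNECT otherwise.
 **/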
14593static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
14594{
14595 struct i40e_pf *pf = pci_get_drvdata(pdev);
14596 pci_ers_result_t result;
14597 int err;
14598 u32 reg;
14599
14600 dev_dbg(&pdev->dev, "%s\n", __func__);
14601 if (pci_enable_device_mem(pdev)) {
14602 dev_info(&pdev->dev,
14603 "Cannot re-enable PCI device after reset.\n");
14604 result = PCI_ERS_RESULT_DISCONNECT;
14605 } else {
14606 pci_set_master(pdev);
14607 pci_restore_state(pdev);
14608 pci_save_state(pdev);
14609 pci_wake_from_d3(pdev, false);
14610
14611 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
14612 if (reg == 0)
14613 result = PCI_ERS_RESULT_RECOVERED;
14614 else
14615 result = PCI_ERS_RESULT_DISCONNECT;
14616 }
14617
14618 err = pci_cleanup_aer_uncorrect_error_status(pdev);
14619 if (err) {
14620 dev_info(&pdev->dev,
14621 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
14622 err);
14623
14624 }
14625
14626 return result;
14627}
14628
14630
14631
14632
14633
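/**
 * i40e_pci_error_reset_prepare - prepare the device for a PCI function reset
 * @pdev: PCI device information struct
 **/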
14634static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
14635{
14636 struct i40e_pf *pf = pci_get_drvdata(pdev);
14637
14638 i40e_prep_for_reset(pf, false);
14639}
14640
14642
14643
14644
14645
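/**
 * i40e_pci_error_reset_done - PCI reset done, rebuild the device
 * @pdev: PCI device information struct
 **/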
14646static void i40e_pci_error_reset_done(struct pci_dev *pdev)
14647{
14648 struct i40e_pf *pf = pci_get_drvdata(pdev);
14649
14650 i40e_reset_and_rebuild(pf, false, false);
14651}
14652
14653
14654
14655
14656
14657
14658
14659
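/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after the PCI error
 * and/or reset recovery have finished.
 **/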
14660static void i40e_pci_error_resume(struct pci_dev *pdev)
14661{
14662 struct i40e_pf *pf = pci_get_drvdata(pdev);
14663
14664 dev_dbg(&pdev->dev, "%s\n", __func__);
14665 if (test_bit(__I40E_SUSPENDED, pf->state))
14666 return;
14667
14668 i40e_handle_reset_warning(pf, false);
14669}
14670
14671
14672
14673
14674
14675
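/**
 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
 * using the mac_address_write admin queue function
 * @pf: pointer to i40e_pf struct
 **/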
14676static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
14677{
14678 struct i40e_hw *hw = &pf->hw;
14679 i40e_status ret;
14680 u8 mac_addr[6];
14681 u16 flags = 0;
14682
14683
14684 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
14685 ether_addr_copy(mac_addr,
14686 pf->vsi[pf->lan_vsi]->netdev->dev_addr);
14687 } else {
14688 dev_err(&pf->pdev->dev,
14689 "Failed to retrieve MAC address; using default\n");
14690 ether_addr_copy(mac_addr, hw->mac.addr);
14691 }
14692
14693
14694
14695
14696
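	/* The firmware expects the mac_address_write command to be issued
	 * first with one of the LAA flags before it is called again with
	 * the multicast magic enable flags.
	 */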
14697 flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
14698
14699 if (hw->func_caps.flex10_enable && hw->partition_id != 1)
14700 flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
14701
14702 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
14703 if (ret) {
14704 dev_err(&pf->pdev->dev,
14705 "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
14706 return;
14707 }
14708
14709 flags = I40E_AQC_MC_MAG_EN
14710 | I40E_AQC_WOL_PRESERVE_ON_PFR
14711 | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
14712 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
14713 if (ret)
14714 dev_err(&pf->pdev->dev,
14715 "Failed to enable Multicast Magic Packet wake up\n");
14716}
14717
14718
14719
14720
14721
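/**
 * i40e_shutdown - PCI callback for shutting down the device
 * @pdev: PCI device information struct
 **/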
14722static void i40e_shutdown(struct pci_dev *pdev)
14723{
14724 struct i40e_pf *pf = pci_get_drvdata(pdev);
14725 struct i40e_hw *hw = &pf->hw;
14726
14727 set_bit(__I40E_SUSPENDED, pf->state);
14728 set_bit(__I40E_DOWN, pf->state);
14729
14730 del_timer_sync(&pf->service_timer);
14731 cancel_work_sync(&pf->service_task);
14732 i40e_cloud_filter_exit(pf);
14733 i40e_fdir_teardown(pf);
14734
14735
14736
14737
14738 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
14739
14740 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
14741 i40e_enable_mc_magic_wake(pf);
14742
14743 i40e_prep_for_reset(pf, false);
14744
14745 wr32(hw, I40E_PFPM_APM,
14746 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
14747 wr32(hw, I40E_PFPM_WUFC,
14748 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
14749
14750
14751
14752
14753
14754 rtnl_lock();
14755 i40e_clear_interrupt_scheme(pf);
14756 rtnl_unlock();
14757
14758 if (system_state == SYSTEM_POWER_OFF) {
14759 pci_wake_from_d3(pdev, pf->wol_en);
14760 pci_set_power_state(pdev, PCI_D3hot);
14761 }
14762}
14763
14764
14765
14766
14767
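/**
 * i40e_suspend - PM callback for moving the device into a lower power state
 * @dev: generic device information structure
 **/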
14768static int __maybe_unused i40e_suspend(struct device *dev)
14769{
14770 struct pci_dev *pdev = to_pci_dev(dev);
14771 struct i40e_pf *pf = pci_get_drvdata(pdev);
14772 struct i40e_hw *hw = &pf->hw;
14773
14774
14775 if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
14776 return 0;
14777
14778 set_bit(__I40E_DOWN, pf->state);
14779
14780
14781 del_timer_sync(&pf->service_timer);
14782 cancel_work_sync(&pf->service_task);
14783
14784
14785
14786
14787 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
14788
14789 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
14790 i40e_enable_mc_magic_wake(pf);
14791
14792
14793
14794
14795
14796 rtnl_lock();
14797
14798 i40e_prep_for_reset(pf, true);
14799
14800 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
14801 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
14802
14803
14804
14805
14806
14807
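	/* Release the IRQs and the interrupt scheme here; they are restored
	 * by i40e_restore_interrupt_scheme() in i40e_resume().
	 */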
14808 i40e_clear_interrupt_scheme(pf);
14809
14810 rtnl_unlock();
14811
14812 return 0;
14813}
14814
14815
14816
14817
14818
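/**
 * i40e_resume - PM callback for waking the device from a low power state
 * @dev: generic device information structure
 **/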
14819static int __maybe_unused i40e_resume(struct device *dev)
14820{
14821 struct pci_dev *pdev = to_pci_dev(dev);
14822 struct i40e_pf *pf = pci_get_drvdata(pdev);
14823 int err;
14824
14825
14826 if (!test_bit(__I40E_SUSPENDED, pf->state))
14827 return 0;
14828
14829
14830
14831
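	/* We need to hold the RTNL lock while restoring the interrupt scheme
	 * and rebuilding, since queues are re-created here.
	 */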
14832 rtnl_lock();
14833
14834
14835
14836
14837 err = i40e_restore_interrupt_scheme(pf);
14838 if (err) {
14839 dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
14840 err);
14841 }
14842
14843 clear_bit(__I40E_DOWN, pf->state);
14844 i40e_reset_and_rebuild(pf, false, true);
14845
14846 rtnl_unlock();
14847
14848
14849 clear_bit(__I40E_SUSPENDED, pf->state);
14850
14851
14852 mod_timer(&pf->service_timer,
14853 round_jiffies(jiffies + pf->service_timer_period));
14854
14855 return 0;
14856}
14857
14858static const struct pci_error_handlers i40e_err_handler = {
14859 .error_detected = i40e_pci_error_detected,
14860 .slot_reset = i40e_pci_error_slot_reset,
	.reset_prepare = i40e_pci_error_reset_prepare,
	.reset_done = i40e_pci_error_reset_done,
14865 .resume = i40e_pci_error_resume,
14866};
14867
14868static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
14869
14870static struct pci_driver i40e_driver = {
14871 .name = i40e_driver_name,
14872 .id_table = i40e_pci_tbl,
14873 .probe = i40e_probe,
14874 .remove = i40e_remove,
14875 .driver = {
14876 .pm = &i40e_pm_ops,
14877 },
14878 .shutdown = i40e_shutdown,
14879 .err_handler = &i40e_err_handler,
14880 .sriov_configure = i40e_pci_sriov_configure,
14881};
14882
14883
14884
14885
14886
14887
14888
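/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/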
14889static int __init i40e_init_module(void)
14890{
14891 pr_info("%s: %s - version %s\n", i40e_driver_name,
14892 i40e_driver_string, i40e_driver_version_str);
14893 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
14894
14895
14896
14897
14898
14899
14900
14901
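	/* Use a driver-private workqueue with WQ_MEM_RECLAIM so the service
	 * tasks can still make progress during memory reclaim (for example
	 * on resume from suspend) instead of relying on the system workqueue.
	 */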
14902 i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
14903 if (!i40e_wq) {
14904 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
14905 return -ENOMEM;
14906 }
14907
14908 i40e_dbg_init();
14909 return pci_register_driver(&i40e_driver);
14910}
14911module_init(i40e_init_module);
14912
14913
14914
14915
14916
14917
14918
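/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/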
14919static void __exit i40e_exit_module(void)
14920{
14921 pci_unregister_driver(&i40e_driver);
14922 destroy_workqueue(i40e_wq);
14923 i40e_dbg_exit();
14924}
14925module_exit(i40e_exit_module);
14926