/* Intel(R) Ethernet Connection XL710 Network Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 */

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>

#include "i40e.h"
#include "i40e_diag.h"
#include <net/udp_tunnel.h>

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
	"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
#define DRV_VERSION_BUILD 16
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
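
/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 */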
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40e_wq;
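
/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/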
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}
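
/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:  pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/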
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}
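
/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/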
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}
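
/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:  pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/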
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}
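
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/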
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
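
/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/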
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
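
/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/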
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}
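
/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/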
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		queue_work(i40e_wq, &pf->service_task);
}
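
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/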
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
					tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
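
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/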
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}
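
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: buffer to fill with the accumulated per-ring counters
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/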
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors	= vsi_stats->rx_length_errors;

	return stats;
}
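
/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/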
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}
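
/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/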
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}
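
/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/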
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
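
/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/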
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}
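
/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/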
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}
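
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/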
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}

#ifdef I40E_FCOE
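/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 **/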
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
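
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/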
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	u64 tx_lost_interrupt;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on an ongoing basis so they can be stashed in the
	 * net_stats struct when i40e_get_vsi_stats_struct is called.
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	tx_lost_interrupt = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;
		tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;

		/* Rx queue is part of the same block as Tx ring */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->tx_lost_interrupt = tx_lost_interrupt;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
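
/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/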
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_sb_match, &nsd->fd_sb_match);
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}
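
/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/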
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
#ifdef I40E_FCOE
	i40e_update_fcoe_stats(vsi);
#endif
}
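
/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure its a VF filter, else doesn't matter
 * @is_netdev: make sure its a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/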
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan)    &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}
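
/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure its a VF filter, else doesn't matter
 * @is_netdev: make sure its a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/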
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}
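
/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/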
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* A port VLAN (pvid) or any filter with a specific VLAN id
	 * (>= 0, i.e. not I40E_VLAN_ANY) means the VSI is in VLAN mode.
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0 || vsi->info.pvid)
			return true;
	}

	return false;
}
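
/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/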
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (vsi->info.pvid)
			f->vlan = le16_to_cpu(vsi->info.pvid);
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}
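
/**
 * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Removes a given MAC address from a VSI, regardless of VLAN
 *
 * Returns 0 for success, or error
 **/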
int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
			  bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f = NULL;
	int changed = 0;

	WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
	     "Missing mac_filter_list_lock\n");
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (is_vf == f->is_vf) &&
		    (is_netdev == f->is_netdev)) {
			f->counter--;
			changed = 1;
			if (f->counter == 0)
				f->state = I40E_FILTER_REMOVE;
		}
	}
	if (changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
		return 0;
	}
	return -ENOENT;
}
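
/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/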
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}
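
/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure its a VF filter, else doesn't matter
 * @is_netdev: make sure its a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_list_lock
 * being held.
 **/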
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;
	int changed = false;

	if (!vsi || !macaddr)
		return NULL;

	/* Do not allow broadcast filter to be added since broadcast filter
	 * is added as part of mac filter restore. If there is an attempt
	 * to overwrite the broadcast filter the fw can crash.
	 */
	if (is_broadcast_ether_addr(macaddr))
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		/* If we're in overflow promisc mode, set the state directly
		 * to failed, so we don't bother to try sending the filter
		 * to the hardware.
		 */
		if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))
			f->state = I40E_FILTER_FAILED;
		else
			f->state = I40E_FILTER_NEW;
		changed = true;
		INIT_LIST_HEAD(&f->list);
		list_add_tail(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	if (changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}
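
/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * NOTE: This function is expected to be called with mac_filter_list_lock
 * being held.
 **/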
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by VF or netdev */
		int min_f = 0;

		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		if ((f->state == I40E_FILTER_FAILED) ||
		    (f->state == I40E_FILTER_NEW)) {
			/* this one never got added by the FW. Just remove it,
			 * no need to sync anything.
			 */
			list_del(&f->list);
			kfree(f);
		} else {
			f->state = I40E_FILTER_REMOVE;
			vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
			vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
		}
	}
}
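
/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/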
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	spin_lock_bh(&vsi->mac_filter_list_lock);
	i40e_del_mac_all_vlan(vsi, netdev->dev_addr, false, true);
	i40e_put_mac_in_vlan(vsi, addr->sa_data, false, true);
	spin_unlock_bh(&vsi->mac_filter_list_lock);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
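
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/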
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			      struct i40e_vsi_context *ctxt,
			      u8 enabled_tc,
			      bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
#endif
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	qcount = vsi->alloc_queue_pairs;

	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->alloc_rss_size,
					       num_tc_qps);
				break;
#ifdef I40E_FCOE
			case I40E_VSI_FCOE:
				qcount = num_tc_qps;
				break;
#endif
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					       cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
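
/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/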
#ifdef I40E_FCOE
void i40e_set_rx_mode(struct net_device *netdev)
#else
static void i40e_set_rx_mode(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	spin_lock_bh(&vsi->mac_filter_list_lock);

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {

		if (!f->is_netdev)
			continue;

		netdev_for_each_mc_addr(mca, netdev)
			if (ether_addr_equal(mca->addr, f->macaddr))
				goto bottom_of_search_loop;

		netdev_for_each_uc_addr(uca, netdev)
			if (ether_addr_equal(uca->addr, f->macaddr))
				goto bottom_of_search_loop;

		for_each_dev_addr(netdev, ha)
			if (ether_addr_equal(ha->addr, f->macaddr))
				goto bottom_of_search_loop;

		/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
		i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);

bottom_of_search_loop:
		continue;
	}
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
}
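
/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: pointer to vsi struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries needs to be undone.
 *
 * MAC filter entries from list were slated to be removed from device.
 **/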
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
					 struct list_head *from)
{
	struct i40e_mac_filter *f, *ftmp;

	list_for_each_entry_safe(f, ftmp, from, list) {
		/* Move the element back into MAC filter list */
		list_move_tail(&f->list, &vsi->mac_filter_list);
	}
}
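
/**
 * i40e_update_filter_state - Update filter state based on return data
 * from firmware
 * @count: Number of filters added
 * @add_list: return data from fw
 * @add_head: pointer to first filter in current batch
 * @aq_err: status from fw
 *
 * MAC filter entries from list were slated to be added to device. Returns
 * number of successful filters. Note that 0 does NOT mean failure!
 **/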
static int
i40e_update_filter_state(int count,
			 struct i40e_aqc_add_macvlan_element_data *add_list,
			 struct i40e_mac_filter *add_head, int aq_err)
{
	int retval = 0;
	int i;

	if (!aq_err) {
		retval = count;
		/* Everything's good, mark all filters active. */
		for (i = 0; i < count ; i++) {
			add_head->state = I40E_FILTER_ACTIVE;
			add_head = list_next_entry(add_head, list);
		}
	} else if (aq_err == I40E_AQ_RC_ENOSPC) {
		/* Device ran out of filter space. Check the return value
		 * for each filter to see which ones are active.
		 */
		for (i = 0; i < count ; i++) {
			if (add_list[i].match_method ==
			    I40E_AQC_MM_ERR_NO_RES) {
				add_head->state = I40E_FILTER_FAILED;
			} else {
				add_head->state = I40E_FILTER_ACTIVE;
				retval++;
			}
			add_head = list_next_entry(add_head, list);
		}
	} else {
		/* Some other horrible thing happened, fail all filters */
		retval = 0;
		for (i = 0; i < count ; i++) {
			add_head->state = I40E_FILTER_FAILED;
			add_head = list_next_entry(add_head, list);
		}
	}
	return retval;
}
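
/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/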
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp, *add_head = NULL;
	struct list_head tmp_add_list, tmp_del_list;
	struct i40e_hw *hw = &vsi->back->hw;
	bool promisc_changed = false;
	char vsi_name[16] = "PF";
	int filter_list_len = 0;
	u32 changed_flags = 0;
	i40e_status aq_ret = 0;
	int retval = 0;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	int aq_err = 0;
	u16 cmd_flags;
	int list_size;
	int fcnt;

	/* scratch buffers used to batch the filter lists through the AQ */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	if (vsi->type == I40E_VSI_SRIOV)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
	else if (vsi->type != I40E_VSI_MAIN)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		spin_lock_bh(&vsi->mac_filter_list_lock);
		/* Create a list of filters to delete. */
		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (f->state == I40E_FILTER_REMOVE) {
				WARN_ON(f->counter != 0);
				/* Move the element into temporary del_list */
				list_move_tail(&f->list, &tmp_del_list);
				vsi->active_filters--;
			}
			if (f->state == I40E_FILTER_NEW) {
				WARN_ON(f->counter == 0);
				/* Move the element into temporary add_list */
				list_move_tail(&f->list, &tmp_add_list);
			}
		}
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	/* Now process 'del_list' outside the lock */
	if (!list_empty(&tmp_del_list)) {
		filter_list_len = hw->aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		list_size = filter_list_len *
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kzalloc(list_size, GFP_ATOMIC);
		if (!del_list) {
			/* Undo VSI's MAC filter entry element updates */
			spin_lock_bh(&vsi->mac_filter_list_lock);
			i40e_undo_del_filter_entries(vsi, &tmp_del_list);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			retval = -ENOMEM;
			goto out;
		}

		list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
			cmd_flags = 0;

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			if (f->vlan == I40E_VLAN_ANY) {
				del_list[num_del].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			} else {
				del_list[num_del].vlan_tag =
					cpu_to_le16((u16)(f->vlan));
			}

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid,
								del_list,
								num_del, NULL);
				aq_err = hw->aq.asq_last_status;
				num_del = 0;
				memset(del_list, 0, list_size);

				/* Explicitly ignore and do not report when
				 * firmware returns ENOENT.
				 */
				if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
					retval = -EIO;
					dev_info(&pf->pdev->dev,
						 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
						 vsi_name,
						 i40e_stat_str(hw, aq_ret),
						 i40e_aq_str(hw, aq_err));
				}
			}
			/* Release memory for MAC filter entries which were
			 * synced up with HW.
			 */
			list_del(&f->list);
			kfree(f);
		}

		if (num_del) {
			aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list,
							num_del, NULL);
			aq_err = hw->aq.asq_last_status;
			num_del = 0;

			/* Explicitly ignore and do not report when firmware
			 * returns ENOENT.
			 */
			if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
				retval = -EIO;
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error on %s, err %s aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw, aq_err));
			}
		}

		kfree(del_list);
		del_list = NULL;
	}

	if (!list_empty(&tmp_add_list)) {
		/* Do all the adds now. */
		filter_list_len = hw->aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		list_size = filter_list_len *
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kzalloc(list_size, GFP_ATOMIC);
		if (!add_list) {
			retval = -ENOMEM;
			goto out;
		}
		num_add = 0;
		list_for_each_entry(f, &tmp_add_list, list) {
			if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
				     &vsi->state)) {
				f->state = I40E_FILTER_FAILED;
				continue;
			}
			/* add to add array */
			if (num_add == 0)
				add_head = f;
			cmd_flags = 0;
			ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
			if (f->vlan == I40E_VLAN_ANY) {
				add_list[num_add].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			} else {
				add_list[num_add].vlan_tag =
					cpu_to_le16((u16)(f->vlan));
			}
			add_list[num_add].queue_number = 0;
			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
							     add_list, num_add,
							     NULL);
				aq_err = hw->aq.asq_last_status;
				fcnt = i40e_update_filter_state(num_add,
								add_list,
								add_head,
								aq_ret);
				vsi->active_filters += fcnt;

				if (fcnt != num_add) {
					promisc_changed = true;
					set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
						&vsi->state);
					vsi->promisc_threshold =
						(vsi->active_filters * 3) / 4;
					dev_warn(&pf->pdev->dev,
						 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
						 i40e_aq_str(hw, aq_err),
						 vsi_name);
				}
				memset(add_list, 0, list_size);
				num_add = 0;
			}
		}
		if (num_add) {
			aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
						     add_list, num_add, NULL);
			aq_err = hw->aq.asq_last_status;
			fcnt = i40e_update_filter_state(num_add, add_list,
							add_head, aq_ret);
			vsi->active_filters += fcnt;
			if (fcnt != num_add) {
				promisc_changed = true;
				set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state);
				vsi->promisc_threshold =
					(vsi->active_filters * 3) / 4;
				dev_warn(&pf->pdev->dev,
					 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
					 i40e_aq_str(hw, aq_err), vsi_name);
			}
		}
		/* Now move all of the filters from the temp add list back to
		 * the VSI's list.
		 */
		spin_lock_bh(&vsi->mac_filter_list_lock);
		list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
			list_move_tail(&f->list, &vsi->mac_filter_list);
		}
		spin_unlock_bh(&vsi->mac_filter_list_lock);
		kfree(add_list);
		add_list = NULL;
	}

	/* Check to see if we can drop out of overflow promiscuous mode. */
	if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) &&
	    (vsi->active_filters < vsi->promisc_threshold)) {
		int failed_count = 0;
		/* See if we have any failed filters. We can't drop out of
		 * promiscuous until these have all been deleted.
		 */
		spin_lock_bh(&vsi->mac_filter_list_lock);
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			if (f->state == I40E_FILTER_FAILED)
				failed_count++;
		}
		spin_unlock_bh(&vsi->mac_filter_list_lock);
		if (!failed_count) {
			dev_info(&pf->pdev->dev,
				 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
				 vsi_name);
			clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
			promisc_changed = true;
			vsi->promisc_threshold = 0;
		}
	}

	/* if the VF is not trusted do not do promisc */
	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
		clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
		goto out;
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;

		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed on %s, err %s aq_err %s\n",
				 vsi_name,
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}
	if ((changed_flags & IFF_PROMISC) ||
	    (promisc_changed &&
	     test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) {
		bool cur_promisc;

		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state));
		if ((vsi->type == I40E_VSI_MAIN) &&
		    (pf->lan_veb != I40E_NO_VEB) &&
		    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
			/* set defport ON for Main VSI instead of true promisc
			 * this way we will get all unicast/multicast and VLAN
			 * promisc behavior but will not get VF or VMDq
			 * traffic replicated on the Main VSI.
			 */
			if (pf->cur_promisc != cur_promisc) {
				pf->cur_promisc = cur_promisc;
				if (cur_promisc)
					aq_ret =
					      i40e_aq_set_default_vsi(hw,
								      vsi->seid,
								      NULL);
				else
					aq_ret =
					    i40e_aq_clear_default_vsi(hw,
								      vsi->seid,
								      NULL);
				if (aq_ret) {
					retval = i40e_aq_rc_to_posix(aq_ret,
							hw->aq.asq_last_status);
					dev_info(&pf->pdev->dev,
						 "Set default VSI failed on %s, err %s, aq_err %s\n",
						 vsi_name,
						 i40e_stat_str(hw, aq_ret),
						 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
				}
			}
		} else {
			aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
							  hw,
							  vsi->seid,
							  cur_promisc, NULL,
							  true);
			if (aq_ret) {
				retval =
				i40e_aq_rc_to_posix(aq_ret,
						    hw->aq.asq_last_status);
				dev_info(&pf->pdev->dev,
					 "set unicast promisc failed on %s, err %s, aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
			}
			aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
							  hw,
							  vsi->seid,
							  cur_promisc, NULL);
			if (aq_ret) {
				retval =
				i40e_aq_rc_to_posix(aq_ret,
						    hw->aq.asq_last_status);
				dev_info(&pf->pdev->dev,
					 "set multicast promisc failed on %s, err %s, aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
			}
		}
		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
						   vsi->seid,
						   cur_promisc, NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     pf->hw.aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set brdcast promisc failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw,
					     hw->aq.asq_last_status));
		}
	}
out:
	/* if something went wrong then set the changed flag so we try again */
	if (retval)
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;

	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
	return retval;
}
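
/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/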
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
		return;
	pf->flags &= ~I40E_FLAG_FILTER_SYNC;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
			int ret = i40e_sync_vsi_filters(pf->vsi[v]);

			if (ret) {
				/* come back and try again later */
				pf->flags |= I40E_FLAG_FILTER_SYNC;
				break;
			}
		}
	}
}
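
/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/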
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	struct i40e_vsi *vsi = np->vsi;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
		return -EINVAL;

	netdev_info(netdev, "changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		i40e_vsi_reinit_locked(vsi);
	i40e_notify_client_of_l2_param_changes(vsi);
	return 0;
}
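
/**
 * i40e_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/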
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return i40e_ptp_get_ts_config(pf, ifr);
	case SIOCSHWTSTAMP:
		return i40e_ptp_set_ts_config(pf, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
2278
2279
2280
2281
2282
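/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/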
2283void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2284{
2285 struct i40e_vsi_context ctxt;
2286 i40e_status ret;
2287
2288 if ((vsi->info.valid_sections &
2289 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2290 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2291 return;
2292
2293 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2294 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2295 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2296
2297 ctxt.seid = vsi->seid;
2298 ctxt.info = vsi->info;
2299 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2300 if (ret) {
2301 dev_info(&vsi->back->pdev->dev,
2302 "update vlan stripping failed, err %s aq_err %s\n",
2303 i40e_stat_str(&vsi->back->hw, ret),
2304 i40e_aq_str(&vsi->back->hw,
2305 vsi->back->hw.aq.asq_last_status));
2306 }
2307}
2308
2309
2310
2311
2312
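/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/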
2313void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2314{
2315 struct i40e_vsi_context ctxt;
2316 i40e_status ret;
2317
2318 if ((vsi->info.valid_sections &
2319 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2320 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2321 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2322 return;
2323
2324 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2325 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2326 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2327
2328 ctxt.seid = vsi->seid;
2329 ctxt.info = vsi->info;
2330 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2331 if (ret) {
2332 dev_info(&vsi->back->pdev->dev,
2333 "update vlan stripping failed, err %s aq_err %s\n",
2334 i40e_stat_str(&vsi->back->hw, ret),
2335 i40e_aq_str(&vsi->back->hw,
2336 vsi->back->hw.aq.asq_last_status));
2337 }
2338}
2339
2340
2341
2342
2343
2344
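/**
 * i40e_vlan_rx_register - Setup or shutdown vlan offload
 * @netdev: network interface to be adjusted
 * @features: netdev features to test if VLAN offload is enabled or not
 **/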
2345static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2346{
2347 struct i40e_netdev_priv *np = netdev_priv(netdev);
2348 struct i40e_vsi *vsi = np->vsi;
2349
2350 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2351 i40e_vlan_stripping_enable(vsi);
2352 else
2353 i40e_vlan_stripping_disable(vsi);
2354}
2355
2356
2357
2358
2359
2360
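/**
 * i40e_vsi_add_vlan - Add vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only, -1 = any)
 **/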
2361int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
2362{
2363 struct i40e_mac_filter *f, *ftmp, *add_f;
2364 bool is_netdev, is_vf;
2365
2366 is_vf = (vsi->type == I40E_VSI_SRIOV);
2367 is_netdev = !!(vsi->netdev);
2368
2369
2370 spin_lock_bh(&vsi->mac_filter_list_lock);
2371
2372 if (is_netdev) {
2373 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
2374 is_vf, is_netdev);
2375 if (!add_f) {
2376 dev_info(&vsi->back->pdev->dev,
2377 "Could not add vlan filter %d for %pM\n",
2378 vid, vsi->netdev->dev_addr);
2379 spin_unlock_bh(&vsi->mac_filter_list_lock);
2380 return -ENOMEM;
2381 }
2382 }
2383
2384 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
2385 add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2386 if (!add_f) {
2387 dev_info(&vsi->back->pdev->dev,
2388 "Could not add vlan filter %d for %pM\n",
2389 vid, f->macaddr);
2390 spin_unlock_bh(&vsi->mac_filter_list_lock);
2391 return -ENOMEM;
2392 }
2393 }
2394
2395
2396
2397
2398
2399
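/* When adding the first real VLAN, replace any catch-all
 * (I40E_VLAN_ANY) filters with untagged (vid 0) filters so that
 * untagged traffic is still received while tagged traffic is
 * matched per-VLAN.
 */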
2400 if (vid > 0) {
2401 if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
2402 I40E_VLAN_ANY,
2403 is_vf, is_netdev)) {
2404 i40e_del_filter(vsi, vsi->netdev->dev_addr,
2405 I40E_VLAN_ANY, is_vf, is_netdev);
2406 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
2407 is_vf, is_netdev);
2408 if (!add_f) {
2409 dev_info(&vsi->back->pdev->dev,
2410 "Could not add filter 0 for %pM\n",
2411 vsi->netdev->dev_addr);
2412 spin_unlock_bh(&vsi->mac_filter_list_lock);
2413 return -ENOMEM;
2414 }
2415 }
2416 }
2417
2418
2419 if (vid > 0 && !vsi->info.pvid) {
2420 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
2421 if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2422 is_vf, is_netdev))
2423 continue;
2424 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2425 is_vf, is_netdev);
2426 add_f = i40e_add_filter(vsi, f->macaddr,
2427 0, is_vf, is_netdev);
2428 if (!add_f) {
2429 dev_info(&vsi->back->pdev->dev,
2430 "Could not add filter 0 for %pM\n",
2431 f->macaddr);
2432 spin_unlock_bh(&vsi->mac_filter_list_lock);
2433 return -ENOMEM;
2434 }
2435 }
2436 }
2437
2438 spin_unlock_bh(&vsi->mac_filter_list_lock);
2439
2440
2441
2442
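/* schedule our worker thread which will take care of
 * applying the new filter changes
 */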
2443 i40e_service_event_schedule(vsi->back);
2444 return 0;
2445}
2446
2447
2448
2449
2450
2451
2452
2453
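/**
 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
 **/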
2454int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
2455{
2456 struct net_device *netdev = vsi->netdev;
2457 struct i40e_mac_filter *f, *ftmp, *add_f;
2458 bool is_vf, is_netdev;
2459 int filter_count = 0;
2460
2461 is_vf = (vsi->type == I40E_VSI_SRIOV);
2462 is_netdev = !!(netdev);
2463
2464
2465 spin_lock_bh(&vsi->mac_filter_list_lock);
2466
2467 if (is_netdev)
2468 i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
2469
2470 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
2471 i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2472
2473
2474
2475
2476
2477
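/* go through all the filters for this VSI and if there is only
 * vid == 0 it means there are no other filters, so vid 0 must
 * be replaced with -1. This signifies that we should from now
 * on accept any traffic (with any tag present, or untagged)
 */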
2478 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2479 if (is_netdev) {
2480 if (f->vlan &&
2481 ether_addr_equal(netdev->dev_addr, f->macaddr))
2482 filter_count++;
2483 }
2484
2485 if (f->vlan)
2486 filter_count++;
2487 }
2488
2489 if (!filter_count && is_netdev) {
2490 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
2491 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
2492 is_vf, is_netdev);
2493 if (!f) {
2494 dev_info(&vsi->back->pdev->dev,
2495 "Could not add filter %d for %pM\n",
2496 I40E_VLAN_ANY, netdev->dev_addr);
2497 spin_unlock_bh(&vsi->mac_filter_list_lock);
2498 return -ENOMEM;
2499 }
2500 }
2501
2502 if (!filter_count) {
2503 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
2504 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
2505 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2506 is_vf, is_netdev);
2507 if (!add_f) {
2508 dev_info(&vsi->back->pdev->dev,
2509 "Could not add filter %d for %pM\n",
2510 I40E_VLAN_ANY, f->macaddr);
2511 spin_unlock_bh(&vsi->mac_filter_list_lock);
2512 return -ENOMEM;
2513 }
2514 }
2515 }
2516
2517 spin_unlock_bh(&vsi->mac_filter_list_lock);
2518
2519
2520
2521
2522 i40e_service_event_schedule(vsi->back);
2523 return 0;
2524}
2525
2526
2527
2528
2529
2530
2531
2532
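/**
 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol value
 * @vid: vlan id to be added
 *
 * net_device_ops implementation for adding vlan ids
 **/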
2533#ifdef I40E_FCOE
2534int i40e_vlan_rx_add_vid(struct net_device *netdev,
2535 __always_unused __be16 proto, u16 vid)
2536#else
2537static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2538 __always_unused __be16 proto, u16 vid)
2539#endif
2540{
2541 struct i40e_netdev_priv *np = netdev_priv(netdev);
2542 struct i40e_vsi *vsi = np->vsi;
2543 int ret = 0;
2544
2545 if (vid > 4095)
2546 return -EINVAL;
2547
2548
2549
2550
2551
2552
2553
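/* If the network stack called us with vid = 0 then
 * it is asking to receive priority tagged packets with
 * vlan id 0.  Our HW receives them by default when configured
 * to receive untagged packets so there is no need to add an
 * extra filter for vlan 0 tagged packets.
 */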
2554 if (vid)
2555 ret = i40e_vsi_add_vlan(vsi, vid);
2556
2557 if (!ret && (vid < VLAN_N_VID))
2558 set_bit(vid, vsi->active_vlans);
2559
2560 return ret;
2561}
2562
2563
2564
2565
2566
2567
2568
2569
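/**
 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol value
 * @vid: vlan id to be removed
 *
 * net_device_ops implementation for removing vlan ids
 **/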
2570#ifdef I40E_FCOE
2571int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2572 __always_unused __be16 proto, u16 vid)
2573#else
2574static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2575 __always_unused __be16 proto, u16 vid)
2576#endif
2577{
2578 struct i40e_netdev_priv *np = netdev_priv(netdev);
2579 struct i40e_vsi *vsi = np->vsi;
2580
2581
2582
2583
2584
2585 i40e_vsi_kill_vlan(vsi, vid);
2586
2587 clear_bit(vid, vsi->active_vlans);
2588
2589 return 0;
2590}
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
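/**
 * i40e_macaddr_init - explicitly write the mac address filters
 * @vsi: pointer to the vsi
 * @macaddr: the MAC address
 *
 * This is needed when the macaddr has been obtained by other
 * means than the default, e.g., from Open Firmware or IDPROM.
 * Returns 0 on success, negative on failure
 **/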
2602static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr)
2603{
2604 int ret;
2605 struct i40e_aqc_add_macvlan_element_data element;
2606
2607 ret = i40e_aq_mac_address_write(&vsi->back->hw,
2608 I40E_AQC_WRITE_TYPE_LAA_WOL,
2609 macaddr, NULL);
2610 if (ret) {
2611 dev_info(&vsi->back->pdev->dev,
2612 "Addr change for VSI failed: %d\n", ret);
2613 return -EADDRNOTAVAIL;
2614 }
2615
2616 memset(&element, 0, sizeof(element));
2617 ether_addr_copy(element.mac_addr, macaddr);
2618 element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
2619 ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL);
2620 if (ret) {
2621 dev_info(&vsi->back->pdev->dev,
2622 "add filter failed err %s aq_err %s\n",
2623 i40e_stat_str(&vsi->back->hw, ret),
2624 i40e_aq_str(&vsi->back->hw,
2625 vsi->back->hw.aq.asq_last_status));
2626 }
2627 return ret;
2628}
2629
2630
2631
2632
2633
2634static void i40e_restore_vlan(struct i40e_vsi *vsi)
2635{
2636 u16 vid;
2637
2638 if (!vsi->netdev)
2639 return;
2640
2641 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2642
2643 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2644 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2645 vid);
2646}
2647
2648
2649
2650
2651
2652
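/**
 * i40e_vsi_add_pvid - Add pvid for the VSI
 * @vsi: the vsi being adjusted
 * @vid: the vlan id to set as a PVID
 **/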
2653int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2654{
2655 struct i40e_vsi_context ctxt;
2656 i40e_status ret;
2657
2658 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2659 vsi->info.pvid = cpu_to_le16(vid);
2660 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2661 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2662 I40E_AQ_VSI_PVLAN_EMOD_STR;
2663
2664 ctxt.seid = vsi->seid;
2665 ctxt.info = vsi->info;
2666 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2667 if (ret) {
2668 dev_info(&vsi->back->pdev->dev,
2669 "add pvid failed, err %s aq_err %s\n",
2670 i40e_stat_str(&vsi->back->hw, ret),
2671 i40e_aq_str(&vsi->back->hw,
2672 vsi->back->hw.aq.asq_last_status));
2673 return -ENOENT;
2674 }
2675
2676 return 0;
2677}
2678
2679
2680
2681
2682
2683
2684
2685void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2686{
2687 i40e_vlan_stripping_disable(vsi);
2688
2689 vsi->info.pvid = 0;
2690}
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2703{
2704 int i, err = 0;
2705
2706 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2707 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2708
2709 return err;
2710}
2711
2712
2713
2714
2715
2716
2717
2718static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2719{
2720 int i;
2721
2722 if (!vsi->tx_rings)
2723 return;
2724
2725 for (i = 0; i < vsi->num_queue_pairs; i++)
2726 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2727 i40e_free_tx_resources(vsi->tx_rings[i]);
2728}
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2741{
2742 int i, err = 0;
2743
2744 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2745 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2746#ifdef I40E_FCOE
2747 i40e_fcoe_setup_ddp_resources(vsi);
2748#endif
2749 return err;
2750}
2751
2752
2753
2754
2755
2756
2757
2758static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2759{
2760 int i;
2761
2762 if (!vsi->rx_rings)
2763 return;
2764
2765 for (i = 0; i < vsi->num_queue_pairs; i++)
2766 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2767 i40e_free_rx_resources(vsi->rx_rings[i]);
2768#ifdef I40E_FCOE
2769 i40e_fcoe_free_ddp_resources(vsi);
2770#endif
2771}
2772
2773
2774
2775
2776
2777
2778
2779
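/**
 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: Tx ring to configure
 *
 * Use the vector's CPU affinity as the XPS mapping when only one
 * TC is enabled; otherwise clear the mapping.
 **/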
2780static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2781{
2782 struct i40e_vsi *vsi = ring->vsi;
2783 cpumask_var_t mask;
2784
2785 if (!ring->q_vector || !ring->netdev)
2786 return;
2787
2788
2789 if (vsi->tc_config.numtc <= 1) {
2790 if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2791 netif_set_xps_queue(ring->netdev,
2792 &ring->q_vector->affinity_mask,
2793 ring->queue_index);
2794 } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2795
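/* Disable XPS to allow selection based on TC */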
2796 bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2797 netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
2798 free_cpumask_var(mask);
2799 }
2800
2801
2802
2803
2804 i40e_service_event_schedule(vsi->back);
2805}
2806
2807
2808
2809
2810
2811
2812
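/**
 * i40e_configure_tx_ring - Configure a transmit ring context and tail
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
 **/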
2813static int i40e_configure_tx_ring(struct i40e_ring *ring)
2814{
2815 struct i40e_vsi *vsi = ring->vsi;
2816 u16 pf_q = vsi->base_queue + ring->queue_index;
2817 struct i40e_hw *hw = &vsi->back->hw;
2818 struct i40e_hmc_obj_txq tx_ctx;
2819 i40e_status err = 0;
2820 u32 qtx_ctl = 0;
2821
2822
2823 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2824 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2825 ring->atr_count = 0;
2826 } else {
2827 ring->atr_sample_rate = 0;
2828 }
2829
2830
2831 i40e_config_xps_tx_ring(ring);
2832
2833
2834 memset(&tx_ctx, 0, sizeof(tx_ctx));
2835
2836 tx_ctx.new_context = 1;
2837 tx_ctx.base = (ring->dma / 128);
2838 tx_ctx.qlen = ring->count;
2839 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2840 I40E_FLAG_FD_ATR_ENABLED));
2841#ifdef I40E_FCOE
2842 tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2843#endif
2844 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2845
2846 if (vsi->type != I40E_VSI_FDIR)
2847 tx_ctx.head_wb_ena = 1;
2848 tx_ctx.head_wb_addr = ring->dma +
2849 (ring->count * sizeof(struct i40e_tx_desc));
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
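/* As part of VSI creation/update, FW allocates certain
 * Tx arbitration queue sets for each TC enabled for
 * the VSI. The FW returns the handles to these queue
 * sets as part of the response buffer to Add VSI,
 * Update VSI, etc. AQ commands. It is expected that
 * these queue set handles be associated with the Tx
 * queues by the driver as part of the TX queue context
 * initialization. This has to be done regardless of
 * DCB as its default behavior is to assign the queue
 * set to TC0.
 */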
2861 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2862 tx_ctx.rdylist_act = 0;
2863
2864
2865 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2866 if (err) {
2867 dev_info(&vsi->back->pdev->dev,
2868 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2869 ring->queue_index, pf_q, err);
2870 return -ENOMEM;
2871 }
2872
2873
2874 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2875 if (err) {
2876 dev_info(&vsi->back->pdev->dev,
2877 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2878 ring->queue_index, pf_q, err);
2879 return -ENOMEM;
2880 }
2881
2882
2883 if (vsi->type == I40E_VSI_VMDQ2) {
2884 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2885 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2886 I40E_QTX_CTL_VFVM_INDX_MASK;
2887 } else {
2888 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2889 }
2890
2891 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2892 I40E_QTX_CTL_PF_INDX_MASK);
2893 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2894 i40e_flush(hw);
2895
2896
2897 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2898
2899 return 0;
2900}
2901
2902
2903
2904
2905
2906
2907
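/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
 **/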
2908static int i40e_configure_rx_ring(struct i40e_ring *ring)
2909{
2910 struct i40e_vsi *vsi = ring->vsi;
2911 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2912 u16 pf_q = vsi->base_queue + ring->queue_index;
2913 struct i40e_hw *hw = &vsi->back->hw;
2914 struct i40e_hmc_obj_rxq rx_ctx;
2915 i40e_status err = 0;
2916
2917 ring->state = 0;
2918
2919
2920 memset(&rx_ctx, 0, sizeof(rx_ctx));
2921
2922 ring->rx_buf_len = vsi->rx_buf_len;
2923
2924 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2925
2926 rx_ctx.base = (ring->dma / 128);
2927 rx_ctx.qlen = ring->count;
2928
2929
2930 rx_ctx.dsize = 1;
2931
2932
2933
2934
2935 rx_ctx.hsplit_0 = 0;
2936
2937 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
2938 if (hw->revision_id == 0)
2939 rx_ctx.lrxqthresh = 0;
2940 else
2941 rx_ctx.lrxqthresh = 2;
2942 rx_ctx.crcstrip = 1;
2943 rx_ctx.l2tsel = 1;
2944
2945 rx_ctx.showiv = 0;
2946#ifdef I40E_FCOE
2947 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2948#endif
2949
2950 rx_ctx.prefena = 1;
2951
2952
2953 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2954 if (err) {
2955 dev_info(&vsi->back->pdev->dev,
2956 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2957 ring->queue_index, pf_q, err);
2958 return -ENOMEM;
2959 }
2960
2961
2962 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2963 if (err) {
2964 dev_info(&vsi->back->pdev->dev,
2965 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2966 ring->queue_index, pf_q, err);
2967 return -ENOMEM;
2968 }
2969
2970
2971 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2972 writel(0, ring->tail);
2973
2974 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
2975
2976 return 0;
2977}
2978
2979
2980
2981
2982
2983
2984
2985static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2986{
2987 int err = 0;
2988 u16 i;
2989
2990 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2991 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2992
2993 return err;
2994}
2995
2996
2997
2998
2999
3000
3001
3002static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3003{
3004 int err = 0;
3005 u16 i;
3006
3007 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
3008 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
3009 + ETH_FCS_LEN + VLAN_HLEN;
3010 else
3011 vsi->max_frame = I40E_RXBUFFER_2048;
3012
3013 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3014
3015#ifdef I40E_FCOE
3016
3017 if ((vsi->type == I40E_VSI_FCOE) &&
3018 (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
3019 vsi->rx_buf_len = I40E_RXBUFFER_3072;
3020 vsi->max_frame = I40E_RXBUFFER_3072;
3021 }
3022
3023#endif
3024
3025 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
3026 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3027
3028
3029 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3030 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3031
3032 return err;
3033}
3034
3035
3036
3037
3038
3039static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3040{
3041 struct i40e_ring *tx_ring, *rx_ring;
3042 u16 qoffset, qcount;
3043 int i, n;
3044
3045 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3046
3047 for (i = 0; i < vsi->num_queue_pairs; i++) {
3048 rx_ring = vsi->rx_rings[i];
3049 tx_ring = vsi->tx_rings[i];
3050 rx_ring->dcb_tc = 0;
3051 tx_ring->dcb_tc = 0;
3052 }
3053 }
3054
3055 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3056 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3057 continue;
3058
3059 qoffset = vsi->tc_config.tc_info[n].qoffset;
3060 qcount = vsi->tc_config.tc_info[n].qcount;
3061 for (i = qoffset; i < (qoffset + qcount); i++) {
3062 rx_ring = vsi->rx_rings[i];
3063 tx_ring = vsi->tx_rings[i];
3064 rx_ring->dcb_tc = n;
3065 tx_ring->dcb_tc = n;
3066 }
3067 }
3068}
3069
3070
3071
3072
3073
3074static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3075{
3076 struct i40e_pf *pf = vsi->back;
3077 int err;
3078
3079 if (vsi->netdev)
3080 i40e_set_rx_mode(vsi->netdev);
3081
3082 if (pf->flags & I40E_FLAG_PF_MAC) {
3083 err = i40e_macaddr_init(vsi, pf->hw.mac.addr);
3084 if (err) {
3085 dev_warn(&pf->pdev->dev,
3086 "could not set up macaddr; err %d\n", err);
3087 }
3088 }
3089}
3090
3091
3092
3093
3094
3095
3096
3097
3098static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3099{
3100 struct i40e_fdir_filter *filter;
3101 struct i40e_pf *pf = vsi->back;
3102 struct hlist_node *node;
3103
3104 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3105 return;
3106
3107 hlist_for_each_entry_safe(filter, node,
3108 &pf->fdir_filter_list, fdir_node) {
3109 i40e_add_del_fdir(vsi, filter, true);
3110 }
3111}
3112
3113
3114
3115
3116
3117static int i40e_vsi_configure(struct i40e_vsi *vsi)
3118{
3119 int err;
3120
3121 i40e_set_vsi_rx_mode(vsi);
3122 i40e_restore_vlan(vsi);
3123 i40e_vsi_config_dcb_rings(vsi);
3124 err = i40e_vsi_configure_tx(vsi);
3125 if (!err)
3126 err = i40e_vsi_configure_rx(vsi);
3127
3128 return err;
3129}
3130
3131
3132
3133
3134
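/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 **/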
3135static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3136{
3137 struct i40e_pf *pf = vsi->back;
3138 struct i40e_hw *hw = &pf->hw;
3139 u16 vector;
3140 int i, q;
3141 u32 qp;
3142
3143
3144
3145
3146
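/* The interrupt indexing is offset by 1 in the PFINT_ITRn and
 * PFINT_LNKLSTn registers: vector 0 is reserved for the misc/other
 * interrupt, so queue vectors program register index "vector - 1".
 */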
3147 qp = vsi->base_queue;
3148 vector = vsi->base_vector;
3149 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3150 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3151
3152 q_vector->itr_countdown = ITR_COUNTDOWN_START;
3153 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
3154 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3155 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3156 q_vector->rx.itr);
3157 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
3158 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3159 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3160 q_vector->tx.itr);
3161 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3162 INTRL_USEC_TO_REG(vsi->int_rate_limit));
3163
3164
3165 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3166 for (q = 0; q < q_vector->num_ringpairs; q++) {
3167 u32 val;
3168
3169 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3170 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3171 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3172 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3173 (I40E_QUEUE_TYPE_TX
3174 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3175
3176 wr32(hw, I40E_QINT_RQCTL(qp), val);
3177
3178 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3179 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3180 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3181 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3182 (I40E_QUEUE_TYPE_RX
3183 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3184
3185
3186 if (q == (q_vector->num_ringpairs - 1))
3187 val |= (I40E_QUEUE_END_OF_LIST
3188 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3189
3190 wr32(hw, I40E_QINT_TQCTL(qp), val);
3191 qp++;
3192 }
3193 }
3194
3195 i40e_flush(hw);
3196}
3197
3198
3199
3200
3201
3202static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3203{
3204 struct i40e_hw *hw = &pf->hw;
3205 u32 val;
3206
3207
3208 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3209 rd32(hw, I40E_PFINT_ICR0);
3210
3211 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3212 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3213 I40E_PFINT_ICR0_ENA_GRST_MASK |
3214 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3215 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3216 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3217 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3218 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3219
3220 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3221 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3222
3223 if (pf->flags & I40E_FLAG_PTP)
3224 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3225
3226 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3227
3228
3229 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3230 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3231
3232
3233 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3234}
3235
3236
3237
3238
3239
3240static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3241{
3242 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3243 struct i40e_pf *pf = vsi->back;
3244 struct i40e_hw *hw = &pf->hw;
3245 u32 val;
3246
3247
3248 q_vector->itr_countdown = ITR_COUNTDOWN_START;
3249 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
3250 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3251 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
3252 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
3253 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3254 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
3255
3256 i40e_enable_misc_int_causes(pf);
3257
3258
3259 wr32(hw, I40E_PFINT_LNKLST0, 0);
3260
3261
3262 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3263 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3264 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3265
3266 wr32(hw, I40E_QINT_RQCTL(0), val);
3267
3268 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3269 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3270 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3271
3272 wr32(hw, I40E_QINT_TQCTL(0), val);
3273 i40e_flush(hw);
3274}
3275
3276
3277
3278
3279
3280void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3281{
3282 struct i40e_hw *hw = &pf->hw;
3283
3284 wr32(hw, I40E_PFINT_DYN_CTL0,
3285 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3286 i40e_flush(hw);
3287}
3288
3289
3290
3291
3292
3293
3294void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
3295{
3296 struct i40e_hw *hw = &pf->hw;
3297 u32 val;
3298
3299 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3300 (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
3301 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3302
3303 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3304 i40e_flush(hw);
3305}
3306
3307
3308
3309
3310
3311
3312static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3313{
3314 struct i40e_q_vector *q_vector = data;
3315
3316 if (!q_vector->tx.ring && !q_vector->rx.ring)
3317 return IRQ_HANDLED;
3318
3319 napi_schedule_irqoff(&q_vector->napi);
3320
3321 return IRQ_HANDLED;
3322}
3323
3324
3325
3326
3327
3328
3329
3330
3331static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3332{
3333 int q_vectors = vsi->num_q_vectors;
3334 struct i40e_pf *pf = vsi->back;
3335 int base = vsi->base_vector;
3336 int rx_int_idx = 0;
3337 int tx_int_idx = 0;
3338 int vector, err;
3339
3340 for (vector = 0; vector < q_vectors; vector++) {
3341 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3342
3343 if (q_vector->tx.ring && q_vector->rx.ring) {
3344 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3345 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3346 tx_int_idx++;
3347 } else if (q_vector->rx.ring) {
3348 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3349 "%s-%s-%d", basename, "rx", rx_int_idx++);
3350 } else if (q_vector->tx.ring) {
3351 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3352 "%s-%s-%d", basename, "tx", tx_int_idx++);
3353 } else {
3354
3355 continue;
3356 }
3357 err = request_irq(pf->msix_entries[base + vector].vector,
3358 vsi->irq_handler,
3359 0,
3360 q_vector->name,
3361 q_vector);
3362 if (err) {
3363 dev_info(&pf->pdev->dev,
3364 "MSIX request_irq failed, error: %d\n", err);
3365 goto free_queue_irqs;
3366 }
3367
3368 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3369 &q_vector->affinity_mask);
3370 }
3371
3372 vsi->irqs_ready = true;
3373 return 0;
3374
3375free_queue_irqs:
3376 while (vector) {
3377 vector--;
3378 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3379 NULL);
3380 free_irq(pf->msix_entries[base + vector].vector,
3381 &(vsi->q_vectors[vector]));
3382 }
3383 return err;
3384}
3385
3386
3387
3388
3389
3390static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3391{
3392 struct i40e_pf *pf = vsi->back;
3393 struct i40e_hw *hw = &pf->hw;
3394 int base = vsi->base_vector;
3395 int i;
3396
3397 for (i = 0; i < vsi->num_queue_pairs; i++) {
3398 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
3399 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
3400 }
3401
3402 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3403 for (i = vsi->base_vector;
3404 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3405 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3406
3407 i40e_flush(hw);
3408 for (i = 0; i < vsi->num_q_vectors; i++)
3409 synchronize_irq(pf->msix_entries[i + base].vector);
3410 } else {
3411
3412 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3413 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3414 i40e_flush(hw);
3415 synchronize_irq(pf->pdev->irq);
3416 }
3417}
3418
3419
3420
3421
3422
3423static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3424{
3425 struct i40e_pf *pf = vsi->back;
3426 int i;
3427
3428 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3429 for (i = 0; i < vsi->num_q_vectors; i++)
3430 i40e_irq_dynamic_enable(vsi, i);
3431 } else {
3432 i40e_irq_dynamic_enable_icr0(pf, true);
3433 }
3434
3435 i40e_flush(&pf->hw);
3436 return 0;
3437}
3438
3439
3440
3441
3442
3443static void i40e_stop_misc_vector(struct i40e_pf *pf)
3444{
3445
3446 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3447 i40e_flush(&pf->hw);
3448}
3449
3450
3451
3452
3453
3454
3455
3456
3457
3458
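/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts.  This is also used in
 * MSIX mode to handle the non-queue interrupts.
 **/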
3459static irqreturn_t i40e_intr(int irq, void *data)
3460{
3461 struct i40e_pf *pf = (struct i40e_pf *)data;
3462 struct i40e_hw *hw = &pf->hw;
3463 irqreturn_t ret = IRQ_NONE;
3464 u32 icr0, icr0_remaining;
3465 u32 val, ena_mask;
3466
3467 icr0 = rd32(hw, I40E_PFINT_ICR0);
3468 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3469
3470
3471 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3472 goto enable_intr;
3473
3474
3475 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3476 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3477 pf->sw_int_count++;
3478
3479 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3480 (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3481 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3482 icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3483 dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
3484 }
3485
3486
3487 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3488 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3489 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3490
3491
3492
3493
3494
3495
3496
3497 if (!test_bit(__I40E_DOWN, &pf->state))
3498 napi_schedule_irqoff(&q_vector->napi);
3499 }
3500
3501 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3502 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3503 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
3504 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3505 }
3506
3507 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3508 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3509 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
3510 }
3511
3512 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3513 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3514 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
3515 }
3516
3517 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3518 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
3519 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
3520 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3521 val = rd32(hw, I40E_GLGEN_RSTAT);
3522 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3523 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3524 if (val == I40E_RESET_CORER) {
3525 pf->corer_count++;
3526 } else if (val == I40E_RESET_GLOBR) {
3527 pf->globr_count++;
3528 } else if (val == I40E_RESET_EMPR) {
3529 pf->empr_count++;
3530 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
3531 }
3532 }
3533
3534 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3535 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3536 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3537 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3538 rd32(hw, I40E_PFHMC_ERRORINFO),
3539 rd32(hw, I40E_PFHMC_ERRORDATA));
3540 }
3541
3542 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3543 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3544
3545 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3546 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3547 i40e_ptp_tx_hwtstamp(pf);
3548 }
3549 }
3550
3551
3552
3553
3554
3555 icr0_remaining = icr0 & ena_mask;
3556 if (icr0_remaining) {
3557 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3558 icr0_remaining);
3559 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3560 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3561 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3562 dev_info(&pf->pdev->dev, "device will be reset\n");
3563 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
3564 i40e_service_event_schedule(pf);
3565 }
3566 ena_mask &= ~icr0_remaining;
3567 }
3568 ret = IRQ_HANDLED;
3569
3570enable_intr:
3571
3572 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3573 if (!test_bit(__I40E_DOWN, &pf->state)) {
3574 i40e_service_event_schedule(pf);
3575 i40e_irq_dynamic_enable_icr0(pf, false);
3576 }
3577
3578 return ret;
3579}
3580
3581
3582
3583
3584
3585
3586
3587
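/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/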
3588static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3589{
3590 struct i40e_vsi *vsi = tx_ring->vsi;
3591 u16 i = tx_ring->next_to_clean;
3592 struct i40e_tx_buffer *tx_buf;
3593 struct i40e_tx_desc *tx_desc;
3594
3595 tx_buf = &tx_ring->tx_bi[i];
3596 tx_desc = I40E_TX_DESC(tx_ring, i);
3597 i -= tx_ring->count;
3598
3599 do {
3600 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3601
3602
3603 if (!eop_desc)
3604 break;
3605
3606
3607 read_barrier_depends();
3608
3609
3610 if (!(eop_desc->cmd_type_offset_bsz &
3611 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3612 break;
3613
3614
3615 tx_buf->next_to_watch = NULL;
3616
3617 tx_desc->buffer_addr = 0;
3618 tx_desc->cmd_type_offset_bsz = 0;
3619
3620 tx_buf++;
3621 tx_desc++;
3622 i++;
3623 if (unlikely(!i)) {
3624 i -= tx_ring->count;
3625 tx_buf = tx_ring->tx_bi;
3626 tx_desc = I40E_TX_DESC(tx_ring, 0);
3627 }
3628
3629 dma_unmap_single(tx_ring->dev,
3630 dma_unmap_addr(tx_buf, dma),
3631 dma_unmap_len(tx_buf, len),
3632 DMA_TO_DEVICE);
3633 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3634 kfree(tx_buf->raw_buf);
3635
3636 tx_buf->raw_buf = NULL;
3637 tx_buf->tx_flags = 0;
3638 tx_buf->next_to_watch = NULL;
3639 dma_unmap_len_set(tx_buf, len, 0);
3640 tx_desc->buffer_addr = 0;
3641 tx_desc->cmd_type_offset_bsz = 0;
3642
3643
3644 tx_buf++;
3645 tx_desc++;
3646 i++;
3647 if (unlikely(!i)) {
3648 i -= tx_ring->count;
3649 tx_buf = tx_ring->tx_bi;
3650 tx_desc = I40E_TX_DESC(tx_ring, 0);
3651 }
3652
3653
3654 budget--;
3655 } while (likely(budget));
3656
3657 i += tx_ring->count;
3658 tx_ring->next_to_clean = i;
3659
3660 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
3661 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
3662
3663 return budget > 0;
3664}
3665
3666
3667
3668
3669
3670
3671static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3672{
3673 struct i40e_q_vector *q_vector = data;
3674 struct i40e_vsi *vsi;
3675
3676 if (!q_vector->tx.ring)
3677 return IRQ_HANDLED;
3678
3679 vsi = q_vector->tx.ring->vsi;
3680 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3681
3682 return IRQ_HANDLED;
3683}
3684
3685
3686
3687
3688
3689
3690
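/**
 * i40e_map_vector_to_qp - Map a vector to a queue pair
 * @vsi: the VSI being configured
 * @v_idx: vector index
 * @qp_idx: queue pair index
 **/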
3691static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3692{
3693 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3694 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3695 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3696
3697 tx_ring->q_vector = q_vector;
3698 tx_ring->next = q_vector->tx.ring;
3699 q_vector->tx.ring = tx_ring;
3700 q_vector->tx.count++;
3701
3702 rx_ring->q_vector = q_vector;
3703 rx_ring->next = q_vector->rx.ring;
3704 q_vector->rx.ring = rx_ring;
3705 q_vector->rx.count++;
3706}
3707
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3718{
3719 int qp_remaining = vsi->num_queue_pairs;
3720 int q_vectors = vsi->num_q_vectors;
3721 int num_ringpairs;
3722 int v_start = 0;
3723 int qp_idx = 0;
3724
3725
3726
3727
3728
3729
3730
3731
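/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
 * group them so there are multiple queues per vector.
 * It is also important to go through all the vectors available to be
 * sure that if we don't use all the vectors, that the remaining vectors
 * are cleared. This is especially important when decreasing the
 * number of queues in use.
 */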
3732 for (; v_start < q_vectors; v_start++) {
3733 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3734
3735 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3736
3737 q_vector->num_ringpairs = num_ringpairs;
3738
3739 q_vector->rx.count = 0;
3740 q_vector->tx.count = 0;
3741 q_vector->rx.ring = NULL;
3742 q_vector->tx.ring = NULL;
3743
3744 while (num_ringpairs--) {
3745 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
3746 qp_idx++;
3747 qp_remaining--;
3748 }
3749 }
3750}
3751
3752
3753
3754
3755
3756
3757static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3758{
3759 struct i40e_pf *pf = vsi->back;
3760 int err;
3761
3762 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3763 err = i40e_vsi_request_irq_msix(vsi, basename);
3764 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3765 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3766 pf->int_name, pf);
3767 else
3768 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3769 pf->int_name, pf);
3770
3771 if (err)
3772 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3773
3774 return err;
3775}
3776
3777#ifdef CONFIG_NET_POLL_CONTROLLER
3778
3779
3780
3781
3782
3783
3784
3785#ifdef I40E_FCOE
3786void i40e_netpoll(struct net_device *netdev)
3787#else
3788static void i40e_netpoll(struct net_device *netdev)
3789#endif
3790{
3791 struct i40e_netdev_priv *np = netdev_priv(netdev);
3792 struct i40e_vsi *vsi = np->vsi;
3793 struct i40e_pf *pf = vsi->back;
3794 int i;
3795
3796
3797 if (test_bit(__I40E_DOWN, &vsi->state))
3798 return;
3799
3800 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3801 for (i = 0; i < vsi->num_q_vectors; i++)
3802 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3803 } else {
3804 i40e_intr(pf->pdev->irq, netdev);
3805 }
3806}
3807#endif
3808
3809
3810
3811
3812
3813
3814
3815
3816
3817
3818
3819
3820static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3821{
3822 int i;
3823 u32 tx_reg;
3824
3825 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3826 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3827 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3828 break;
3829
3830 usleep_range(10, 20);
3831 }
3832 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3833 return -ETIMEDOUT;
3834
3835 return 0;
3836}
3837
3838
3839
3840
3841
3842
3843static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3844{
3845 struct i40e_pf *pf = vsi->back;
3846 struct i40e_hw *hw = &pf->hw;
3847 int i, j, pf_q, ret = 0;
3848 u32 tx_reg;
3849
3850 pf_q = vsi->base_queue;
3851 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3852
3853
3854 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
3855 if (!enable)
3856 usleep_range(10, 20);
3857
3858 for (j = 0; j < 50; j++) {
3859 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3860 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3861 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3862 break;
3863 usleep_range(1000, 2000);
3864 }
3865
3866 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3867 continue;
3868
3869
3870 if (enable) {
3871 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
3872 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3873 } else {
3874 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3875 }
3876
3877 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
3878
3879 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3880 continue;
3881
3882
3883 ret = i40e_pf_txq_wait(pf, pf_q, enable);
3884 if (ret) {
3885 dev_info(&pf->pdev->dev,
3886 "VSI seid %d Tx ring %d %sable timeout\n",
3887 vsi->seid, pf_q, (enable ? "en" : "dis"));
3888 break;
3889 }
3890 }
3891
3892 if (hw->revision_id == 0)
3893 mdelay(50);
3894 return ret;
3895}
3896
3897
3898
3899
3900
3901
3902
3903
3904
3905
3906
3907
3908static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3909{
3910 int i;
3911 u32 rx_reg;
3912
3913 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3914 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3915 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3916 break;
3917
3918 usleep_range(10, 20);
3919 }
3920 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3921 return -ETIMEDOUT;
3922
3923 return 0;
3924}
3925
3926
3927
3928
3929
3930
3931static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3932{
3933 struct i40e_pf *pf = vsi->back;
3934 struct i40e_hw *hw = &pf->hw;
3935 int i, j, pf_q, ret = 0;
3936 u32 rx_reg;
3937
3938 pf_q = vsi->base_queue;
3939 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3940 for (j = 0; j < 50; j++) {
3941 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3942 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3943 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3944 break;
3945 usleep_range(1000, 2000);
3946 }
3947
3948
3949 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3950 continue;
3951
3952
3953 if (enable)
3954 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3955 else
3956 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3957 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3958
3959 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3960 continue;
3961
3962
3963 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
3964 if (ret) {
3965 dev_info(&pf->pdev->dev,
3966 "VSI seid %d Rx ring %d %sable timeout\n",
3967 vsi->seid, pf_q, (enable ? "en" : "dis"));
3968 break;
3969 }
3970 }
3971
3972 return ret;
3973}
3974
3975
3976
3977
3978
3979
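/**
 * i40e_vsi_control_rings - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @request: true to start the rings, false to stop them
 **/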
3980int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3981{
3982 int ret = 0;
3983
3984
3985 if (request) {
3986 ret = i40e_vsi_control_rx(vsi, request);
3987 if (ret)
3988 return ret;
3989 ret = i40e_vsi_control_tx(vsi, request);
3990 } else {
3991
3992 i40e_vsi_control_tx(vsi, request);
3993 i40e_vsi_control_rx(vsi, request);
3994 }
3995
3996 return ret;
3997}
3998
3999
4000
4001
4002
4003static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4004{
4005 struct i40e_pf *pf = vsi->back;
4006 struct i40e_hw *hw = &pf->hw;
4007 int base = vsi->base_vector;
4008 u32 val, qp;
4009 int i;
4010
4011 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4012 if (!vsi->q_vectors)
4013 return;
4014
4015 if (!vsi->irqs_ready)
4016 return;
4017
4018 vsi->irqs_ready = false;
4019 for (i = 0; i < vsi->num_q_vectors; i++) {
4020 u16 vector = i + base;
4021
4022
4023 if (!vsi->q_vectors[i] ||
4024 !vsi->q_vectors[i]->num_ringpairs)
4025 continue;
4026
4027
4028 irq_set_affinity_hint(pf->msix_entries[vector].vector,
4029 NULL);
4030 synchronize_irq(pf->msix_entries[vector].vector);
4031 free_irq(pf->msix_entries[vector].vector,
4032 vsi->q_vectors[i]);
4033
4034
4035
4036
4037
4038
4039
4040
4041 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4042 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4043 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4044 val |= I40E_QUEUE_END_OF_LIST
4045 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4046 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4047
4048 while (qp != I40E_QUEUE_END_OF_LIST) {
4049 u32 next;
4050
4051 val = rd32(hw, I40E_QINT_RQCTL(qp));
4052
4053 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4054 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4055 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4056 I40E_QINT_RQCTL_INTEVENT_MASK);
4057
4058 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4059 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4060
4061 wr32(hw, I40E_QINT_RQCTL(qp), val);
4062
4063 val = rd32(hw, I40E_QINT_TQCTL(qp));
4064
4065 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4066 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4067
4068 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4069 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4070 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4071 I40E_QINT_TQCTL_INTEVENT_MASK);
4072
4073 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4074 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4075
4076 wr32(hw, I40E_QINT_TQCTL(qp), val);
4077 qp = next;
4078 }
4079 }
4080 } else {
4081 free_irq(pf->pdev->irq, pf);
4082
4083 val = rd32(hw, I40E_PFINT_LNKLST0);
4084 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4085 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4086 val |= I40E_QUEUE_END_OF_LIST
4087 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4088 wr32(hw, I40E_PFINT_LNKLST0, val);
4089
4090 val = rd32(hw, I40E_QINT_RQCTL(qp));
4091 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4092 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4093 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4094 I40E_QINT_RQCTL_INTEVENT_MASK);
4095
4096 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4097 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4098
4099 wr32(hw, I40E_QINT_RQCTL(qp), val);
4100
4101 val = rd32(hw, I40E_QINT_TQCTL(qp));
4102
4103 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4104 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4105 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4106 I40E_QINT_TQCTL_INTEVENT_MASK);
4107
4108 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4109 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4110
4111 wr32(hw, I40E_QINT_TQCTL(qp), val);
4112 }
4113}
4114
4115
4116
4117
4118
4119
4120
4121
4122
4123
4124static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4125{
4126 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4127 struct i40e_ring *ring;
4128
4129 if (!q_vector)
4130 return;
4131
4132
4133 i40e_for_each_ring(ring, q_vector->tx)
4134 ring->q_vector = NULL;
4135
4136 i40e_for_each_ring(ring, q_vector->rx)
4137 ring->q_vector = NULL;
4138
4139
4140 if (vsi->netdev)
4141 netif_napi_del(&q_vector->napi);
4142
4143 vsi->q_vectors[v_idx] = NULL;
4144
4145 kfree_rcu(q_vector, rcu);
4146}
4147
4148
4149
4150
4151
4152
4153
4154
4155static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4156{
4157 int v_idx;
4158
4159 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4160 i40e_free_q_vector(vsi, v_idx);
4161}
4162
4163
4164
4165
4166
4167static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4168{
4169
4170 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4171 pci_disable_msix(pf->pdev);
4172 kfree(pf->msix_entries);
4173 pf->msix_entries = NULL;
4174 kfree(pf->irq_pile);
4175 pf->irq_pile = NULL;
4176 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4177 pci_disable_msi(pf->pdev);
4178 }
4179 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4180}
4181
4182
4183
4184
4185
4186
4187
4188
4189static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4190{
4191 int i;
4192
4193 i40e_stop_misc_vector(pf);
4194 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
4195 synchronize_irq(pf->msix_entries[0].vector);
4196 free_irq(pf->msix_entries[0].vector, pf);
4197 }
4198
4199 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4200 I40E_IWARP_IRQ_PILE_ID);
4201
4202 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT - 1);
4203 for (i = 0; i < pf->num_alloc_vsi; i++)
4204 if (pf->vsi[i])
4205 i40e_vsi_free_q_vectors(pf->vsi[i]);
4206 i40e_reset_interrupt_capability(pf);
4207}
4208
4209
4210
4211
4212
4213static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4214{
4215 int q_idx;
4216
4217 if (!vsi->netdev)
4218 return;
4219
4220 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
4221 napi_enable(&vsi->q_vectors[q_idx]->napi);
4222}
4223
4224
4225
4226
4227
4228static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4229{
4230 int q_idx;
4231
4232 if (!vsi->netdev)
4233 return;
4234
4235 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
4236 napi_disable(&vsi->q_vectors[q_idx]->napi);
4237}
4238
4239
4240
4241
4242
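/**
 * i40e_vsi_close - Shut down a VSI
 * @vsi: the vsi to be quelled
 **/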
4243static void i40e_vsi_close(struct i40e_vsi *vsi)
4244{
4245 bool reset = false;
4246
4247 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
4248 i40e_down(vsi);
4249 i40e_vsi_free_irq(vsi);
4250 i40e_vsi_free_tx_resources(vsi);
4251 i40e_vsi_free_rx_resources(vsi);
4252 vsi->current_netdev_flags = 0;
4253 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4254 reset = true;
4255 i40e_notify_client_of_netdev_close(vsi, reset);
4256}
4257
4258
4259
4260
4261
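/**
 * i40e_quiesce_vsi - Pause a given VSI
 * @vsi: the VSI being paused
 **/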
4262static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4263{
4264 if (test_bit(__I40E_DOWN, &vsi->state))
4265 return;
4266
4267
4268 if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
4269 vsi->type == I40E_VSI_FCOE) {
4270 dev_dbg(&vsi->back->pdev->dev,
4271 "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
4272 return;
4273 }
4274
4275 set_bit(__I40E_NEEDS_RESTART, &vsi->state);
4276 if (vsi->netdev && netif_running(vsi->netdev))
4277 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4278 else
4279 i40e_vsi_close(vsi);
4280}
4281
4282
4283
4284
4285
4286static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4287{
4288 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
4289 return;
4290
4291 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4292 if (vsi->netdev && netif_running(vsi->netdev))
4293 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4294 else
4295 i40e_vsi_open(vsi);
4296}
4297
4298
4299
4300
4301
4302static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4303{
4304 int v;
4305
4306 for (v = 0; v < pf->num_alloc_vsi; v++) {
4307 if (pf->vsi[v])
4308 i40e_quiesce_vsi(pf->vsi[v]);
4309 }
4310}
4311
4312
4313
4314
4315
4316static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4317{
4318 int v;
4319
4320 for (v = 0; v < pf->num_alloc_vsi; v++) {
4321 if (pf->vsi[v])
4322 i40e_unquiesce_vsi(pf->vsi[v]);
4323 }
4324}
4325
4326#ifdef CONFIG_I40E_DCB
4327
4328
4329
4330
4331
4332
4333static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4334{
4335 struct i40e_pf *pf = vsi->back;
4336 int i, pf_q, ret;
4337
4338 pf_q = vsi->base_queue;
4339 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4340
4341 ret = i40e_pf_txq_wait(pf, pf_q, false);
4342 if (ret) {
4343 dev_info(&pf->pdev->dev,
4344 "VSI seid %d Tx ring %d disable timeout\n",
4345 vsi->seid, pf_q);
4346 return ret;
4347 }
4348 }
4349
4350 pf_q = vsi->base_queue;
4351 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4352
4353 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4354 if (ret) {
4355 dev_info(&pf->pdev->dev,
4356 "VSI seid %d Rx ring %d disable timeout\n",
4357 vsi->seid, pf_q);
4358 return ret;
4359 }
4360 }
4361
4362 return 0;
4363}
4364
4365
4366
4367
4368
4369
4370
4371
4372static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4373{
4374 int v, ret = 0;
4375
4376 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4377
4378 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
4379 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4380 if (ret)
4381 break;
4382 }
4383 }
4384
4385 return ret;
4386}
4387
4388#endif
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398
4399
4400
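/**
 * i40e_detect_recover_hung_queue - Detect and recover a hung Tx queue
 * @q_idx: Tx queue number
 * @vsi: Pointer to VSI struct
 *
 * Detection is a two step process: on the first pass a queue with
 * pending work and interrupts disabled is only flagged as suspect;
 * if it is still stuck on the next pass, a forced descriptor
 * write-back is issued to recover it.
 **/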
4401static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
4402{
4403 struct i40e_ring *tx_ring = NULL;
4404 struct i40e_pf *pf;
4405 u32 head, val, tx_pending_hw;
4406 int i;
4407
4408 pf = vsi->back;
4409
4410
4411 for (i = 0; i < vsi->num_queue_pairs; i++) {
4412 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
4413 if (q_idx == vsi->tx_rings[i]->queue_index) {
4414 tx_ring = vsi->tx_rings[i];
4415 break;
4416 }
4417 }
4418 }
4419
4420 if (!tx_ring)
4421 return;
4422
4423
4424 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4425 val = rd32(&pf->hw,
4426 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
4427 tx_ring->vsi->base_vector - 1));
4428 else
4429 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
4430
4431 head = i40e_get_head(tx_ring);
4432
4433 tx_pending_hw = i40e_get_tx_pending(tx_ring, false);
4434
4435
4436
4437
4438
4439
4440
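/* Interrupts are disabled and Tx work is still pending in HW;
 * if the queue was already flagged on a previous pass, force a
 * write-back, otherwise flag it and check again next time.
 */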
4441 if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
4442
4443 if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
4444 &tx_ring->q_vector->hung_detected)) {
4445 netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
4446 vsi->seid, q_idx, tx_pending_hw,
4447 tx_ring->next_to_clean, head,
4448 tx_ring->next_to_use,
4449 readl(tx_ring->tail));
4450 netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
4451 vsi->seid, q_idx, val);
4452 i40e_force_wb(vsi, tx_ring->q_vector);
4453 } else {
4454
4455 set_bit(I40E_Q_VECTOR_HUNG_DETECT,
4456 &tx_ring->q_vector->hung_detected);
4457 }
4458 }
4459
4460
4461
4462
4463
4464
4465 if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
4466 (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
4467 if (napi_reschedule(&tx_ring->q_vector->napi))
4468 tx_ring->tx_stats.tx_lost_interrupt++;
4469 }
4470}
4471
4472
4473
4474
4475
4476
4477
4478
4479
4480static void i40e_detect_recover_hung(struct i40e_pf *pf)
4481{
4482 struct net_device *netdev;
4483 struct i40e_vsi *vsi;
4484 int i;
4485
4486
4487 vsi = pf->vsi[pf->lan_vsi];
4488
4489 if (!vsi)
4490 return;
4491
4492
4493 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
4494 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4495 return;
4496
4497
4498 if (vsi->type != I40E_VSI_MAIN)
4499 return;
4500
4501 netdev = vsi->netdev;
4502 if (!netdev)
4503 return;
4504
4505
4506 if (!netif_carrier_ok(netdev))
4507 return;
4508
4509
4510 for (i = 0; i < netdev->num_tx_queues; i++) {
4511 struct netdev_queue *q;
4512
4513 q = netdev_get_tx_queue(netdev, i);
4514 if (q)
4515 i40e_detect_recover_hung_queue(i, vsi);
4516 }
4517}
4518
4519
4520
4521
4522
4523
4524
4525
4526static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4527{
4528 struct i40e_dcb_app_priority_table app;
4529 struct i40e_hw *hw = &pf->hw;
4530 u8 enabled_tc = 1;
4531 u8 tc, i;
4532
4533 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4534
4535 for (i = 0; i < dcbcfg->numapps; i++) {
4536 app = dcbcfg->app[i];
4537 if (app.selector == I40E_APP_SEL_TCPIP &&
4538 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4539 tc = dcbcfg->etscfg.prioritytable[app.priority];
4540 enabled_tc |= BIT(tc);
4541 break;
4542 }
4543 }
4544
4545 return enabled_tc;
4546}
4547
4548
4549
4550
4551
4552
4553
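/**
 * i40e_dcb_get_num_tc - Get the number of TCs from DCBX config
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Return the number of TCs from given DCBx configuration
 **/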
4554static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4555{
4556 int i, tc_unused = 0;
4557 u8 num_tc = 0;
4558 u8 ret = 0;
4559
4560
4561
4562
4563
4564 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
4565 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
4566
4567
4568
4569
4570 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4571 if (num_tc & BIT(i)) {
4572 if (!tc_unused) {
4573 ret++;
4574 } else {
4575 pr_err("Non-contiguous TC - Disabling DCB\n");
4576 return 1;
4577 }
4578 } else {
4579 tc_unused = 1;
4580 }
4581 }
4582
4583
4584 if (!ret)
4585 ret = 1;
4586
4587 return ret;
4588}
4589
4590
4591
4592
4593
4594
4595
4596
4597static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4598{
4599 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4600 u8 enabled_tc = 1;
4601 u8 i;
4602
4603 for (i = 0; i < num_tc; i++)
4604 enabled_tc |= BIT(i);
4605
4606 return enabled_tc;
4607}
4608
4609
4610
4611
4612
4613
4614
4615static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4616{
4617 struct i40e_hw *hw = &pf->hw;
4618 u8 i, enabled_tc = 1;
4619 u8 num_tc = 0;
4620 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4621
4622
4623 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4624 return 1;
4625
4626
4627 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4628 return i40e_dcb_get_num_tc(dcbcfg);
4629
4630
4631 if (pf->hw.func_caps.iscsi)
4632 enabled_tc = i40e_get_iscsi_tc_map(pf);
4633 else
4634 return 1;
4635
4636 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4637 if (enabled_tc & BIT(i))
4638 num_tc++;
4639 }
4640 return num_tc;
4641}
4642
4643
4644
4645
4646
4647
4648
4649static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4650{
4651
4652 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4653 return I40E_DEFAULT_TRAFFIC_CLASS;
4654
4655
4656 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4657 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4658
4659
4660 if (pf->hw.func_caps.iscsi)
4661 return i40e_get_iscsi_tc_map(pf);
4662 else
4663 return I40E_DEFAULT_TRAFFIC_CLASS;
4664}
4665
4666
4667
4668
4669
4670
4671
4672static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4673{
4674 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4675 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4676 struct i40e_pf *pf = vsi->back;
4677 struct i40e_hw *hw = &pf->hw;
4678 i40e_status ret;
4679 u32 tc_bw_max;
4680 int i;
4681
4682
4683 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4684 if (ret) {
4685 dev_info(&pf->pdev->dev,
4686 "couldn't get PF vsi bw config, err %s aq_err %s\n",
4687 i40e_stat_str(&pf->hw, ret),
4688 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4689 return -EINVAL;
4690 }
4691
4692
4693 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
4694 NULL);
4695 if (ret) {
4696 dev_info(&pf->pdev->dev,
4697 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
4698 i40e_stat_str(&pf->hw, ret),
4699 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4700 return -EINVAL;
4701 }
4702
4703 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4704 dev_info(&pf->pdev->dev,
4705 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4706 bw_config.tc_valid_bits,
4707 bw_ets_config.tc_valid_bits);
4708
4709 }
4710
4711 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4712 vsi->bw_max_quanta = bw_config.max_bw;
4713 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4714 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4715 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4716 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4717 vsi->bw_ets_limit_credits[i] =
4718 le16_to_cpu(bw_ets_config.credits[i]);
4719
4720 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i * 4)) & 0x7);
4721 }
4722
4723 return 0;
4724}
4725
4726
4727
4728
4729
4730
4731
4732
4733
4734static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4735 u8 *bw_share)
4736{
4737 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
4738 i40e_status ret;
4739 int i;
4740
4741 bw_data.tc_valid_bits = enabled_tc;
4742 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4743 bw_data.tc_bw_credits[i] = bw_share[i];
4744
4745 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4746 NULL);
4747 if (ret) {
4748 dev_info(&vsi->back->pdev->dev,
4749 "AQ command Config VSI BW allocation per TC failed = %d\n",
4750 vsi->back->hw.aq.asq_last_status);
4751 return -EINVAL;
4752 }
4753
4754 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4755 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4756
4757 return 0;
4758}
4759
4760
4761
4762
4763
4764
4765
4766static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4767{
4768 struct net_device *netdev = vsi->netdev;
4769 struct i40e_pf *pf = vsi->back;
4770 struct i40e_hw *hw = &pf->hw;
4771 u8 netdev_tc = 0;
4772 int i;
4773 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4774
4775 if (!netdev)
4776 return;
4777
4778 if (!enabled_tc) {
4779 netdev_reset_tc(netdev);
4780 return;
4781 }
4782
4783
4784 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4785 return;
4786
4787
4788 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4789
4790
4791
4792
4793
4794
4795
4796 if (vsi->tc_config.enabled_tc & BIT(i))
4797 netdev_set_tc_queue(netdev,
4798 vsi->tc_config.tc_info[i].netdev_tc,
4799 vsi->tc_config.tc_info[i].qcount,
4800 vsi->tc_config.tc_info[i].qoffset);
4801 }
4802
4803
4804 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4805
4806 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4807
4808 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
4809 netdev_set_prio_tc_map(netdev, i, netdev_tc);
4810 }
4811}
4812
4813
4814
4815
4816
4817
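/**
 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 **/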
4818static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4819 struct i40e_vsi_context *ctxt)
4820{
4821
4822
4823
4824
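/* copy just the sections touched not the entire info
 * since not all sections are valid
 */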
4825 vsi->info.mapping_flags = ctxt->info.mapping_flags;
4826 memcpy(&vsi->info.queue_mapping,
4827 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4828 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4829 sizeof(vsi->info.tc_mapping));
4830}
4831
4832
4833
4834
4835
4836
4837
4838
4839
4840
4841
4842
4843
4844
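/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to sync with the enabled TC bitmap that the VSI is getting
 * configured.
 *
 * Returns 0 on success, negative value on failure
 **/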
4845static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4846{
4847 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
4848 struct i40e_vsi_context ctxt;
4849 int ret = 0;
4850 int i;
4851
4852
4853 if (vsi->tc_config.enabled_tc == enabled_tc)
4854 return ret;
4855
4856
4857 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4858 if (enabled_tc & BIT(i))
4859 bw_share[i] = 1;
4860 }
4861
4862 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
4863 if (ret) {
4864 dev_info(&vsi->back->pdev->dev,
4865 "Failed configuring TC map %d for VSI %d\n",
4866 enabled_tc, vsi->seid);
4867 goto out;
4868 }
4869
4870
4871 ctxt.seid = vsi->seid;
4872 ctxt.pf_num = vsi->back->hw.pf_id;
4873 ctxt.vf_num = 0;
4874 ctxt.uplink_seid = vsi->uplink_seid;
4875 ctxt.info = vsi->info;
4876 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4877
4878 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
4879 ctxt.info.valid_sections |=
4880 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
4881 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
4882 }
4883
4884
4885 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4886 if (ret) {
4887 dev_info(&vsi->back->pdev->dev,
4888 "Update vsi tc config failed, err %s aq_err %s\n",
4889 i40e_stat_str(&vsi->back->hw, ret),
4890 i40e_aq_str(&vsi->back->hw,
4891 vsi->back->hw.aq.asq_last_status));
4892 goto out;
4893 }
4894
4895 i40e_vsi_update_queue_map(vsi, &ctxt);
4896 vsi->info.valid_sections = 0;
4897
4898
4899 ret = i40e_vsi_get_bw_info(vsi);
4900 if (ret) {
4901 dev_info(&vsi->back->pdev->dev,
4902 "Failed updating vsi bw info, err %s aq_err %s\n",
4903 i40e_stat_str(&vsi->back->hw, ret),
4904 i40e_aq_str(&vsi->back->hw,
4905 vsi->back->hw.aq.asq_last_status));
4906 goto out;
4907 }
4908
4909
4910 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4911out:
4912 return ret;
4913}
4914
4915
4916
4917
4918
4919
4920
4921
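/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element
 **/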
4922int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4923{
4924 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4925 struct i40e_pf *pf = veb->pf;
4926 int ret = 0;
4927 int i;
4928
4929
4930 if (!enabled_tc || veb->enabled_tc == enabled_tc)
4931 return ret;
4932
4933 bw_data.tc_valid_bits = enabled_tc;
4934
4935
4936
4937 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4938 if (enabled_tc & BIT(i))
4939 bw_data.tc_bw_share_credits[i] = 1;
4940 }
4941
4942 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4943 &bw_data, NULL);
4944 if (ret) {
4945 dev_info(&pf->pdev->dev,
4946 "VEB bw config failed, err %s aq_err %s\n",
4947 i40e_stat_str(&pf->hw, ret),
4948 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4949 goto out;
4950 }
4951
4952
4953 ret = i40e_veb_get_bw_info(veb);
4954 if (ret) {
4955 dev_info(&pf->pdev->dev,
4956 "Failed getting veb bw config, err %s aq_err %s\n",
4957 i40e_stat_str(&pf->hw, ret),
4958 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4959 }
4960
4961out:
4962 return ret;
4963}
4964
4965#ifdef CONFIG_I40E_DCB
4966
4967
4968
4969
4970
4971
4972
4973
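/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller has already verified that the DCB configuration
 * changed.
 **/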
4974static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4975{
4976 u8 tc_map = 0;
4977 int ret;
4978 u8 v;
4979
4980
4981 tc_map = i40e_pf_get_tc_map(pf);
4982 for (v = 0; v < I40E_MAX_VEB; v++) {
4983 if (!pf->veb[v])
4984 continue;
4985 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
4986 if (ret) {
4987 dev_info(&pf->pdev->dev,
4988 "Failed configuring TC for VEB seid=%d\n",
4989 pf->veb[v]->seid);
4990
4991 }
4992 }
4993
4994
4995 for (v = 0; v < pf->num_alloc_vsi; v++) {
4996 if (!pf->vsi[v])
4997 continue;
4998
4999
5000
5001
5002
5003
5004
5005
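/* - Enable all TCs for the LAN VSI
 * - For all others keep them at TC0 for now
 */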
5006 if (v == pf->lan_vsi)
5007 tc_map = i40e_pf_get_tc_map(pf);
5008 else
5009 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
5010#ifdef I40E_FCOE
5011 if (pf->vsi[v]->type == I40E_VSI_FCOE)
5012 tc_map = i40e_get_fcoe_tc_map(pf);
5013#endif
5014
5015 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
5016 if (ret) {
5017 dev_info(&pf->pdev->dev,
5018 "Failed configuring TC for VSI seid=%d\n",
5019 pf->vsi[v]->seid);
5020
5021 } else {
5022
5023 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
5024 if (pf->vsi[v]->netdev)
5025 i40e_dcbnl_set_all(pf->vsi[v]);
5026 }
5027 }
5028}
5029
5030
5031
5032
5033
5034
5035
5036
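/**
 * i40e_resume_port_tx - Resume port Tx
 * @pf: PF struct
 *
 * Resume a port's Tx and issue a PF reset in case of failure
 **/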
5037static int i40e_resume_port_tx(struct i40e_pf *pf)
5038{
5039 struct i40e_hw *hw = &pf->hw;
5040 int ret;
5041
5042 ret = i40e_aq_resume_port_tx(hw, NULL);
5043 if (ret) {
5044 dev_info(&pf->pdev->dev,
5045 "Resume Port Tx failed, err %s aq_err %s\n",
5046 i40e_stat_str(&pf->hw, ret),
5047 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5048
5049 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5050 i40e_service_event_schedule(pf);
5051 }
5052
5053 return ret;
5054}
5055
5056
5057
5058
5059
5060
5061
5062
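/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 **/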
5063static int i40e_init_pf_dcb(struct i40e_pf *pf)
5064{
5065 struct i40e_hw *hw = &pf->hw;
5066 int err = 0;
5067
5068
5069 if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT)
5070 goto out;
5071
5072
5073 err = i40e_init_dcb(hw);
5074 if (!err) {
5075
5076 if ((!hw->func_caps.dcb) ||
5077 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
5078 dev_info(&pf->pdev->dev,
5079 "DCBX offload is not supported or is disabled for this PF.\n");
5080
5081 if (pf->flags & I40E_FLAG_MFP_ENABLED)
5082 goto out;
5083
5084 } else {
5085
5086 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
5087 DCB_CAP_DCBX_VER_IEEE;
5088
5089 pf->flags |= I40E_FLAG_DCB_CAPABLE;
5090
5091
5092
5093 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5094 pf->flags |= I40E_FLAG_DCB_ENABLED;
5095 else
5096 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5097 dev_dbg(&pf->pdev->dev,
5098 "DCBX offload is supported for this PF.\n");
5099 }
5100 } else {
5101 dev_info(&pf->pdev->dev,
5102 "Query for DCB configuration failed, err %s aq_err %s\n",
5103 i40e_stat_str(&pf->hw, err),
5104 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5105 }
5106
5107out:
5108 return err;
5109}
5110#endif
5111#define SPEED_SIZE 14
5112#define FC_SIZE 8
5113
5114
5115
5116
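/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 * @isup: true if the link is up, false otherwise
 **/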
5117void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
5118{
5119 char *speed = "Unknown";
5120 char *fc = "Unknown";
5121
5122 if (vsi->current_isup == isup)
5123 return;
5124 vsi->current_isup = isup;
5125 if (!isup) {
5126 netdev_info(vsi->netdev, "NIC Link is Down\n");
5127 return;
5128 }
5129
5130
5131
5132
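/* Warn user if the link speed on an NPAR enabled partition
 * is not at least 10GB
 */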
5133 if (vsi->back->hw.func_caps.npar_enable &&
5134 (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
5135 vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
5136 netdev_warn(vsi->netdev,
5137 "The partition detected link speed that is less than 10Gbps\n");
5138
5139 switch (vsi->back->hw.phy.link_info.link_speed) {
5140 case I40E_LINK_SPEED_40GB:
5141 speed = "40 G";
5142 break;
5143 case I40E_LINK_SPEED_20GB:
5144 speed = "20 G";
5145 break;
5146 case I40E_LINK_SPEED_10GB:
5147 speed = "10 G";
5148 break;
5149 case I40E_LINK_SPEED_1GB:
5150 speed = "1000 M";
5151 break;
5152 case I40E_LINK_SPEED_100MB:
5153 speed = "100 M";
5154 break;
5155 default:
5156 break;
5157 }
5158
5159 switch (vsi->back->hw.fc.current_mode) {
5160 case I40E_FC_FULL:
5161 fc = "RX/TX";
5162 break;
5163 case I40E_FC_TX_PAUSE:
5164 fc = "TX";
5165 break;
5166 case I40E_FC_RX_PAUSE:
5167 fc = "RX";
5168 break;
5169 default:
5170 fc = "None";
5171 break;
5172 }
5173
5174 netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
5175 speed, fc);
5176}
5177
5178
5179
5180
5181
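/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 **/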
5182static int i40e_up_complete(struct i40e_vsi *vsi)
5183{
5184 struct i40e_pf *pf = vsi->back;
5185 int err;
5186
5187 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5188 i40e_vsi_configure_msix(vsi);
5189 else
5190 i40e_configure_msi_and_legacy(vsi);
5191
5192
5193 err = i40e_vsi_control_rings(vsi, true);
5194 if (err)
5195 return err;
5196
5197 clear_bit(__I40E_DOWN, &vsi->state);
5198 i40e_napi_enable_all(vsi);
5199 i40e_vsi_enable_irq(vsi);
5200
5201 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
5202 (vsi->netdev)) {
5203 i40e_print_link_message(vsi, true);
5204 netif_tx_start_all_queues(vsi->netdev);
5205 netif_carrier_on(vsi->netdev);
5206 } else if (vsi->netdev) {
5207 i40e_print_link_message(vsi, false);
5208
5209 if ((pf->hw.phy.link_info.link_info &
5210 I40E_AQ_MEDIA_AVAILABLE) &&
5211 (!(pf->hw.phy.link_info.an_info &
5212 I40E_AQ_QUALIFIED_MODULE)))
5213 netdev_err(vsi->netdev,
5214 "the driver failed to link because an unqualified module was detected.\n");
5215 }
5216
5217
5218 if (vsi->type == I40E_VSI_FDIR) {
5219
5220 pf->fd_add_err = pf->fd_atr_cnt = 0;
5221 if (pf->fd_tcp_rule > 0) {
5222 pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
5223 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5224 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
5225 pf->fd_tcp_rule = 0;
5226 }
5227 i40e_fdir_filter_restore(vsi);
5228 }
5229
5230
5231
5232
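/* On the next run of the service_task, notify any clients of the
 * newly opened netdev
 */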
5233 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
5234 i40e_service_event_schedule(pf);
5235
5236 return 0;
5237}
5238
5239
5240
5241
5242
5243
5244
5245
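/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * change, e.g. MTU
 **/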
5246static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
5247{
5248 struct i40e_pf *pf = vsi->back;
5249
5250 WARN_ON(in_interrupt());
5251 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
5252 usleep_range(1000, 2000);
5253 i40e_down(vsi);
5254
5255 i40e_up(vsi);
5256 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
5257}
5258
5259
5260
5261
5262
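/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 **/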
5263int i40e_up(struct i40e_vsi *vsi)
5264{
5265 int err;
5266
5267 err = i40e_vsi_configure(vsi);
5268 if (!err)
5269 err = i40e_up_complete(vsi);
5270
5271 return err;
5272}
5273
5274
5275
5276
5277
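/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 **/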
5278void i40e_down(struct i40e_vsi *vsi)
5279{
5280 int i;
5281
5282
5283
5284
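/* It is assumed that the caller of this function
 * sets the vsi->state __I40E_DOWN bit.
 */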
5285 if (vsi->netdev) {
5286 netif_carrier_off(vsi->netdev);
5287 netif_tx_disable(vsi->netdev);
5288 }
5289 i40e_vsi_disable_irq(vsi);
5290 i40e_vsi_control_rings(vsi, false);
5291 i40e_napi_disable_all(vsi);
5292
5293 for (i = 0; i < vsi->num_queue_pairs; i++) {
5294 i40e_clean_tx_ring(vsi->tx_rings[i]);
5295 i40e_clean_rx_ring(vsi->rx_rings[i]);
5296 }
5297
5298 i40e_notify_client_of_netdev_close(vsi, false);
5299
5300}
5301
5302
5303
5304
5305
5306
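/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 **/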
5307static int i40e_setup_tc(struct net_device *netdev, u8 tc)
5308{
5309 struct i40e_netdev_priv *np = netdev_priv(netdev);
5310 struct i40e_vsi *vsi = np->vsi;
5311 struct i40e_pf *pf = vsi->back;
5312 u8 enabled_tc = 0;
5313 int ret = -EINVAL;
5314 int i;
5315
5316
5317 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5318 netdev_info(netdev, "DCB is not enabled for adapter\n");
5319 goto exit;
5320 }
5321
5322
5323 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
5324 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
5325 goto exit;
5326 }
5327
5328
5329 if (tc > i40e_pf_get_num_tc(pf)) {
5330 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
5331 goto exit;
5332 }
5333
5334
5335 for (i = 0; i < tc; i++)
5336 enabled_tc |= BIT(i);
5337
5338
5339 if (enabled_tc == vsi->tc_config.enabled_tc)
5340 return 0;
5341
5342
5343 i40e_quiesce_vsi(vsi);
5344
5345
5346 ret = i40e_vsi_config_tc(vsi, enabled_tc);
5347 if (ret) {
5348 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
5349 vsi->seid);
5350 goto exit;
5351 }
5352
5353
5354 i40e_unquiesce_vsi(vsi);
5355
5356exit:
5357 return ret;
5358}
5359
5360#ifdef I40E_FCOE
5361int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
5362 struct tc_to_netdev *tc)
5363#else
5364static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
5365 struct tc_to_netdev *tc)
5366#endif
5367{
5368 if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
5369 return -EINVAL;
5370 return i40e_setup_tc(netdev, tc->tc);
5371}
5372
5373
5374
5375
5376
5377
5378
5379
5380
5381
5382
5383
5384
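/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/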
5385int i40e_open(struct net_device *netdev)
5386{
5387 struct i40e_netdev_priv *np = netdev_priv(netdev);
5388 struct i40e_vsi *vsi = np->vsi;
5389 struct i40e_pf *pf = vsi->back;
5390 int err;
5391
5392
5393 if (test_bit(__I40E_TESTING, &pf->state) ||
5394 test_bit(__I40E_BAD_EEPROM, &pf->state))
5395 return -EBUSY;
5396
5397 netif_carrier_off(netdev);
5398
5399 err = i40e_vsi_open(vsi);
5400 if (err)
5401 return err;
5402
5403
5404 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
5405 TCP_FLAG_FIN) >> 16);
5406 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
5407 TCP_FLAG_FIN |
5408 TCP_FLAG_CWR) >> 16);
5409 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
5410
5411 udp_tunnel_get_rx_info(netdev);
5412
5413 return 0;
5414}
5415
5416
5417
5418
5419
5420
5421
5422
5423
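/**
 * i40e_vsi_open - Called when a network interface is made active
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI.
 *
 * Returns 0 on success, negative value on failure
 **/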
5424int i40e_vsi_open(struct i40e_vsi *vsi)
5425{
5426 struct i40e_pf *pf = vsi->back;
5427 char int_name[I40E_INT_NAME_STR_LEN];
5428 int err;
5429
5430
5431 err = i40e_vsi_setup_tx_resources(vsi);
5432 if (err)
5433 goto err_setup_tx;
5434 err = i40e_vsi_setup_rx_resources(vsi);
5435 if (err)
5436 goto err_setup_rx;
5437
5438 err = i40e_vsi_configure(vsi);
5439 if (err)
5440 goto err_setup_rx;
5441
5442 if (vsi->netdev) {
5443 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5444 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
5445 err = i40e_vsi_request_irq(vsi, int_name);
5446 if (err)
5447 goto err_setup_rx;
5448
5449
5450 err = netif_set_real_num_tx_queues(vsi->netdev,
5451 vsi->num_queue_pairs);
5452 if (err)
5453 goto err_set_queues;
5454
5455 err = netif_set_real_num_rx_queues(vsi->netdev,
5456 vsi->num_queue_pairs);
5457 if (err)
5458 goto err_set_queues;
5459
5460 } else if (vsi->type == I40E_VSI_FDIR) {
5461 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
5462 dev_driver_string(&pf->pdev->dev),
5463 dev_name(&pf->pdev->dev));
5464 err = i40e_vsi_request_irq(vsi, int_name);
5465
5466 } else {
5467 err = -EINVAL;
5468 goto err_setup_rx;
5469 }
5470
5471 err = i40e_up_complete(vsi);
5472 if (err)
5473 goto err_up_complete;
5474
5475 return 0;
5476
5477err_up_complete:
5478 i40e_down(vsi);
5479err_set_queues:
5480 i40e_vsi_free_irq(vsi);
5481err_setup_rx:
5482 i40e_vsi_free_rx_resources(vsi);
5483err_setup_tx:
5484 i40e_vsi_free_tx_resources(vsi);
5485 if (vsi == pf->vsi[pf->lan_vsi])
5486 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
5487
5488 return err;
5489}
5490
5491
5492
5493
5494
5495
5496
5497
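/**
 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
 * @pf: Pointer to PF
 *
 * This function destroys the hlist where all the Flow Director
 * filters were saved.
 **/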
5498static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5499{
5500 struct i40e_fdir_filter *filter;
5501 struct hlist_node *node2;
5502
5503 hlist_for_each_entry_safe(filter, node2,
5504 &pf->fdir_filter_list, fdir_node) {
5505 hlist_del(&filter->fdir_node);
5506 kfree(filter);
5507 }
5508 pf->fdir_pf_active_filters = 0;
5509}
5510
5511
5512
5513
5514
5515
5516
5517
5518
5519
5520
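/**
 * i40e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * this netdev interface is disabled.
 *
 * Returns 0, this is not allowed to fail
 **/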
5521int i40e_close(struct net_device *netdev)
5522{
5523 struct i40e_netdev_priv *np = netdev_priv(netdev);
5524 struct i40e_vsi *vsi = np->vsi;
5525
5526 i40e_vsi_close(vsi);
5527
5528 return 0;
5529}
5530
5531
5532
5533
5534
5535
5536
5537
5538
5539
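/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 **/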
5540void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
5541{
5542 u32 val;
5543
5544 WARN_ON(in_interrupt());
5545
5546
5547
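/* do the biggest reset indicated */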
5548 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
5549
5550
5551
5552
5553
5554
5555
5556
5557
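/* Request a Global Reset
 *
 * This will start the chip's countdown to the actual full
 * chip reset event, and a warning interrupt to be sent
 * to all PFs, including the requestor.  Our handler
 * for the warning interrupt will deal with the shutdown
 * and recovery of the switch setup.
 */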
5558 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
5559 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5560 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
5561 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5562
5563 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
5564
5565
5566
5567
5568
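/* Request a Core Reset
 *
 * Same as Global Reset, except does *not* include the MAC/PHY
 */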
5569 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
5570 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5571 val |= I40E_GLGEN_RTRIG_CORER_MASK;
5572 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5573 i40e_flush(&pf->hw);
5574
5575 } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
5576
5577
5578
5579
5580
5581
5582
5583
5584
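/* Request a PF Reset
 *
 * Resets only the PF-specific registers
 *
 * This goes directly to the tear-down and rebuild of
 * the switch, since we need to do all the recovery as
 * for the Core Reset.
 */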
5585 dev_dbg(&pf->pdev->dev, "PFR requested\n");
5586 i40e_handle_reset_warning(pf);
5587
5588 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
5589 int v;
5590
5591
5592 dev_info(&pf->pdev->dev,
5593 "VSI reinit requested\n");
5594 for (v = 0; v < pf->num_alloc_vsi; v++) {
5595 struct i40e_vsi *vsi = pf->vsi[v];
5596
5597 if (vsi != NULL &&
5598 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
5599 i40e_vsi_reinit_locked(pf->vsi[v]);
5600 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
5601 }
5602 }
5603 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
5604 int v;
5605
5606
5607 dev_info(&pf->pdev->dev, "VSI down requested\n");
5608 for (v = 0; v < pf->num_alloc_vsi; v++) {
5609 struct i40e_vsi *vsi = pf->vsi[v];
5610
5611 if (vsi != NULL &&
5612 test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
5613 set_bit(__I40E_DOWN, &vsi->state);
5614 i40e_down(vsi);
5615 clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
5616 }
5617 }
5618 } else {
5619 dev_info(&pf->pdev->dev,
5620 "bad reset request 0x%08x\n", reset_flags);
5621 }
5622}
5623
5624#ifdef CONFIG_I40E_DCB
5625
5626
5627
5628
5629
5630
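/**
 * i40e_dcb_need_reconfig - Check if DCB needs reconfiguration
 * @pf: board private structure
 * @old_cfg: current DCB config
 * @new_cfg: new DCB config
 **/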
5631bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5632 struct i40e_dcbx_config *old_cfg,
5633 struct i40e_dcbx_config *new_cfg)
5634{
5635 bool need_reconfig = false;
5636
5637
5638 if (memcmp(&new_cfg->etscfg,
5639 &old_cfg->etscfg,
5640 sizeof(new_cfg->etscfg))) {
5641
5642 if (memcmp(&new_cfg->etscfg.prioritytable,
5643 &old_cfg->etscfg.prioritytable,
5644 sizeof(new_cfg->etscfg.prioritytable))) {
5645 need_reconfig = true;
5646 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5647 }
5648
5649 if (memcmp(&new_cfg->etscfg.tcbwtable,
5650 &old_cfg->etscfg.tcbwtable,
5651 sizeof(new_cfg->etscfg.tcbwtable)))
5652 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5653
5654 if (memcmp(&new_cfg->etscfg.tsatable,
5655 &old_cfg->etscfg.tsatable,
5656 sizeof(new_cfg->etscfg.tsatable)))
5657 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5658 }
5659
5660
5661 if (memcmp(&new_cfg->pfc,
5662 &old_cfg->pfc,
5663 sizeof(new_cfg->pfc))) {
5664 need_reconfig = true;
5665 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5666 }
5667
5668
5669 if (memcmp(&new_cfg->app,
5670 &old_cfg->app,
5671 sizeof(new_cfg->app))) {
5672 need_reconfig = true;
5673 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5674 }
5675
5676 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
5677 return need_reconfig;
5678}
5679
5680
5681
5682
5683
5684
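/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/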
5685static int i40e_handle_lldp_event(struct i40e_pf *pf,
5686 struct i40e_arq_event_info *e)
5687{
5688 struct i40e_aqc_lldp_get_mib *mib =
5689 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
5690 struct i40e_hw *hw = &pf->hw;
5691 struct i40e_dcbx_config tmp_dcbx_cfg;
5692 bool need_reconfig = false;
5693 int ret = 0;
5694 u8 type;
5695
5696
5697 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
5698 return ret;
5699
5700
5701 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
5702 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
5703 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
5704 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
5705 return ret;
5706
5707
5708 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
5709 dev_dbg(&pf->pdev->dev,
5710 "LLDP event mib type %s\n", type ? "remote" : "local");
5711 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
5712
5713 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
5714 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
5715 &hw->remote_dcbx_config);
5716 goto exit;
5717 }
5718
5719
5720 tmp_dcbx_cfg = hw->local_dcbx_config;
5721
5722
5723 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
5724
5725 ret = i40e_get_dcb_config(&pf->hw);
5726 if (ret) {
5727 dev_info(&pf->pdev->dev,
5728 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
5729 i40e_stat_str(&pf->hw, ret),
5730 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5731 goto exit;
5732 }
5733
5734
5735 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
5736 sizeof(tmp_dcbx_cfg))) {
5737 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
5738 goto exit;
5739 }
5740
5741 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
5742 &hw->local_dcbx_config);
5743
5744 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
5745
5746 if (!need_reconfig)
5747 goto exit;
5748
5749
5750 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5751 pf->flags |= I40E_FLAG_DCB_ENABLED;
5752 else
5753 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5754
5755 set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5756
5757 i40e_pf_quiesce_all_vsi(pf);
5758
5759
5760 i40e_dcb_reconfigure(pf);
5761
5762 ret = i40e_resume_port_tx(pf);
5763
5764 clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5765
5766 if (ret)
5767 goto exit;
5768
5769
5770 ret = i40e_pf_wait_queues_disabled(pf);
5771 if (ret) {
5772
5773 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5774 i40e_service_event_schedule(pf);
5775 } else {
5776 i40e_pf_unquiesce_all_vsi(pf);
5777
5778 i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);
5779 }
5780
5781exit:
5782 return ret;
5783}
5784#endif
5785
5786
5787
5788
5789
5790
5791
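/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 **/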
5792void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
5793{
5794 rtnl_lock();
5795 i40e_do_reset(pf, reset_flags);
5796 rtnl_unlock();
5797}
5798
5799
5800
5801
5802
5803
5804
5805
5806
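/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VF queues
 **/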
5807static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
5808 struct i40e_arq_event_info *e)
5809{
5810 struct i40e_aqc_lan_overflow *data =
5811 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
5812 u32 queue = le32_to_cpu(data->prtdcb_rupto);
5813 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
5814 struct i40e_hw *hw = &pf->hw;
5815 struct i40e_vf *vf;
5816 u16 vf_id;
5817
5818 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
5819 queue, qtx_ctl);
5820
5821
5822 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
5823 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
5824 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
5825 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
5826 vf_id -= hw->func_caps.vf_base_id;
5827 vf = &pf->vf[vf_id];
5828 i40e_vc_notify_vf_reset(vf);
5829
5830 msleep(20);
5831 i40e_reset_vf(vf, false);
5832 }
5833}
5834
5835
5836
5837
5838
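/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 **/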
5839static void i40e_service_event_complete(struct i40e_pf *pf)
5840{
5841 WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
5842
5843
5844 smp_mb__before_atomic();
5845 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
5846}
5847
5848
5849
5850
5851
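/**
 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
 * @pf: board private structure
 **/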
5852u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
5853{
5854 u32 val, fcnt_prog;
5855
5856 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5857 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5858 return fcnt_prog;
5859}
5860
5861
5862
5863
5864
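/**
 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
 * @pf: board private structure
 **/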
5865u32 i40e_get_current_fd_count(struct i40e_pf *pf)
5866{
5867 u32 val, fcnt_prog;
5868
5869 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5870 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5871 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5872 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5873 return fcnt_prog;
5874}
5875
5876
5877
5878
5879
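/**
 * i40e_get_global_fd_count - Get total FD filters programmed on device
 * @pf: board private structure
 **/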
5880u32 i40e_get_global_fd_count(struct i40e_pf *pf)
5881{
5882 u32 val, fcnt_prog;
5883
5884 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
5885 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
5886 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
5887 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
5888 return fcnt_prog;
5889}
5890
5891
5892
5893
5894
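/**
 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
 * @pf: board private structure
 **/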
5895void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
5896{
5897 struct i40e_fdir_filter *filter;
5898 u32 fcnt_prog, fcnt_avail;
5899 struct hlist_node *node;
5900
5901 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5902 return;
5903
5904
5905
5906
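/* Check if, FD SB or ATR was auto disabled and if there is enough room
 * to re-enable
 */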
5907 fcnt_prog = i40e_get_global_fd_count(pf);
5908 fcnt_avail = pf->fdir_pf_filter_count;
5909 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
5910 (pf->fd_add_err == 0) ||
5911 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
5912 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
5913 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
5914 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
5915 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5916 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
5917 }
5918 }
5919
5920
5921
5922
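/* Wait for some more space to be available to turn on ATR. We also
 * must check that no existing ntuple rules for TCP are in effect
 */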
5923 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
5924 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
5925 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) &&
5926 (pf->fd_tcp_rule == 0)) {
5927 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5928 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5929 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
5930 }
5931 }
5932
5933
5934 if (pf->fd_inv > 0) {
5935 hlist_for_each_entry_safe(filter, node,
5936 &pf->fdir_filter_list, fdir_node) {
5937 if (filter->fd_id == pf->fd_inv) {
5938 hlist_del(&filter->fdir_node);
5939 kfree(filter);
5940 pf->fdir_pf_active_filters--;
5941 }
5942 }
5943 }
5944}
5945
5946#define I40E_MIN_FD_FLUSH_INTERVAL 10
5947#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
5948
5949
5950
5951
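/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 **/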
5952static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
5953{
5954 unsigned long min_flush_time;
5955 int flush_wait_retry = 50;
5956 bool disable_atr = false;
5957 int fd_room;
5958 int reg;
5959
5960 if (!time_after(jiffies, pf->fd_flush_timestamp +
5961 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
5962 return;
5963
5964
5965
5966
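/* If the flush is happening too quick and we have mostly SB rules we
 * should not re-enable ATR for some time.
 */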
5967 min_flush_time = pf->fd_flush_timestamp +
5968 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
5969 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
5970
5971 if (!(time_after(jiffies, min_flush_time)) &&
5972 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
5973 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5974 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
5975 disable_atr = true;
5976 }
5977
5978 pf->fd_flush_timestamp = jiffies;
5979 pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
5980
5981 wr32(&pf->hw, I40E_PFQF_CTL_1,
5982 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
5983 i40e_flush(&pf->hw);
5984 pf->fd_flush_cnt++;
5985 pf->fd_add_err = 0;
5986 do {
5987
5988 usleep_range(5000, 6000);
5989 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
5990 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
5991 break;
5992 } while (flush_wait_retry--);
5993 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
5994 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
5995 } else {
5996
5997 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
5998 if (!disable_atr)
5999 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
6000 clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
6001 if (I40E_DEBUG_FD & pf->hw.debug_mask)
6002 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
6003 }
6004}
6005
6006
6007
6008
6009
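/**
 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
 * @pf: board private structure
 **/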
6010u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
6011{
6012 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
6013}
6014
6015
6016
6017
6018
6019
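/* We can see up to 256 filter programming descriptors in transit if the
 * filters are being applied really fast; before we see the first filter
 * miss error on Rx queue 0. Accumulating enough errors before reacting
 * helps make sure we don't cause a flush too often.
 */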
6020#define I40E_MAX_FD_PROGRAM_ERROR 256
6021
6022
6023
6024
6025
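/**
 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
 * @pf: board private structure
 **/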
6026static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
6027{
6028
6029
6030 if (test_bit(__I40E_DOWN, &pf->state))
6031 return;
6032
6033 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
6034 i40e_fdir_flush_and_replay(pf);
6035
6036 i40e_fdir_check_and_reenable(pf);
6037
6038}
6039
6040
6041
6042
6043
6044
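/**
 * i40e_vsi_link_event - notify a VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 **/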
6045static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
6046{
6047 if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
6048 return;
6049
6050 switch (vsi->type) {
6051 case I40E_VSI_MAIN:
6052#ifdef I40E_FCOE
6053 case I40E_VSI_FCOE:
6054#endif
6055 if (!vsi->netdev || !vsi->netdev_registered)
6056 break;
6057
6058 if (link_up) {
6059 netif_carrier_on(vsi->netdev);
6060 netif_tx_wake_all_queues(vsi->netdev);
6061 } else {
6062 netif_carrier_off(vsi->netdev);
6063 netif_tx_stop_all_queues(vsi->netdev);
6064 }
6065 break;
6066
6067 case I40E_VSI_SRIOV:
6068 case I40E_VSI_VMDQ2:
6069 case I40E_VSI_CTRL:
6070 case I40E_VSI_IWARP:
6071 case I40E_VSI_MIRROR:
6072 default:
6073
6074 break;
6075 }
6076}
6077
6078
6079
6080
6081
6082
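/**
 * i40e_veb_link_event - notify elements on the veb of a link event
 * @veb: veb to be notified
 * @link_up: link up or down
 **/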
6083static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
6084{
6085 struct i40e_pf *pf;
6086 int i;
6087
6088 if (!veb || !veb->pf)
6089 return;
6090 pf = veb->pf;
6091
6092
6093 for (i = 0; i < I40E_MAX_VEB; i++)
6094 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
6095 i40e_veb_link_event(pf->veb[i], link_up);
6096
6097
6098 for (i = 0; i < pf->num_alloc_vsi; i++)
6099 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
6100 i40e_vsi_link_event(pf->vsi[i], link_up);
6101}
6102
6103
6104
6105
6106
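/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 **/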
6107static void i40e_link_event(struct i40e_pf *pf)
6108{
6109 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6110 u8 new_link_speed, old_link_speed;
6111 i40e_status status;
6112 bool new_link, old_link;
6113
6114
6115 pf->hw.phy.link_info_old = pf->hw.phy.link_info;
6116
6117
6118 pf->hw.phy.get_link_info = true;
6119
6120 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
6121
6122 status = i40e_get_link_status(&pf->hw, &new_link);
6123 if (status) {
6124 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
6125 status);
6126 return;
6127 }
6128
6129 old_link_speed = pf->hw.phy.link_info_old.link_speed;
6130 new_link_speed = pf->hw.phy.link_info.link_speed;
6131
6132 if (new_link == old_link &&
6133 new_link_speed == old_link_speed &&
6134 (test_bit(__I40E_DOWN, &vsi->state) ||
6135 new_link == netif_carrier_ok(vsi->netdev)))
6136 return;
6137
6138 if (!test_bit(__I40E_DOWN, &vsi->state))
6139 i40e_print_link_message(vsi, new_link);
6140
6141
6142
6143
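/* Notify the base of the switch tree connected to
 * the link.  Floating VEBs are not notified.
 */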
6144 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
6145 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
6146 else
6147 i40e_vsi_link_event(vsi, new_link);
6148
6149 if (pf->vf)
6150 i40e_vc_notify_link_state(pf);
6151
6152 if (pf->flags & I40E_FLAG_PTP)
6153 i40e_ptp_set_increment(pf);
6154}
6155
6156
6157
6158
6159
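/**
 * i40e_watchdog_subtask - periodic checks not using event driven response
 * @pf: board private structure
 **/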
6160static void i40e_watchdog_subtask(struct i40e_pf *pf)
6161{
6162 int i;
6163
6164
6165 if (test_bit(__I40E_DOWN, &pf->state) ||
6166 test_bit(__I40E_CONFIG_BUSY, &pf->state))
6167 return;
6168
6169
6170 if (time_before(jiffies, (pf->service_timer_previous +
6171 pf->service_timer_period)))
6172 return;
6173 pf->service_timer_previous = jiffies;
6174
6175 if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
6176 i40e_link_event(pf);
6177
6178
6179
6180
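/* Update the stats for active netdevs so the network stack
 * can look at updated numbers whenever it cares to
 */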
6181 for (i = 0; i < pf->num_alloc_vsi; i++)
6182 if (pf->vsi[i] && pf->vsi[i]->netdev)
6183 i40e_update_stats(pf->vsi[i]);
6184
6185 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
6186
6187 for (i = 0; i < I40E_MAX_VEB; i++)
6188 if (pf->veb[i])
6189 i40e_update_veb_stats(pf->veb[i]);
6190 }
6191
6192 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
6193}
6194
6195
6196
6197
6198
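/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 **/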
6199static void i40e_reset_subtask(struct i40e_pf *pf)
6200{
6201 u32 reset_flags = 0;
6202
6203 rtnl_lock();
6204 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
6205 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
6206 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
6207 }
6208 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
6209 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
6210 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
6211 }
6212 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
6213 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
6214 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
6215 }
6216 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
6217 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
6218 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
6219 }
6220 if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
6221 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
6222 clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
6223 }
6224
6225
6226
6227
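/* If there's a recovery already waiting, it takes
 * precedence before starting a new reset sequence.
 */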
6228 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
6229 i40e_handle_reset_warning(pf);
6230 goto unlock;
6231 }
6232
6233
6234 if (reset_flags &&
6235 !test_bit(__I40E_DOWN, &pf->state) &&
6236 !test_bit(__I40E_CONFIG_BUSY, &pf->state))
6237 i40e_do_reset(pf, reset_flags);
6238
6239unlock:
6240 rtnl_unlock();
6241}
6242
6243
6244
6245
6246
6247
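/**
 * i40e_handle_link_event - Handle link event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/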
6248static void i40e_handle_link_event(struct i40e_pf *pf,
6249 struct i40e_arq_event_info *e)
6250{
6251 struct i40e_aqc_get_link_status *status =
6252 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
6253
6254
6255
6256
6257
6258
6259
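/* Refresh the link status from the firmware rather than relying
 * on the event payload, then run the normal link event handling.
 */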
6260 i40e_link_event(pf);
6261
6262
6263 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
6264 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
6265 (!(status->link_info & I40E_AQ_LINK_UP)))
6266 dev_err(&pf->pdev->dev,
6267 "The driver failed to link because an unqualified module was detected.\n");
6268}
6269
6270
6271
6272
6273
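/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 **/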
6274static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
6275{
6276 struct i40e_arq_event_info event;
6277 struct i40e_hw *hw = &pf->hw;
6278 u16 pending, i = 0;
6279 i40e_status ret;
6280 u16 opcode;
6281 u32 oldval;
6282 u32 val;
6283
6284
6285 if (test_bit(__I40E_RESET_FAILED, &pf->state))
6286 return;
6287
6288
6289 val = rd32(&pf->hw, pf->hw.aq.arq.len);
6290 oldval = val;
6291 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
6292 if (hw->debug_mask & I40E_DEBUG_AQ)
6293 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
6294 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
6295 }
6296 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
6297 if (hw->debug_mask & I40E_DEBUG_AQ)
6298 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
6299 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
6300 pf->arq_overflows++;
6301 }
6302 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
6303 if (hw->debug_mask & I40E_DEBUG_AQ)
6304 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
6305 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
6306 }
6307 if (oldval != val)
6308 wr32(&pf->hw, pf->hw.aq.arq.len, val);
6309
6310 val = rd32(&pf->hw, pf->hw.aq.asq.len);
6311 oldval = val;
6312 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
6313 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6314 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
6315 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
6316 }
6317 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
6318 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6319 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
6320 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
6321 }
6322 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
6323 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6324 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
6325 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
6326 }
6327 if (oldval != val)
6328 wr32(&pf->hw, pf->hw.aq.asq.len, val);
6329
6330 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
6331 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
6332 if (!event.msg_buf)
6333 return;
6334
6335 do {
6336 ret = i40e_clean_arq_element(hw, &event, &pending);
6337 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
6338 break;
6339 else if (ret) {
6340 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
6341 break;
6342 }
6343
6344 opcode = le16_to_cpu(event.desc.opcode);
6345 switch (opcode) {
6346
6347 case i40e_aqc_opc_get_link_status:
6348 i40e_handle_link_event(pf, &event);
6349 break;
6350 case i40e_aqc_opc_send_msg_to_pf:
6351 ret = i40e_vc_process_vf_msg(pf,
6352 le16_to_cpu(event.desc.retval),
6353 le32_to_cpu(event.desc.cookie_high),
6354 le32_to_cpu(event.desc.cookie_low),
6355 event.msg_buf,
6356 event.msg_len);
6357 break;
6358 case i40e_aqc_opc_lldp_update_mib:
6359 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
6360#ifdef CONFIG_I40E_DCB
6361 rtnl_lock();
6362 ret = i40e_handle_lldp_event(pf, &event);
6363 rtnl_unlock();
6364#endif
6365 break;
6366 case i40e_aqc_opc_event_lan_overflow:
6367 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
6368 i40e_handle_lan_overflow_event(pf, &event);
6369 break;
6370 case i40e_aqc_opc_send_msg_to_peer:
6371 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
6372 break;
6373 case i40e_aqc_opc_nvm_erase:
6374 case i40e_aqc_opc_nvm_update:
6375 case i40e_aqc_opc_oem_post_update:
6376 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
6377 "ARQ NVM operation 0x%04x completed\n",
6378 opcode);
6379 break;
6380 default:
6381 dev_info(&pf->pdev->dev,
6382 "ARQ: Unknown event 0x%04x ignored\n",
6383 opcode);
6384 break;
6385 }
6386 } while (pending && (i++ < pf->adminq_work_limit));
6387
6388 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
6389
6390 val = rd32(hw, I40E_PFINT_ICR0_ENA);
6391 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
6392 wr32(hw, I40E_PFINT_ICR0_ENA, val);
6393 i40e_flush(hw);
6394
6395 kfree(event.msg_buf);
6396}
6397
6398
6399
6400
6401
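/**
 * i40e_verify_eeprom - make sure eeprom is good to use
 * @pf: board private structure
 **/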
6402static void i40e_verify_eeprom(struct i40e_pf *pf)
6403{
6404 int err;
6405
6406 err = i40e_diag_eeprom_test(&pf->hw);
6407 if (err) {
6408
6409 err = i40e_diag_eeprom_test(&pf->hw);
6410 if (err) {
6411 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6412 err);
6413 set_bit(__I40E_BAD_EEPROM, &pf->state);
6414 }
6415 }
6416
6417 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
6418 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6419 clear_bit(__I40E_BAD_EEPROM, &pf->state);
6420 }
6421}
6422
6423
6424
6425
6426
6427
6428
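/**
 * i40e_enable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * enable switch loop back or die - no point in a return value
 **/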
6429static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6430{
6431 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6432 struct i40e_vsi_context ctxt;
6433 int ret;
6434
6435 ctxt.seid = pf->main_vsi_seid;
6436 ctxt.pf_num = pf->hw.pf_id;
6437 ctxt.vf_num = 0;
6438 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6439 if (ret) {
6440 dev_info(&pf->pdev->dev,
6441 "couldn't get PF vsi config, err %s aq_err %s\n",
6442 i40e_stat_str(&pf->hw, ret),
6443 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6444 return;
6445 }
6446 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6447 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6448 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6449
6450 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6451 if (ret) {
6452 dev_info(&pf->pdev->dev,
6453 "update vsi switch failed, err %s aq_err %s\n",
6454 i40e_stat_str(&pf->hw, ret),
6455 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6456 }
6457}
6458
6459
6460
6461
6462
6463
6464
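/**
 * i40e_disable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * disable switch loop back or die - no point in a return value
 **/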
6465static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6466{
6467 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6468 struct i40e_vsi_context ctxt;
6469 int ret;
6470
6471 ctxt.seid = pf->main_vsi_seid;
6472 ctxt.pf_num = pf->hw.pf_id;
6473 ctxt.vf_num = 0;
6474 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6475 if (ret) {
6476 dev_info(&pf->pdev->dev,
6477 "couldn't get PF vsi config, err %s aq_err %s\n",
6478 i40e_stat_str(&pf->hw, ret),
6479 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6480 return;
6481 }
6482 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6483 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6484 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6485
6486 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6487 if (ret) {
6488 dev_info(&pf->pdev->dev,
6489 "update vsi switch failed, err %s aq_err %s\n",
6490 i40e_stat_str(&pf->hw, ret),
6491 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6492 }
6493}
6494
6495
6496
6497
6498
6499
6500
6501
6502
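/**
 * i40e_config_bridge_mode - Configure the HW bridge mode
 * @veb: pointer to the bridge instance
 *
 * Configure the loop back mode for the LAN VSI that is downlink to the
 * specified HW bridge.
 **/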
6503static void i40e_config_bridge_mode(struct i40e_veb *veb)
6504{
6505 struct i40e_pf *pf = veb->pf;
6506
6507 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
6508 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6509 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
6510 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6511 i40e_disable_pf_switch_lb(pf);
6512 else
6513 i40e_enable_pf_switch_lb(pf);
6514}
6515
6516
6517
6518
6519
6520
6521
6522
6523
6524
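/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * through our own index numbers because the seid's from the HW could
 * change across the reset.
 **/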
6525static int i40e_reconstitute_veb(struct i40e_veb *veb)
6526{
6527 struct i40e_vsi *ctl_vsi = NULL;
6528 struct i40e_pf *pf = veb->pf;
6529 int v, veb_idx;
6530 int ret;
6531
6532
6533 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
6534 if (pf->vsi[v] &&
6535 pf->vsi[v]->veb_idx == veb->idx &&
6536 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
6537 ctl_vsi = pf->vsi[v];
6538 break;
6539 }
6540 }
6541 if (!ctl_vsi) {
6542 dev_info(&pf->pdev->dev,
6543 "missing owner VSI for veb_idx %d\n", veb->idx);
6544 ret = -ENOENT;
6545 goto end_reconstitute;
6546 }
6547 if (ctl_vsi != pf->vsi[pf->lan_vsi])
6548 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6549 ret = i40e_add_vsi(ctl_vsi);
6550 if (ret) {
6551 dev_info(&pf->pdev->dev,
6552 "rebuild of veb_idx %d owner VSI failed: %d\n",
6553 veb->idx, ret);
6554 goto end_reconstitute;
6555 }
6556 i40e_vsi_reset_stats(ctl_vsi);
6557
6558
6559 ret = i40e_add_veb(veb, ctl_vsi);
6560 if (ret)
6561 goto end_reconstitute;
6562
6563 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
6564 veb->bridge_mode = BRIDGE_MODE_VEB;
6565 else
6566 veb->bridge_mode = BRIDGE_MODE_VEPA;
6567 i40e_config_bridge_mode(veb);
6568
6569
6570 for (v = 0; v < pf->num_alloc_vsi; v++) {
6571 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
6572 continue;
6573
6574 if (pf->vsi[v]->veb_idx == veb->idx) {
6575 struct i40e_vsi *vsi = pf->vsi[v];
6576
6577 vsi->uplink_seid = veb->seid;
6578 ret = i40e_add_vsi(vsi);
6579 if (ret) {
6580 dev_info(&pf->pdev->dev,
6581 "rebuild of vsi_idx %d failed: %d\n",
6582 v, ret);
6583 goto end_reconstitute;
6584 }
6585 i40e_vsi_reset_stats(vsi);
6586 }
6587 }
6588
6589
6590 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6591 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
6592 pf->veb[veb_idx]->uplink_seid = veb->seid;
6593 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
6594 if (ret)
6595 break;
6596 }
6597 }
6598
6599end_reconstitute:
6600 return ret;
6601}
6602
6603
6604
6605
6606
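/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 **/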
6607static int i40e_get_capabilities(struct i40e_pf *pf)
6608{
6609 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
6610 u16 data_size;
6611 int buf_len;
6612 int err;
6613
6614 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
6615 do {
6616 cap_buf = kzalloc(buf_len, GFP_KERNEL);
6617 if (!cap_buf)
6618 return -ENOMEM;
6619
6620
6621 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
6622 &data_size,
6623 i40e_aqc_opc_list_func_capabilities,
6624 NULL);
6625
6626 kfree(cap_buf);
6627
6628 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
6629
6630 buf_len = data_size;
6631 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
6632 dev_info(&pf->pdev->dev,
6633 "capability discovery failed, err %s aq_err %s\n",
6634 i40e_stat_str(&pf->hw, err),
6635 i40e_aq_str(&pf->hw,
6636 pf->hw.aq.asq_last_status));
6637 return -ENODEV;
6638 }
6639 } while (err);
6640
6641 if (pf->hw.debug_mask & I40E_DEBUG_USER)
6642 dev_info(&pf->pdev->dev,
6643 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
6644 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
6645 pf->hw.func_caps.num_msix_vectors,
6646 pf->hw.func_caps.num_msix_vectors_vf,
6647 pf->hw.func_caps.fd_filters_guaranteed,
6648 pf->hw.func_caps.fd_filters_best_effort,
6649 pf->hw.func_caps.num_tx_qp,
6650 pf->hw.func_caps.num_vsis);
6651
6652#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
6653 + pf->hw.func_caps.num_vfs)
6654 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
6655 dev_info(&pf->pdev->dev,
6656 "got num_vsis %d, setting num_vsis to %d\n",
6657 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
6658 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
6659 }
6660
6661 return 0;
6662}
6663
6664static int i40e_vsi_clear(struct i40e_vsi *vsi);
6665
6666
6667
6668
6669
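/**
 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
 * @pf: board private structure
 **/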
6670static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6671{
6672 struct i40e_vsi *vsi;
6673 int i;
6674
6675
6676
6677
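/* seed the Flow Director hash key registers if the NVM left them
 * uninitialized
 */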
6678 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6679 static const u32 hkey[] = {
6680 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6681 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6682 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6683 0x95b3a76d};
6684
6685 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6686 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6687 }
6688
6689 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6690 return;
6691
6692
6693 vsi = NULL;
6694 for (i = 0; i < pf->num_alloc_vsi; i++) {
6695 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6696 vsi = pf->vsi[i];
6697 break;
6698 }
6699 }
6700
6701
6702 if (!vsi) {
6703 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6704 pf->vsi[pf->lan_vsi]->seid, 0);
6705 if (!vsi) {
6706 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
6707 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6708 return;
6709 }
6710 }
6711
6712 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
6713}
6714
6715
6716
6717
6718
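/**
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 **/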
6719static void i40e_fdir_teardown(struct i40e_pf *pf)
6720{
6721 int i;
6722
6723 i40e_fdir_filter_exit(pf);
6724 for (i = 0; i < pf->num_alloc_vsi; i++) {
6725 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6726 i40e_vsi_release(pf->vsi[i]);
6727 break;
6728 }
6729 }
6730}
6731
6732
6733
6734
6735
6736
6737
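/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for PF Reset.
 **/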
6738static void i40e_prep_for_reset(struct i40e_pf *pf)
6739{
6740 struct i40e_hw *hw = &pf->hw;
6741 i40e_status ret = 0;
6742 u32 v;
6743
6744 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
6745 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
6746 return;
6747 if (i40e_check_asq_alive(&pf->hw))
6748 i40e_vc_notify_reset(pf);
6749
6750 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
6751
6752
6753 i40e_pf_quiesce_all_vsi(pf);
6754
6755 for (v = 0; v < pf->num_alloc_vsi; v++) {
6756 if (pf->vsi[v])
6757 pf->vsi[v]->seid = 0;
6758 }
6759
6760 i40e_shutdown_adminq(&pf->hw);
6761
6762
6763 if (hw->hmc.hmc_obj) {
6764 ret = i40e_shutdown_lan_hmc(hw);
6765 if (ret)
6766 dev_warn(&pf->pdev->dev,
6767 "shutdown_lan_hmc failed: %d\n", ret);
6768 }
6769}
6770
6771
6772
6773
6774
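/**
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
 **/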
6775static void i40e_send_version(struct i40e_pf *pf)
6776{
6777 struct i40e_driver_version dv;
6778
6779 dv.major_version = DRV_VERSION_MAJOR;
6780 dv.minor_version = DRV_VERSION_MINOR;
6781 dv.build_version = DRV_VERSION_BUILD;
6782 dv.subbuild_version = 0;
6783 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
6784 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6785}
6786
6787
6788
6789
6790
6791
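/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 **/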
6792static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
6793{
6794 struct i40e_hw *hw = &pf->hw;
6795 u8 set_fc_aq_fail = 0;
6796 i40e_status ret;
6797 u32 val;
6798 u32 v;
6799
6800
6801
6802
6803
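/* reset the PF in hardware; if this fails there is nothing more
 * we can do, so flag the failure and bail out
 */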
6804 ret = i40e_pf_reset(hw);
6805 if (ret) {
6806 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
6807 set_bit(__I40E_RESET_FAILED, &pf->state);
6808 goto clear_recovery;
6809 }
6810 pf->pfr_count++;
6811
6812 if (test_bit(__I40E_DOWN, &pf->state))
6813 goto clear_recovery;
6814 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
6815
6816
6817 ret = i40e_init_adminq(&pf->hw);
6818 if (ret) {
6819 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
6820 i40e_stat_str(&pf->hw, ret),
6821 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6822 goto clear_recovery;
6823 }
6824
6825
6826 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
6827 i40e_verify_eeprom(pf);
6828
6829 i40e_clear_pxe_mode(hw);
6830 ret = i40e_get_capabilities(pf);
6831 if (ret)
6832 goto end_core_reset;
6833
6834 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
6835 hw->func_caps.num_rx_qp,
6836 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
6837 if (ret) {
6838 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
6839 goto end_core_reset;
6840 }
6841 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
6842 if (ret) {
6843 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
6844 goto end_core_reset;
6845 }
6846
6847#ifdef CONFIG_I40E_DCB
6848 ret = i40e_init_pf_dcb(pf);
6849 if (ret) {
6850 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
6851 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
6852
6853 }
6854#endif
6855#ifdef I40E_FCOE
6856 i40e_init_pf_fcoe(pf);
6857
6858#endif
6859
6860 ret = i40e_setup_pf_switch(pf, reinit);
6861 if (ret)
6862 goto end_core_reset;
6863
6864
6865
6866
6867 ret = i40e_aq_set_phy_int_mask(&pf->hw,
6868 ~(I40E_AQ_EVENT_LINK_UPDOWN |
6869 I40E_AQ_EVENT_MEDIA_NA |
6870 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
6871 if (ret)
6872 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
6873 i40e_stat_str(&pf->hw, ret),
6874 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6875
6876
6877 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
6878 if (ret)
6879 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
6880 i40e_stat_str(&pf->hw, ret),
6881 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6882
6883
6884
6885
6886
6887
6888
6889
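/* Rebuild the VSIs and VEBs that existed before reset.
 * If there were VEBs but the reconstitution failed, we'll try
 * to recover minimal use by getting the basic PF VSI working.
 */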
6890 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
6891 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
6892
6893 for (v = 0; v < I40E_MAX_VEB; v++) {
6894 if (!pf->veb[v])
6895 continue;
6896
6897 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
6898 pf->veb[v]->uplink_seid == 0) {
6899 ret = i40e_reconstitute_veb(pf->veb[v]);
6900
6901 if (!ret)
6902 continue;
6903
6904
6905
6906
6907
6908
6909
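/* If rebuild of the Main VEB failed, give up on the
 * switch and set up for a minimal rebuild of the PF VSI.
 * If an orphan VEB failed, report the error but keep going.
 */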
6910 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
6911 dev_info(&pf->pdev->dev,
6912 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
6913 ret);
6914 pf->vsi[pf->lan_vsi]->uplink_seid
6915 = pf->mac_seid;
6916 break;
6917 } else if (pf->veb[v]->uplink_seid == 0) {
6918 dev_info(&pf->pdev->dev,
6919 "rebuild of orphan VEB failed: %d\n",
6920 ret);
6921 }
6922 }
6923 }
6924 }
6925
6926 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
6927 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
6928
6929 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
6930 if (ret) {
6931 dev_info(&pf->pdev->dev,
6932 "rebuild of Main VSI failed: %d\n", ret);
6933 goto end_core_reset;
6934 }
6935 }
6936
6937
6938
6939
6940
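/* Reconfigure hardware for allowing smaller MSS in the case
 * of TSO, so that we avoid the MDD being fired and causing
 * a reset in the case of small MSS+TSO.
 */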
6941#define I40E_REG_MSS 0x000E64DC
6942#define I40E_REG_MSS_MIN_MASK 0x3FF0000
6943#define I40E_64BYTE_MSS 0x400000
6944 val = rd32(hw, I40E_REG_MSS);
6945 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
6946 val &= ~I40E_REG_MSS_MIN_MASK;
6947 val |= I40E_64BYTE_MSS;
6948 wr32(hw, I40E_REG_MSS, val);
6949 }
6950
6951 if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
6952 msleep(75);
6953 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
6954 if (ret)
6955 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
6956 i40e_stat_str(&pf->hw, ret),
6957 i40e_aq_str(&pf->hw,
6958 pf->hw.aq.asq_last_status));
6959 }
6960
6961 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6962 ret = i40e_setup_misc_vector(pf);
6963
6964
6965
6966
6967
6968
6969
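/* Add a filter to drop all Flow control frames from any VSI from being
 * transmitted. By doing so we stop a malicious VF from sending out
 * PAUSE or PFC frames and potentially controlling traffic for other
 * PF/VF VSIs.
 * The FW can still send Flow control frames if enabled.
 */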
6970 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
6971 pf->main_vsi_seid);
6972
6973
6974 i40e_pf_unquiesce_all_vsi(pf);
6975
6976 if (pf->num_alloc_vfs) {
6977 for (v = 0; v < pf->num_alloc_vfs; v++)
6978 i40e_reset_vf(&pf->vf[v], true);
6979 }
6980
6981
6982 i40e_send_version(pf);
6983
6984end_core_reset:
6985 clear_bit(__I40E_RESET_FAILED, &pf->state);
6986clear_recovery:
6987 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
6988}
6989
6990
6991
6992
6993
6994
6995
6996
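/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/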
6997static void i40e_handle_reset_warning(struct i40e_pf *pf)
6998{
6999 i40e_prep_for_reset(pf);
7000 i40e_reset_and_rebuild(pf, false);
7001}
7002
7003
7004
7005
7006
7007
7008
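/**
 * i40e_handle_mdd_event
 * @pf: pointer to the PF structure
 *
 * Called from the MDD irq handler to identify possibly malicious vfs
 **/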
7009static void i40e_handle_mdd_event(struct i40e_pf *pf)
7010{
7011 struct i40e_hw *hw = &pf->hw;
7012 bool mdd_detected = false;
7013 bool pf_mdd_detected = false;
7014 struct i40e_vf *vf;
7015 u32 reg;
7016 int i;
7017
7018 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
7019 return;
7020
7021
7022 reg = rd32(hw, I40E_GL_MDET_TX);
7023 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
7024 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
7025 I40E_GL_MDET_TX_PF_NUM_SHIFT;
7026 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
7027 I40E_GL_MDET_TX_VF_NUM_SHIFT;
7028 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
7029 I40E_GL_MDET_TX_EVENT_SHIFT;
7030 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
7031 I40E_GL_MDET_TX_QUEUE_SHIFT) -
7032 pf->hw.func_caps.base_queue;
7033 if (netif_msg_tx_err(pf))
7034 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
7035 event, queue, pf_num, vf_num);
7036 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
7037 mdd_detected = true;
7038 }
7039 reg = rd32(hw, I40E_GL_MDET_RX);
7040 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
7041 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
7042 I40E_GL_MDET_RX_FUNCTION_SHIFT;
7043 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
7044 I40E_GL_MDET_RX_EVENT_SHIFT;
7045 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
7046 I40E_GL_MDET_RX_QUEUE_SHIFT) -
7047 pf->hw.func_caps.base_queue;
7048 if (netif_msg_rx_err(pf))
7049 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
7050 event, queue, func);
7051 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
7052 mdd_detected = true;
7053 }
7054
7055 if (mdd_detected) {
7056 reg = rd32(hw, I40E_PF_MDET_TX);
7057 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
7058 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
7059 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
7060 pf_mdd_detected = true;
7061 }
7062 reg = rd32(hw, I40E_PF_MDET_RX);
7063 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
7064 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
7065 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
7066 pf_mdd_detected = true;
7067 }
7068
7069 if (pf_mdd_detected) {
7070 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
7071 i40e_service_event_schedule(pf);
7072 }
7073 }
7074
7075
7076 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
7077 vf = &(pf->vf[i]);
7078 reg = rd32(hw, I40E_VP_MDET_TX(i));
7079 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
7080 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
7081 vf->num_mdd_events++;
7082 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
7083 i);
7084 }
7085
7086 reg = rd32(hw, I40E_VP_MDET_RX(i));
7087 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
7088 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
7089 vf->num_mdd_events++;
7090 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
7091 i);
7092 }
7093
7094 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
7095 dev_info(&pf->pdev->dev,
7096 "Too many MDD events on VF %d, disabled\n", i);
7097 dev_info(&pf->pdev->dev,
7098 "Use PF Control I/F to re-enable the VF\n");
7099 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
7100 }
7101 }
7102
7103
7104 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
7105 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
7106 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
7107 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
7108 i40e_flush(hw);
7109}
7110
7111
7112
7113
7114
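/**
 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/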
7115static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
7116{
7117 struct i40e_hw *hw = &pf->hw;
7118 i40e_status ret;
7119 __be16 port;
7120 int i;
7121
7122 if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
7123 return;
7124
7125 pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
7126
7127 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7128 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
7129 pf->pending_udp_bitmap &= ~BIT_ULL(i);
7130 port = pf->udp_ports[i].index;
7131 if (port)
7132 ret = i40e_aq_add_udp_tunnel(hw, port,
7133 pf->udp_ports[i].type,
7134 NULL, NULL);
7135 else
7136 ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
7137
7138 if (ret) {
7139 dev_dbg(&pf->pdev->dev,
7140 "%s %s port %d, index %d failed, err %s aq_err %s\n",
7141 pf->udp_ports[i].type ? "vxlan" : "geneve",
7142 port ? "add" : "delete",
7143 ntohs(port), i,
7144 i40e_stat_str(&pf->hw, ret),
7145 i40e_aq_str(&pf->hw,
7146 pf->hw.aq.asq_last_status));
7147 pf->udp_ports[i].index = 0;
7148 }
7149 }
7150 }
7151}
7152
7153
7154
7155
7156
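/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/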
7157static void i40e_service_task(struct work_struct *work)
7158{
7159 struct i40e_pf *pf = container_of(work,
7160 struct i40e_pf,
7161 service_task);
7162 unsigned long start_time = jiffies;
7163
7164
7165 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7166 i40e_service_event_complete(pf);
7167 return;
7168 }
7169
7170 i40e_detect_recover_hung(pf);
7171 i40e_sync_filters_subtask(pf);
7172 i40e_reset_subtask(pf);
7173 i40e_handle_mdd_event(pf);
7174 i40e_vc_process_vflr_event(pf);
7175 i40e_watchdog_subtask(pf);
7176 i40e_fdir_reinit_subtask(pf);
7177 i40e_client_subtask(pf);
7178 i40e_sync_filters_subtask(pf);
7179 i40e_sync_udp_filters_subtask(pf);
7180 i40e_clean_adminq_subtask(pf);
7181
7182 i40e_service_event_complete(pf);
7183
7184
7185
7186
7187
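	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */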
7188 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
7189 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
7190 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
7191 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
7192 i40e_service_event_schedule(pf);
7193}
7194
7195
7196
7197
7198
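/**
 * i40e_service_timer - timer callback
 * @data: pointer to PF struct
 **/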
7199static void i40e_service_timer(unsigned long data)
7200{
7201 struct i40e_pf *pf = (struct i40e_pf *)data;
7202
7203 mod_timer(&pf->service_timer,
7204 round_jiffies(jiffies + pf->service_timer_period));
7205 i40e_service_event_schedule(pf);
7206}
7207
7208
7209
7210
7211
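/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the vsi
 * @vsi: the VSI being configured
 **/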
7212static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
7213{
7214 struct i40e_pf *pf = vsi->back;
7215
7216 switch (vsi->type) {
7217 case I40E_VSI_MAIN:
7218 vsi->alloc_queue_pairs = pf->num_lan_qps;
7219 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7220 I40E_REQ_DESCRIPTOR_MULTIPLE);
7221 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7222 vsi->num_q_vectors = pf->num_lan_msix;
7223 else
7224 vsi->num_q_vectors = 1;
7225
7226 break;
7227
7228 case I40E_VSI_FDIR:
7229 vsi->alloc_queue_pairs = 1;
7230 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
7231 I40E_REQ_DESCRIPTOR_MULTIPLE);
7232 vsi->num_q_vectors = pf->num_fdsb_msix;
7233 break;
7234
7235 case I40E_VSI_VMDQ2:
7236 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
7237 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7238 I40E_REQ_DESCRIPTOR_MULTIPLE);
7239 vsi->num_q_vectors = pf->num_vmdq_msix;
7240 break;
7241
7242 case I40E_VSI_SRIOV:
7243 vsi->alloc_queue_pairs = pf->num_vf_qps;
7244 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7245 I40E_REQ_DESCRIPTOR_MULTIPLE);
7246 break;
7247
7248#ifdef I40E_FCOE
7249 case I40E_VSI_FCOE:
7250 vsi->alloc_queue_pairs = pf->num_fcoe_qps;
7251 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7252 I40E_REQ_DESCRIPTOR_MULTIPLE);
7253 vsi->num_q_vectors = pf->num_fcoe_msix;
7254 break;
7255
7256#endif
7257 default:
7258 WARN_ON(1);
7259 return -ENODATA;
7260 }
7261
7262 return 0;
7263}
7264
7265
7266
7267
7268
7269
7270
7271
7272
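/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: VSI pointer
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/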
7273static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
7274{
7275 int size;
7276 int ret = 0;
7277
7278
7279 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
7280 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
7281 if (!vsi->tx_rings)
7282 return -ENOMEM;
7283 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
7284
7285 if (alloc_qvectors) {
7286
7287 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
7288 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
7289 if (!vsi->q_vectors) {
7290 ret = -ENOMEM;
7291 goto err_vectors;
7292 }
7293 }
7294 return ret;
7295
7296err_vectors:
7297 kfree(vsi->tx_rings);
7298 return ret;
7299}
7300
7301
7302
7303
7304
7305
7306
7307
7308
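/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/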
7309static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
7310{
7311 int ret = -ENODEV;
7312 struct i40e_vsi *vsi;
7313 int vsi_idx;
7314 int i;
7315
7316
7317 mutex_lock(&pf->switch_mutex);
7318
7319
7320
7321
7322
7323
7324
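	/* The VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */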
7325 i = pf->next_vsi;
7326 while (i < pf->num_alloc_vsi && pf->vsi[i])
7327 i++;
7328 if (i >= pf->num_alloc_vsi) {
7329 i = 0;
7330 while (i < pf->next_vsi && pf->vsi[i])
7331 i++;
7332 }
7333
7334 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
7335 vsi_idx = i;
7336 } else {
7337 ret = -ENODEV;
7338 goto unlock_pf;
7339 }
7340 pf->next_vsi = ++i;
7341
7342 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
7343 if (!vsi) {
7344 ret = -ENOMEM;
7345 goto unlock_pf;
7346 }
7347 vsi->type = type;
7348 vsi->back = pf;
7349 set_bit(__I40E_DOWN, &vsi->state);
7350 vsi->flags = 0;
7351 vsi->idx = vsi_idx;
7352 vsi->int_rate_limit = 0;
7353 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
7354 pf->rss_table_size : 64;
7355 vsi->netdev_registered = false;
7356 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
7357 INIT_LIST_HEAD(&vsi->mac_filter_list);
7358 vsi->irqs_ready = false;
7359
7360 ret = i40e_set_num_rings_in_vsi(vsi);
7361 if (ret)
7362 goto err_rings;
7363
7364 ret = i40e_vsi_alloc_arrays(vsi, true);
7365 if (ret)
7366 goto err_rings;
7367
7368
7369 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
7370
7371
7372 spin_lock_init(&vsi->mac_filter_list_lock);
7373 pf->vsi[vsi_idx] = vsi;
7374 ret = vsi_idx;
7375 goto unlock_pf;
7376
7377err_rings:
7378 pf->next_vsi = i - 1;
7379 kfree(vsi);
7380unlock_pf:
7381 mutex_unlock(&pf->switch_mutex);
7382 return ret;
7383}
7384
7385
7386
7387
7388
7389
7390
7391
7392
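/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: pointer to VSI being cleared
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 **/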
7393static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
7394{
7395
7396 if (free_qvectors) {
7397 kfree(vsi->q_vectors);
7398 vsi->q_vectors = NULL;
7399 }
7400 kfree(vsi->tx_rings);
7401 vsi->tx_rings = NULL;
7402 vsi->rx_rings = NULL;
7403}
7404
7405
7406
7407
7408
7409
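/**
 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
 * and lookup table
 * @vsi: Pointer to VSI structure
 **/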
7410static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
7411{
7412 if (!vsi)
7413 return;
7414
7415 kfree(vsi->rss_hkey_user);
7416 vsi->rss_hkey_user = NULL;
7417
7418 kfree(vsi->rss_lut_user);
7419 vsi->rss_lut_user = NULL;
7420}
7421
7422
7423
7424
7425
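/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 **/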
7426static int i40e_vsi_clear(struct i40e_vsi *vsi)
7427{
7428 struct i40e_pf *pf;
7429
7430 if (!vsi)
7431 return 0;
7432
7433 if (!vsi->back)
7434 goto free_vsi;
7435 pf = vsi->back;
7436
7437 mutex_lock(&pf->switch_mutex);
7438 if (!pf->vsi[vsi->idx]) {
7439 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
7440 vsi->idx, vsi->idx, vsi, vsi->type);
7441 goto unlock_vsi;
7442 }
7443
7444 if (pf->vsi[vsi->idx] != vsi) {
7445 dev_err(&pf->pdev->dev,
7446 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
7447 pf->vsi[vsi->idx]->idx,
7448 pf->vsi[vsi->idx],
7449 pf->vsi[vsi->idx]->type,
7450 vsi->idx, vsi, vsi->type);
7451 goto unlock_vsi;
7452 }
7453
7454
7455 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
7456 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
7457
7458 i40e_vsi_free_arrays(vsi, true);
7459 i40e_clear_rss_config_user(vsi);
7460
7461 pf->vsi[vsi->idx] = NULL;
7462 if (vsi->idx < pf->next_vsi)
7463 pf->next_vsi = vsi->idx;
7464
7465unlock_vsi:
7466 mutex_unlock(&pf->switch_mutex);
7467free_vsi:
7468 kfree(vsi);
7469
7470 return 0;
7471}
7472
7473
7474
7475
7476
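/**
 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being cleaned
 **/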
7477static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
7478{
7479 int i;
7480
7481 if (vsi->tx_rings && vsi->tx_rings[0]) {
7482 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7483 kfree_rcu(vsi->tx_rings[i], rcu);
7484 vsi->tx_rings[i] = NULL;
7485 vsi->rx_rings[i] = NULL;
7486 }
7487 }
7488}
7489
7490
7491
7492
7493
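/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 **/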
7494static int i40e_alloc_rings(struct i40e_vsi *vsi)
7495{
7496 struct i40e_ring *tx_ring, *rx_ring;
7497 struct i40e_pf *pf = vsi->back;
7498 int i;
7499
7500
7501 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7502
7503 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
7504 if (!tx_ring)
7505 goto err_out;
7506
7507 tx_ring->queue_index = i;
7508 tx_ring->reg_idx = vsi->base_queue + i;
7509 tx_ring->ring_active = false;
7510 tx_ring->vsi = vsi;
7511 tx_ring->netdev = vsi->netdev;
7512 tx_ring->dev = &pf->pdev->dev;
7513 tx_ring->count = vsi->num_desc;
7514 tx_ring->size = 0;
7515 tx_ring->dcb_tc = 0;
7516 if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
7517 tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7518 tx_ring->tx_itr_setting = pf->tx_itr_default;
7519 vsi->tx_rings[i] = tx_ring;
7520
7521 rx_ring = &tx_ring[1];
7522 rx_ring->queue_index = i;
7523 rx_ring->reg_idx = vsi->base_queue + i;
7524 rx_ring->ring_active = false;
7525 rx_ring->vsi = vsi;
7526 rx_ring->netdev = vsi->netdev;
7527 rx_ring->dev = &pf->pdev->dev;
7528 rx_ring->count = vsi->num_desc;
7529 rx_ring->size = 0;
7530 rx_ring->dcb_tc = 0;
7531 rx_ring->rx_itr_setting = pf->rx_itr_default;
7532 vsi->rx_rings[i] = rx_ring;
7533 }
7534
7535 return 0;
7536
7537err_out:
7538 i40e_vsi_clear_rings(vsi);
7539 return -ENOMEM;
7540}
7541
7542
7543
7544
7545
7546
7547
7548
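/**
 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
 * @pf: board private structure
 * @vectors: the number of MSI-X vectors to request
 *
 * Returns the number of vectors reserved, or error
 **/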
7549static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7550{
7551 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
7552 I40E_MIN_MSIX, vectors);
7553 if (vectors < 0) {
7554 dev_info(&pf->pdev->dev,
7555 "MSI-X vector reservation failed: %d\n", vectors);
7556 vectors = 0;
7557 }
7558
7559 return vectors;
7560}
7561
7562
7563
7564
7565
7566
7567
7568
7569
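/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns the number of vectors reserved or negative on failure
 **/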
7570static int i40e_init_msix(struct i40e_pf *pf)
7571{
7572 struct i40e_hw *hw = &pf->hw;
7573 int vectors_left;
7574 int v_budget, i;
7575 int v_actual;
7576 int iwarp_requested = 0;
7577
7578 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7579 return -ENODEV;
7580
7581
7582
7583
7584
7585
7586
7587
7588
7589
7590
7591
7592
7593
7594
7595
7596
7597
7598
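	/* The number of vectors we'll request will be comprised of:
	 *   - one for the misc vector (AdminQ and other causes)
	 *   - the number of LAN queue pairs
	 *   - the number of VMDq pairs
	 *   - one for sideband flow director, if enabled
	 *   - the iWARP vector count, if enabled
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again.  If that still fails, we punt.
	 */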
7599 vectors_left = hw->func_caps.num_msix_vectors;
7600 v_budget = 0;
7601
7602
7603 if (vectors_left) {
7604 v_budget++;
7605 vectors_left--;
7606 }
7607
7608
7609 pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
7610 vectors_left -= pf->num_lan_msix;
7611 v_budget += pf->num_lan_msix;
7612
7613
7614 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7615 if (vectors_left) {
7616 pf->num_fdsb_msix = 1;
7617 v_budget++;
7618 vectors_left--;
7619 } else {
7620 pf->num_fdsb_msix = 0;
7621 }
7622 }
7623
7624#ifdef I40E_FCOE
7625
7626 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7627 if (!vectors_left)
7628 pf->num_fcoe_msix = 0;
7629 else if (vectors_left >= pf->num_fcoe_qps)
7630 pf->num_fcoe_msix = pf->num_fcoe_qps;
7631 else
7632 pf->num_fcoe_msix = 1;
7633 v_budget += pf->num_fcoe_msix;
7634 vectors_left -= pf->num_fcoe_msix;
7635 }
7636
7637#endif
7638
7639 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7640 iwarp_requested = pf->num_iwarp_msix;
7641
7642 if (!vectors_left)
7643 pf->num_iwarp_msix = 0;
7644 else if (vectors_left < pf->num_iwarp_msix)
7645 pf->num_iwarp_msix = 1;
7646 v_budget += pf->num_iwarp_msix;
7647 vectors_left -= pf->num_iwarp_msix;
7648 }
7649
7650
7651 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
7652 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
7653 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
7654
7655 if (!vectors_left) {
7656 pf->num_vmdq_msix = 0;
7657 pf->num_vmdq_qps = 0;
7658 } else {
7659
7660
7661
7662
7663
7664
7665 if (vmdq_vecs < vmdq_vecs_wanted)
7666 pf->num_vmdq_qps = 1;
7667 pf->num_vmdq_msix = pf->num_vmdq_qps;
7668
7669 v_budget += vmdq_vecs;
7670 vectors_left -= vmdq_vecs;
7671 }
7672 }
7673
7674 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
7675 GFP_KERNEL);
7676 if (!pf->msix_entries)
7677 return -ENOMEM;
7678
7679 for (i = 0; i < v_budget; i++)
7680 pf->msix_entries[i].entry = i;
7681 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
7682
7683 if (v_actual < I40E_MIN_MSIX) {
7684 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
7685 kfree(pf->msix_entries);
7686 pf->msix_entries = NULL;
7687 pci_disable_msix(pf->pdev);
7688 return -ENODEV;
7689
7690 } else if (v_actual == I40E_MIN_MSIX) {
7691
7692 pf->num_vmdq_vsis = 0;
7693 pf->num_vmdq_qps = 0;
7694 pf->num_lan_qps = 1;
7695 pf->num_lan_msix = 1;
7696
	} else if (v_actual != v_budget) {
		/* If we got fewer vectors than requested, start with no
		 * vectors for the special features and then allocate
		 * vectors to some of these features based on the policy
		 * and at the end disable the features that did not get
		 * any vectors.
		 */
		int vec;
7704
7705 dev_info(&pf->pdev->dev,
7706 "MSI-X vector limit reached, attempting to redistribute vectors\n");
7707
7708 vec = v_actual - 1;
7709
7710
7711 pf->num_vmdq_msix = 1;
7712 pf->num_vmdq_vsis = 1;
7713 pf->num_vmdq_qps = 1;
7714#ifdef I40E_FCOE
7715 pf->num_fcoe_qps = 0;
7716 pf->num_fcoe_msix = 0;
7717#endif
7718
7719
7720 switch (vec) {
7721 case 2:
7722 pf->num_lan_msix = 1;
7723 break;
7724 case 3:
7725 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7726 pf->num_lan_msix = 1;
7727 pf->num_iwarp_msix = 1;
7728 } else {
7729 pf->num_lan_msix = 2;
7730 }
7731#ifdef I40E_FCOE
7732
7733 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7734 pf->num_lan_msix = 1;
7735 pf->num_fcoe_msix = 1;
7736 }
7737#endif
7738 break;
7739 default:
7740 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7741 pf->num_iwarp_msix = min_t(int, (vec / 3),
7742 iwarp_requested);
7743 pf->num_vmdq_vsis = min_t(int, (vec / 3),
7744 I40E_DEFAULT_NUM_VMDQ_VSI);
7745 } else {
7746 pf->num_vmdq_vsis = min_t(int, (vec / 2),
7747 I40E_DEFAULT_NUM_VMDQ_VSI);
7748 }
7749 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7750 pf->num_fdsb_msix = 1;
7751 vec--;
7752 }
7753 pf->num_lan_msix = min_t(int,
7754 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
7755 pf->num_lan_msix);
7756 pf->num_lan_qps = pf->num_lan_msix;
7757#ifdef I40E_FCOE
7758
7759 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7760 pf->num_fcoe_msix = 1;
7761 vec--;
7762 }
7763#endif
7764 break;
7765 }
7766 }
7767
7768 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
7769 (pf->num_fdsb_msix == 0)) {
7770 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
7771 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7772 }
7773 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
7774 (pf->num_vmdq_msix == 0)) {
7775 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
7776 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
7777 }
7778
7779 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
7780 (pf->num_iwarp_msix == 0)) {
7781 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
7782 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
7783 }
7784#ifdef I40E_FCOE
7785
7786 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
7787 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
7788 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
7789 }
7790#endif
7791 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
7792 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
7793 pf->num_lan_msix,
7794 pf->num_vmdq_msix * pf->num_vmdq_vsis,
7795 pf->num_fdsb_msix,
7796 pf->num_iwarp_msix);
7797
7798 return v_actual;
7799}
7800
7801
7802
7803
7804
7805
7806
7807
7808
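/**
 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the vsi struct
 * @cpu: cpu to be used on affinity_mask
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/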
7809static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
7810{
7811 struct i40e_q_vector *q_vector;
7812
7813
7814 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
7815 if (!q_vector)
7816 return -ENOMEM;
7817
7818 q_vector->vsi = vsi;
7819 q_vector->v_idx = v_idx;
7820 cpumask_set_cpu(cpu, &q_vector->affinity_mask);
7821
7822 if (vsi->netdev)
7823 netif_napi_add(vsi->netdev, &q_vector->napi,
7824 i40e_napi_poll, NAPI_POLL_WEIGHT);
7825
7826 q_vector->rx.latency_range = I40E_LOW_LATENCY;
7827 q_vector->tx.latency_range = I40E_LOW_LATENCY;
7828
7829
7830 vsi->q_vectors[v_idx] = q_vector;
7831
7832 return 0;
7833}
7834
7835
7836
7837
7838
7839
7840
7841
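/**
 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/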
7842static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
7843{
7844 struct i40e_pf *pf = vsi->back;
7845 int err, v_idx, num_q_vectors, current_cpu;
7846
7847
7848 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7849 num_q_vectors = vsi->num_q_vectors;
7850 else if (vsi == pf->vsi[pf->lan_vsi])
7851 num_q_vectors = 1;
7852 else
7853 return -EINVAL;
7854
7855 current_cpu = cpumask_first(cpu_online_mask);
7856
7857 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
7858 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
7859 if (err)
7860 goto err_out;
7861 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
7862 if (unlikely(current_cpu >= nr_cpu_ids))
7863 current_cpu = cpumask_first(cpu_online_mask);
7864 }
7865
7866 return 0;
7867
7868err_out:
7869 while (v_idx--)
7870 i40e_free_q_vector(vsi, v_idx);
7871
7872 return err;
7873}
7874
7875
7876
7877
7878
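/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 **/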
7879static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
7880{
7881 int vectors = 0;
7882 ssize_t size;
7883
7884 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7885 vectors = i40e_init_msix(pf);
7886 if (vectors < 0) {
7887 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
7888 I40E_FLAG_IWARP_ENABLED |
7889#ifdef I40E_FCOE
7890 I40E_FLAG_FCOE_ENABLED |
7891#endif
7892 I40E_FLAG_RSS_ENABLED |
7893 I40E_FLAG_DCB_CAPABLE |
7894 I40E_FLAG_DCB_ENABLED |
7895 I40E_FLAG_SRIOV_ENABLED |
7896 I40E_FLAG_FD_SB_ENABLED |
7897 I40E_FLAG_FD_ATR_ENABLED |
7898 I40E_FLAG_VMDQ_ENABLED);
7899
7900
7901 i40e_determine_queue_usage(pf);
7902 }
7903 }
7904
7905 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
7906 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
7907 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
7908 vectors = pci_enable_msi(pf->pdev);
7909 if (vectors < 0) {
7910 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
7911 vectors);
7912 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
7913 }
7914 vectors = 1;
7915 }
7916
7917 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
7918 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
7919
7920
7921 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
7922 pf->irq_pile = kzalloc(size, GFP_KERNEL);
7923 if (!pf->irq_pile) {
7924 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
7925 return -ENOMEM;
7926 }
7927 pf->irq_pile->num_entries = vectors;
7928 pf->irq_pile->search_hint = 0;
7929
7930
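	/* track first vector for misc interrupts, ignore return */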
7931 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
7932
7933 return 0;
7934}
7935
7936
7937
7938
7939
7940
7941
7942
7943
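/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors.  This is not used
 * when in MSI or Legacy interrupt mode.
 **/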
7944static int i40e_setup_misc_vector(struct i40e_pf *pf)
7945{
7946 struct i40e_hw *hw = &pf->hw;
7947 int err = 0;
7948
7949
7950
7951
7952 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7953 err = request_irq(pf->msix_entries[0].vector,
7954 i40e_intr, 0, pf->int_name, pf);
7955 if (err) {
7956 dev_info(&pf->pdev->dev,
7957 "request_irq for %s failed: %d\n",
7958 pf->int_name, err);
7959 return -EFAULT;
7960 }
7961 }
7962
7963 i40e_enable_misc_int_causes(pf);
7964
7965
7966 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
7967 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
7968
7969 i40e_flush(hw);
7970
7971 i40e_irq_dynamic_enable_icr0(pf, true);
7972
7973 return err;
7974}
7975
7976
7977
7978
7979
7980
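/**
 * i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 **/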
7981static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
7982 u8 *lut, u16 lut_size)
7983{
7984 struct i40e_pf *pf = vsi->back;
7985 struct i40e_hw *hw = &pf->hw;
7986 int ret = 0;
7987
7988 if (seed) {
7989 struct i40e_aqc_get_set_rss_key_data *seed_dw =
7990 (struct i40e_aqc_get_set_rss_key_data *)seed;
7991 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
7992 if (ret) {
7993 dev_info(&pf->pdev->dev,
7994 "Cannot set RSS key, err %s aq_err %s\n",
7995 i40e_stat_str(hw, ret),
7996 i40e_aq_str(hw, hw->aq.asq_last_status));
7997 return ret;
7998 }
7999 }
8000 if (lut) {
		bool pf_lut = (vsi->type == I40E_VSI_MAIN);
8002
8003 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
8004 if (ret) {
8005 dev_info(&pf->pdev->dev,
8006 "Cannot set RSS lut, err %s aq_err %s\n",
8007 i40e_stat_str(hw, ret),
8008 i40e_aq_str(hw, hw->aq.asq_last_status));
8009 return ret;
8010 }
8011 }
8012 return ret;
8013}
8014
8015
8016
8017
8018
8019
8020
8021
8022
8023
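/**
 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
 * @vsi: Pointer to vsi structure
 * @seed: Buffer to store the hash keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 **/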
8024static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
8025 u8 *lut, u16 lut_size)
8026{
8027 struct i40e_pf *pf = vsi->back;
8028 struct i40e_hw *hw = &pf->hw;
8029 int ret = 0;
8030
8031 if (seed) {
8032 ret = i40e_aq_get_rss_key(hw, vsi->id,
8033 (struct i40e_aqc_get_set_rss_key_data *)seed);
8034 if (ret) {
8035 dev_info(&pf->pdev->dev,
8036 "Cannot get RSS key, err %s aq_err %s\n",
8037 i40e_stat_str(&pf->hw, ret),
8038 i40e_aq_str(&pf->hw,
8039 pf->hw.aq.asq_last_status));
8040 return ret;
8041 }
8042 }
8043
8044 if (lut) {
		bool pf_lut = (vsi->type == I40E_VSI_MAIN);
8046
8047 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
8048 if (ret) {
8049 dev_info(&pf->pdev->dev,
8050 "Cannot get RSS lut, err %s aq_err %s\n",
8051 i40e_stat_str(&pf->hw, ret),
8052 i40e_aq_str(&pf->hw,
8053 pf->hw.aq.asq_last_status));
8054 return ret;
8055 }
8056 }
8057
8058 return ret;
8059}
8060
8061
8062
8063
8064
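/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/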
8065static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
8066{
8067 u8 seed[I40E_HKEY_ARRAY_SIZE];
8068 struct i40e_pf *pf = vsi->back;
8069 u8 *lut;
8070 int ret;
8071
8072 if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
8073 return 0;
8074
8075 if (!vsi->rss_size)
8076 vsi->rss_size = min_t(int, pf->alloc_rss_size,
8077 vsi->num_queue_pairs);
8078 if (!vsi->rss_size)
8079 return -EINVAL;
8080
8081 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8082 if (!lut)
8083 return -ENOMEM;
8084
8085
8086
8087 if (vsi->rss_lut_user)
8088 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8089 else
8090 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8091 if (vsi->rss_hkey_user)
8092 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8093 else
8094 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8095 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
8096 kfree(lut);
8097
8098 return ret;
8099}
8100
8101
8102
8103
8104
8105
8106
8107
8108
8109
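/**
 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/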
8110static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
8111 const u8 *lut, u16 lut_size)
8112{
8113 struct i40e_pf *pf = vsi->back;
8114 struct i40e_hw *hw = &pf->hw;
8115 u16 vf_id = vsi->vf_id;
8116 u8 i;
8117
8118
8119 if (seed) {
8120 u32 *seed_dw = (u32 *)seed;
8121
8122 if (vsi->type == I40E_VSI_MAIN) {
8123 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8124 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
8125 seed_dw[i]);
8126 } else if (vsi->type == I40E_VSI_SRIOV) {
8127 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
8128 i40e_write_rx_ctl(hw,
8129 I40E_VFQF_HKEY1(i, vf_id),
8130 seed_dw[i]);
8131 } else {
8132 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
8133 }
8134 }
8135
8136 if (lut) {
8137 u32 *lut_dw = (u32 *)lut;
8138
8139 if (vsi->type == I40E_VSI_MAIN) {
8140 if (lut_size != I40E_HLUT_ARRAY_SIZE)
8141 return -EINVAL;
8142 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8143 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
8144 } else if (vsi->type == I40E_VSI_SRIOV) {
8145 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
8146 return -EINVAL;
8147 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
8148 i40e_write_rx_ctl(hw,
8149 I40E_VFQF_HLUT1(i, vf_id),
8150 lut_dw[i]);
8151 } else {
8152 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
8153 }
8154 }
8155 i40e_flush(hw);
8156
8157 return 0;
8158}
8159
8160
8161
8162
8163
8164
8165
8166
8167
8168
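/**
 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 **/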
8169static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
8170 u8 *lut, u16 lut_size)
8171{
8172 struct i40e_pf *pf = vsi->back;
8173 struct i40e_hw *hw = &pf->hw;
8174 u16 i;
8175
8176 if (seed) {
8177 u32 *seed_dw = (u32 *)seed;
8178
8179 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8180 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
8181 }
8182 if (lut) {
8183 u32 *lut_dw = (u32 *)lut;
8184
8185 if (lut_size != I40E_HLUT_ARRAY_SIZE)
8186 return -EINVAL;
8187 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8188 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
8189 }
8190
8191 return 0;
8192}
8193
8194
8195
8196
8197
8198
8199
8200
8201
8202
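/**
 * i40e_config_rss - Configure RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/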
8203int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8204{
8205 struct i40e_pf *pf = vsi->back;
8206
8207 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8208 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
8209 else
8210 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
8211}
8212
8213
8214
8215
8216
8217
8218
8219
8220
8221
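/**
 * i40e_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 **/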
8222int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8223{
8224 struct i40e_pf *pf = vsi->back;
8225
8226 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8227 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
8228 else
8229 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
8230}
8231
8232
8233
8234
8235
8236
8237
8238
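/**
 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
 * @pf: Pointer to board private structure
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 **/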
8239void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
8240 u16 rss_table_size, u16 rss_size)
8241{
8242 u16 i;
8243
8244 for (i = 0; i < rss_table_size; i++)
8245 lut[i] = i % rss_size;
8246}
8247
8248
8249
8250
8251
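/**
 * i40e_pf_config_rss - Prepare for RSS if used
 * @pf: board private structure
 **/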
8252static int i40e_pf_config_rss(struct i40e_pf *pf)
8253{
8254 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8255 u8 seed[I40E_HKEY_ARRAY_SIZE];
8256 u8 *lut;
8257 struct i40e_hw *hw = &pf->hw;
8258 u32 reg_val;
8259 u64 hena;
8260 int ret;
8261
8262
8263 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
8264 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
8265 hena |= i40e_pf_get_default_rss_hena(pf);
8266
8267 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
8268 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
8269
8270
8271 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
8272 reg_val = (pf->rss_table_size == 512) ?
8273 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
8274 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
8275 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
8276
8277
8278 if (!vsi->rss_size)
8279 vsi->rss_size = min_t(int, pf->alloc_rss_size,
8280 vsi->num_queue_pairs);
8281 if (!vsi->rss_size)
8282 return -EINVAL;
8283
8284 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8285 if (!lut)
8286 return -ENOMEM;
8287
8288
8289 if (vsi->rss_lut_user)
8290 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8291 else
8292 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8293
8294
8295
8296
8297 if (vsi->rss_hkey_user)
8298 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8299 else
8300 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8301 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
8302 kfree(lut);
8303
8304 return ret;
8305}
8306
8307
8308
8309
8310
8311
8312
8313
8314
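/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for rss.
 *
 * returns 0 if rss is not enabled, if enabled returns the final rss queue
 * count which may be different from the requested queue count.
 **/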
8315int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
8316{
8317 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8318 int new_rss_size;
8319
8320 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
8321 return 0;
8322
8323 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
8324
8325 if (queue_count != vsi->num_queue_pairs) {
8326 vsi->req_queue_pairs = queue_count;
8327 i40e_prep_for_reset(pf);
8328
8329 pf->alloc_rss_size = new_rss_size;
8330
8331 i40e_reset_and_rebuild(pf, true);
8332
8333
8334
8335
8336 if (queue_count < vsi->rss_size) {
8337 i40e_clear_rss_config_user(vsi);
8338 dev_dbg(&pf->pdev->dev,
8339 "discard user configured hash keys and lut\n");
8340 }
8341
8342
8343 vsi->rss_size = min_t(int, pf->alloc_rss_size,
8344 vsi->num_queue_pairs);
8345
8346 i40e_pf_config_rss(pf);
8347 }
8348 dev_info(&pf->pdev->dev, "RSS count/HW max RSS count: %d/%d\n",
8349 pf->alloc_rss_size, pf->rss_size_max);
8350 return pf->alloc_rss_size;
8351}
8352
8353
8354
8355
8356
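/**
 * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
 * @pf: board private structure
 **/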
8357i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
8358{
8359 i40e_status status;
8360 bool min_valid, max_valid;
8361 u32 max_bw, min_bw;
8362
8363 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
8364 &min_valid, &max_valid);
8365
8366 if (!status) {
8367 if (min_valid)
8368 pf->npar_min_bw = min_bw;
8369 if (max_valid)
8370 pf->npar_max_bw = max_bw;
8371 }
8372
8373 return status;
8374}
8375
8376
8377
8378
8379
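/**
 * i40e_set_npar_bw_setting - Set BW settings for this PF partition
 * @pf: board private structure
 **/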
8380i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
8381{
8382 struct i40e_aqc_configure_partition_bw_data bw_data;
8383 i40e_status status;
8384
8385
8386 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
8387 bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
8388 bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
8389
8390
8391 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
8392
8393 return status;
8394}
8395
8396
8397
8398
8399
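/**
 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
 * @pf: board private structure
 **/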
8400i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
8401{
8402
8403 enum i40e_admin_queue_err last_aq_status;
8404 i40e_status ret;
8405 u16 nvm_word;
8406
8407 if (pf->hw.partition_id != 1) {
8408 dev_info(&pf->pdev->dev,
8409 "Commit BW only works on partition 1! This is partition %d",
8410 pf->hw.partition_id);
8411 ret = I40E_NOT_SUPPORTED;
8412 goto bw_commit_out;
8413 }
8414
8415
8416 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
8417 last_aq_status = pf->hw.aq.asq_last_status;
8418 if (ret) {
8419 dev_info(&pf->pdev->dev,
8420 "Cannot acquire NVM for read access, err %s aq_err %s\n",
8421 i40e_stat_str(&pf->hw, ret),
8422 i40e_aq_str(&pf->hw, last_aq_status));
8423 goto bw_commit_out;
8424 }
8425
8426
8427 ret = i40e_aq_read_nvm(&pf->hw,
8428 I40E_SR_NVM_CONTROL_WORD,
8429 0x10, sizeof(nvm_word), &nvm_word,
8430 false, NULL);
8431
8432
8433
8434 last_aq_status = pf->hw.aq.asq_last_status;
8435 i40e_release_nvm(&pf->hw);
8436 if (ret) {
8437 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
8438 i40e_stat_str(&pf->hw, ret),
8439 i40e_aq_str(&pf->hw, last_aq_status));
8440 goto bw_commit_out;
8441 }
8442
8443
8444 msleep(50);
8445
8446
8447 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
8448 last_aq_status = pf->hw.aq.asq_last_status;
8449 if (ret) {
8450 dev_info(&pf->pdev->dev,
8451 "Cannot acquire NVM for write access, err %s aq_err %s\n",
8452 i40e_stat_str(&pf->hw, ret),
8453 i40e_aq_str(&pf->hw, last_aq_status));
8454 goto bw_commit_out;
8455 }
8456
8457
8458
8459
8460 ret = i40e_aq_update_nvm(&pf->hw,
8461 I40E_SR_NVM_CONTROL_WORD,
8462 0x10, sizeof(nvm_word),
8463 &nvm_word, true, NULL);
8464
8465
8466
8467 last_aq_status = pf->hw.aq.asq_last_status;
8468 i40e_release_nvm(&pf->hw);
8469 if (ret)
8470 dev_info(&pf->pdev->dev,
8471 "BW settings NOT SAVED, err %s aq_err %s\n",
8472 i40e_stat_str(&pf->hw, ret),
8473 i40e_aq_str(&pf->hw, last_aq_status));
8474bw_commit_out:
8475
8476 return ret;
8477}
8478
8479
8480
8481
8482
8483
8484
8485
8486
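/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/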
8487static int i40e_sw_init(struct i40e_pf *pf)
8488{
8489 int err = 0;
8490 int size;
8491
8492 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
8493 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
8494 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
8495 if (I40E_DEBUG_USER & debug)
8496 pf->hw.debug_mask = debug;
8497 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
8498 I40E_DEFAULT_MSG_ENABLE);
8499 }
8500
8501
8502 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
8503 I40E_FLAG_MSI_ENABLED |
8504 I40E_FLAG_MSIX_ENABLED;
8505
8506
8507 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
8508 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
8509
8510
8511
8512
8513 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
8514 pf->alloc_rss_size = 1;
8515 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
8516 pf->rss_size_max = min_t(int, pf->rss_size_max,
8517 pf->hw.func_caps.num_tx_qp);
8518 if (pf->hw.func_caps.rss) {
8519 pf->flags |= I40E_FLAG_RSS_ENABLED;
8520 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
8521 num_online_cpus());
8522 }
8523
8524
8525 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
8526 pf->flags |= I40E_FLAG_MFP_ENABLED;
8527 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
8528 if (i40e_get_npar_bw_setting(pf))
8529 dev_warn(&pf->pdev->dev,
8530 "Could not get NPAR bw settings\n");
8531 else
8532 dev_info(&pf->pdev->dev,
8533 "Min BW = %8.8x, Max BW = %8.8x\n",
8534 pf->npar_min_bw, pf->npar_max_bw);
8535 }
8536
8537
8538 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
8539 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
8540 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
8541 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
8542 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
8543 pf->hw.num_partitions > 1)
8544 dev_info(&pf->pdev->dev,
8545 "Flow Director Sideband mode Disabled in MFP mode\n");
8546 else
8547 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8548 pf->fdir_pf_filter_count =
8549 pf->hw.func_caps.fd_filters_guaranteed;
8550 pf->hw.fdir_shared_filter_count =
8551 pf->hw.func_caps.fd_filters_best_effort;
8552 }
8553
8554 if (i40e_is_mac_710(&pf->hw) &&
8555 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
8556 (pf->hw.aq.fw_maj_ver < 4))) {
8557 pf->flags |= I40E_FLAG_RESTART_AUTONEG;
8558
8559 pf->flags |= I40E_FLAG_NO_DCB_SUPPORT;
8560 }
8561
8562
8563 if (i40e_is_mac_710(&pf->hw) &&
8564 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
8565 (pf->hw.aq.fw_maj_ver < 4)))
8566 pf->flags |= I40E_FLAG_STOP_FW_LLDP;
8567
8568
8569 if (i40e_is_mac_710(&pf->hw) &&
8570 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
8571 (pf->hw.aq.fw_maj_ver >= 5)))
8572 pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB;
8573
8574 if (pf->hw.func_caps.vmdq) {
8575 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
8576 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
8577 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
8578 }
8579
8580 if (pf->hw.func_caps.iwarp) {
8581 pf->flags |= I40E_FLAG_IWARP_ENABLED;
8582
8583 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
8584 }
8585
8586#ifdef I40E_FCOE
8587 i40e_init_pf_fcoe(pf);
8588
8589#endif
8590#ifdef CONFIG_PCI_IOV
8591 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
8592 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
8593 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
8594 pf->num_req_vfs = min_t(int,
8595 pf->hw.func_caps.num_vfs,
8596 I40E_MAX_VF_COUNT);
8597 }
8598#endif
8599 if (pf->hw.mac.type == I40E_MAC_X722) {
8600 pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
8601 I40E_FLAG_128_QP_RSS_CAPABLE |
8602 I40E_FLAG_HW_ATR_EVICT_CAPABLE |
8603 I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
8604 I40E_FLAG_WB_ON_ITR_CAPABLE |
8605 I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
8606 I40E_FLAG_NO_PCI_LINK_CHECK |
8607 I40E_FLAG_USE_SET_LLDP_MIB |
8608 I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
8609 } else if ((pf->hw.aq.api_maj_ver > 1) ||
8610 ((pf->hw.aq.api_maj_ver == 1) &&
8611 (pf->hw.aq.api_min_ver > 4))) {
8612
8613 pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
8614 pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
8615 } else {
8616 pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
8617 }
8618
8619 pf->eeprom_version = 0xDEAD;
8620 pf->lan_veb = I40E_NO_VEB;
8621 pf->lan_vsi = I40E_NO_VSI;
8622
8623
8624 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
8625
8626
8627 size = sizeof(struct i40e_lump_tracking)
8628 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
8629 pf->qp_pile = kzalloc(size, GFP_KERNEL);
8630 if (!pf->qp_pile) {
8631 err = -ENOMEM;
8632 goto sw_init_done;
8633 }
8634 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
8635 pf->qp_pile->search_hint = 0;
8636
8637 pf->tx_timeout_recovery_level = 1;
8638
8639 mutex_init(&pf->switch_mutex);
8640
8641
8642 if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
8643 i40e_set_npar_bw_setting(pf);
8644
8645sw_init_done:
8646 return err;
8647}
8648
8649
8650
8651
8652
8653
8654
8655
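/**
 * i40e_set_ntuple - set the ntuple feature flag and take action
 * @pf: board private structure to initialize
 * @features: the feature set that the stack is suggesting
 *
 * returns a bool to indicate if reset needs to happen
 **/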
8656bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
8657{
8658 bool need_reset = false;
8659
8660
8661
8662
8663 if (features & NETIF_F_NTUPLE) {
8664
8665 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
8666 need_reset = true;
8667
8668 if (pf->num_fdsb_msix > 0)
8669 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8670 } else {
8671
8672 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8673 need_reset = true;
8674 i40e_fdir_filter_exit(pf);
8675 }
8676 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8677 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
8678
8679 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
8680 pf->fdir_pf_active_filters = 0;
8681
8682 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8683 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
8684 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
8685 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8686 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
8687 }
8688 }
8689 return need_reset;
8690}
8691
8692
8693
8694
8695
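/**
 * i40e_clear_rss_lut - clear the rx hash lookup table
 * @vsi: the VSI being configured
 **/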
8696static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
8697{
8698 struct i40e_pf *pf = vsi->back;
8699 struct i40e_hw *hw = &pf->hw;
8700 u16 vf_id = vsi->vf_id;
8701 u8 i;
8702
8703 if (vsi->type == I40E_VSI_MAIN) {
8704 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8705 wr32(hw, I40E_PFQF_HLUT(i), 0);
8706 } else if (vsi->type == I40E_VSI_SRIOV) {
8707 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
8708 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
8709 } else {
8710 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
8711 }
8712}
8713
8714
8715
8716
8717
8718
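/**
 * i40e_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 **/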
8719static int i40e_set_features(struct net_device *netdev,
8720 netdev_features_t features)
8721{
8722 struct i40e_netdev_priv *np = netdev_priv(netdev);
8723 struct i40e_vsi *vsi = np->vsi;
8724 struct i40e_pf *pf = vsi->back;
8725 bool need_reset;
8726
8727 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
8728 i40e_pf_config_rss(pf);
8729 else if (!(features & NETIF_F_RXHASH) &&
8730 netdev->features & NETIF_F_RXHASH)
8731 i40e_clear_rss_lut(vsi);
8732
8733 if (features & NETIF_F_HW_VLAN_CTAG_RX)
8734 i40e_vlan_stripping_enable(vsi);
8735 else
8736 i40e_vlan_stripping_disable(vsi);
8737
8738 need_reset = i40e_set_ntuple(pf, features);
8739
8740 if (need_reset)
8741 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8742
8743 return 0;
8744}
8745
8746
8747
8748
8749
8750
8751
8752
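/**
 * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
 * @pf: board private structure
 * @port: The UDP port to look up
 *
 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
 **/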
8753static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
8754{
8755 u8 i;
8756
8757 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8758 if (pf->udp_ports[i].index == port)
8759 return i;
8760 }
8761
8762 return i;
8763}
8764
8765
8766
8767
8768
8769
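/**
 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 **/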
8770static void i40e_udp_tunnel_add(struct net_device *netdev,
8771 struct udp_tunnel_info *ti)
8772{
8773 struct i40e_netdev_priv *np = netdev_priv(netdev);
8774 struct i40e_vsi *vsi = np->vsi;
8775 struct i40e_pf *pf = vsi->back;
8776 __be16 port = ti->port;
8777 u8 next_idx;
8778 u8 idx;
8779
8780 idx = i40e_get_udp_port_idx(pf, port);
8781
8782
8783 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8784 netdev_info(netdev, "port %d already offloaded\n",
8785 ntohs(port));
8786 return;
8787 }
8788
8789
8790 next_idx = i40e_get_udp_port_idx(pf, 0);
8791
8792 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8793 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
8794 ntohs(port));
8795 return;
8796 }
8797
8798 switch (ti->type) {
8799 case UDP_TUNNEL_TYPE_VXLAN:
8800 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
8801 break;
8802 case UDP_TUNNEL_TYPE_GENEVE:
8803 if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
8804 return;
8805 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
8806 break;
8807 default:
8808 return;
8809 }
8810
8811
8812 pf->udp_ports[next_idx].index = port;
8813 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
8814 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8815}
8816
8817
8818
8819
8820
8821
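/**
 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 **/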
8822static void i40e_udp_tunnel_del(struct net_device *netdev,
8823 struct udp_tunnel_info *ti)
8824{
8825 struct i40e_netdev_priv *np = netdev_priv(netdev);
8826 struct i40e_vsi *vsi = np->vsi;
8827 struct i40e_pf *pf = vsi->back;
8828 __be16 port = ti->port;
8829 u8 idx;
8830
8831 idx = i40e_get_udp_port_idx(pf, port);
8832
8833
8834 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
8835 goto not_found;
8836
8837 switch (ti->type) {
8838 case UDP_TUNNEL_TYPE_VXLAN:
8839 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
8840 goto not_found;
8841 break;
8842 case UDP_TUNNEL_TYPE_GENEVE:
8843 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
8844 goto not_found;
8845 break;
8846 default:
8847 goto not_found;
8848 }
8849
8850
8851
8852
8853 pf->udp_ports[idx].index = 0;
8854 pf->pending_udp_bitmap |= BIT_ULL(idx);
8855 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8856
8857 return;
8858not_found:
8859 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
8860 ntohs(port));
8861}
8862
8863static int i40e_get_phys_port_id(struct net_device *netdev,
8864 struct netdev_phys_item_id *ppid)
8865{
8866 struct i40e_netdev_priv *np = netdev_priv(netdev);
8867 struct i40e_pf *pf = np->vsi->back;
8868 struct i40e_hw *hw = &pf->hw;
8869
8870 if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
8871 return -EOPNOTSUPP;
8872
8873 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
8874 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
8875
8876 return 0;
8877}
8878
8879
8880
8881
8882
8883
8884
8885
8886
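/**
 * i40e_ndo_fdb_add - add an entry to the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nested attributes
 * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @vid: VLAN ID
 * @flags: instructions from stack about fdb operation
 **/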
8887static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
8888 struct net_device *dev,
8889 const unsigned char *addr, u16 vid,
8890 u16 flags)
8891{
8892 struct i40e_netdev_priv *np = netdev_priv(dev);
8893 struct i40e_pf *pf = np->vsi->back;
8894 int err = 0;
8895
8896 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
8897 return -EOPNOTSUPP;
8898
8899 if (vid) {
8900 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
8901 return -EINVAL;
8902 }
8903
8904
8905
8906
8907 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
8908 netdev_info(dev, "FDB only supports static addresses\n");
8909 return -EINVAL;
8910 }
8911
8912 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
8913 err = dev_uc_add_excl(dev, addr);
8914 else if (is_multicast_ether_addr(addr))
8915 err = dev_mc_add_excl(dev, addr);
8916 else
8917 err = -EINVAL;
8918
8919
8920 if (err == -EEXIST && !(flags & NLM_F_EXCL))
8921 err = 0;
8922
8923 return err;
8924}
8925
8926
8927
8928
8929
8930
8931
8932
8933
8934
8935
8936
8937
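/**
 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge flags
 *
 * Inserts a new hardware bridge if not already created and
 * enables the bridging mode requested (VEB or VEPA). If the
 * hardware bridge has already been inserted and the request
 * is to change the mode then that requires a PF reset to
 * allow rebuild of the components with required hardware
 * bridge mode enabled.
 **/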
8938static int i40e_ndo_bridge_setlink(struct net_device *dev,
8939 struct nlmsghdr *nlh,
8940 u16 flags)
8941{
8942 struct i40e_netdev_priv *np = netdev_priv(dev);
8943 struct i40e_vsi *vsi = np->vsi;
8944 struct i40e_pf *pf = vsi->back;
8945 struct i40e_veb *veb = NULL;
8946 struct nlattr *attr, *br_spec;
8947 int i, rem;
8948
8949
8950 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8951 return -EOPNOTSUPP;
8952
8953
8954 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8955 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8956 veb = pf->veb[i];
8957 }
8958
	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
8962 __u16 mode;
8963
8964 if (nla_type(attr) != IFLA_BRIDGE_MODE)
8965 continue;
8966
8967 mode = nla_get_u16(attr);
8968 if ((mode != BRIDGE_MODE_VEPA) &&
8969 (mode != BRIDGE_MODE_VEB))
8970 return -EINVAL;
8971
8972
8973 if (!veb) {
8974 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8975 vsi->tc_config.enabled_tc);
8976 if (veb) {
8977 veb->bridge_mode = mode;
8978 i40e_config_bridge_mode(veb);
8979 } else {
8980
8981 return -ENOENT;
8982 }
8983 break;
8984 } else if (mode != veb->bridge_mode) {
8985
8986 veb->bridge_mode = mode;
8987
8988 if (mode == BRIDGE_MODE_VEB)
8989 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
8990 else
8991 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8992 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8993 break;
8994 }
8995 }
8996
8997 return 0;
8998}
8999
9000
9001
9002
9003
9004
9005
9006
9007
9008
9009
9010
9011
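/**
 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process id
 * @seq: RTNL message seq #
 * @dev: the netdev being configured
 * @filter_mask: unused
 * @nlflags: netlink flags passed in
 *
 * Return the mode in which the hardware bridge is operating in
 * i.e VEB or VEPA.
 **/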
9012static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9013 struct net_device *dev,
9014 u32 __always_unused filter_mask,
9015 int nlflags)
9016{
9017 struct i40e_netdev_priv *np = netdev_priv(dev);
9018 struct i40e_vsi *vsi = np->vsi;
9019 struct i40e_pf *pf = vsi->back;
9020 struct i40e_veb *veb = NULL;
9021 int i;
9022
9023
9024 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
9025 return -EOPNOTSUPP;
9026
9027
9028 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9029 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9030 veb = pf->veb[i];
9031 }
9032
9033 if (!veb)
9034 return 0;
9035
9036 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
9037 0, 0, nlflags, filter_mask, NULL);
9038}
9039
9040
9041
9042
9043#define I40E_MAX_TUNNEL_HDR_LEN 128
9044
9045
9046
9047
9048
9049
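/**
 * i40e_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 **/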
9050static netdev_features_t i40e_features_check(struct sk_buff *skb,
9051 struct net_device *dev,
9052 netdev_features_t features)
9053{
9054 if (skb->encapsulation &&
9055 ((skb_inner_network_header(skb) - skb_transport_header(skb)) >
9056 I40E_MAX_TUNNEL_HDR_LEN))
9057 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
9058
9059 return features;
9060}
9061
9062static const struct net_device_ops i40e_netdev_ops = {
9063 .ndo_open = i40e_open,
9064 .ndo_stop = i40e_close,
9065 .ndo_start_xmit = i40e_lan_xmit_frame,
9066 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
9067 .ndo_set_rx_mode = i40e_set_rx_mode,
9068 .ndo_validate_addr = eth_validate_addr,
9069 .ndo_set_mac_address = i40e_set_mac,
9070 .ndo_change_mtu = i40e_change_mtu,
9071 .ndo_do_ioctl = i40e_ioctl,
9072 .ndo_tx_timeout = i40e_tx_timeout,
9073 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
9074 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
9075#ifdef CONFIG_NET_POLL_CONTROLLER
9076 .ndo_poll_controller = i40e_netpoll,
9077#endif
9078 .ndo_setup_tc = __i40e_setup_tc,
9079#ifdef I40E_FCOE
9080 .ndo_fcoe_enable = i40e_fcoe_enable,
9081 .ndo_fcoe_disable = i40e_fcoe_disable,
9082#endif
9083 .ndo_set_features = i40e_set_features,
9084 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
9085 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
9086 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
9087 .ndo_get_vf_config = i40e_ndo_get_vf_config,
9088 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
9089 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
9090 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
9091 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
9092 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
9093 .ndo_get_phys_port_id = i40e_get_phys_port_id,
9094 .ndo_fdb_add = i40e_ndo_fdb_add,
9095 .ndo_features_check = i40e_features_check,
9096 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
9097 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
9098};
9099
9100
9101
9102
9103
9104
9105
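/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative value on failure
 **/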
9106static int i40e_config_netdev(struct i40e_vsi *vsi)
9107{
9108 struct i40e_pf *pf = vsi->back;
9109 struct i40e_hw *hw = &pf->hw;
9110 struct i40e_netdev_priv *np;
9111 struct net_device *netdev;
9112 u8 mac_addr[ETH_ALEN];
9113 int etherdev_size;
9114
9115 etherdev_size = sizeof(struct i40e_netdev_priv);
9116 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
9117 if (!netdev)
9118 return -ENOMEM;
9119
9120 vsi->netdev = netdev;
9121 np = netdev_priv(netdev);
9122 np->vsi = vsi;
9123
9124 netdev->hw_enc_features |= NETIF_F_SG |
9125 NETIF_F_IP_CSUM |
9126 NETIF_F_IPV6_CSUM |
9127 NETIF_F_HIGHDMA |
9128 NETIF_F_SOFT_FEATURES |
9129 NETIF_F_TSO |
9130 NETIF_F_TSO_ECN |
9131 NETIF_F_TSO6 |
9132 NETIF_F_GSO_GRE |
9133 NETIF_F_GSO_GRE_CSUM |
9134 NETIF_F_GSO_IPXIP4 |
9135 NETIF_F_GSO_IPXIP6 |
9136 NETIF_F_GSO_UDP_TUNNEL |
9137 NETIF_F_GSO_UDP_TUNNEL_CSUM |
9138 NETIF_F_GSO_PARTIAL |
9139 NETIF_F_SCTP_CRC |
9140 NETIF_F_RXHASH |
9141 NETIF_F_RXCSUM |
9142 0;
9143
9144 if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
9145 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
9146
9147 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
9148
9149
9150 netdev->vlan_features |= netdev->hw_enc_features |
9151 NETIF_F_TSO_MANGLEID;
9152
9153 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
9154 netdev->hw_features |= NETIF_F_NTUPLE;
9155
9156 netdev->hw_features |= netdev->hw_enc_features |
9157 NETIF_F_HW_VLAN_CTAG_TX |
9158 NETIF_F_HW_VLAN_CTAG_RX;
9159
9160 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
9161 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
9162
9163 if (vsi->type == I40E_VSI_MAIN) {
9164 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
9165 ether_addr_copy(mac_addr, hw->mac.perm_addr);
9166
9167
9168
9169
9170
9171 i40e_rm_default_mac_filter(vsi, mac_addr);
9172 spin_lock_bh(&vsi->mac_filter_list_lock);
9173 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
9174 spin_unlock_bh(&vsi->mac_filter_list_lock);
9175 } else {
9176
9177 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
9178 pf->vsi[pf->lan_vsi]->netdev->name);
9179 random_ether_addr(mac_addr);
9180
9181 spin_lock_bh(&vsi->mac_filter_list_lock);
9182 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
9183 spin_unlock_bh(&vsi->mac_filter_list_lock);
9184 }
9185
9186 ether_addr_copy(netdev->dev_addr, mac_addr);
9187 ether_addr_copy(netdev->perm_addr, mac_addr);
9188
9189 netdev->priv_flags |= IFF_UNICAST_FLT;
9190 netdev->priv_flags |= IFF_SUPP_NOFCS;
9191
9192 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
9193
9194 netdev->netdev_ops = &i40e_netdev_ops;
9195 netdev->watchdog_timeo = 5 * HZ;
9196 i40e_set_ethtool_ops(netdev);
9197#ifdef I40E_FCOE
9198 i40e_fcoe_config_netdev(netdev, vsi);
9199#endif
9200
9201 return 0;
9202}
9203
9204
9205
9206
9207
9208
9209
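/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 **/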
9210static void i40e_vsi_delete(struct i40e_vsi *vsi)
9211{
9212
9213 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
9214 return;
9215
9216 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
9217}
9218
9219
9220
9221
9222
9223
9224
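/**
 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
 * @vsi: the VSI being queried
 *
 * Returns 1 if HW bridge mode is VEB and returns 0 in case of VEPA mode
 **/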
9225int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
9226{
9227 struct i40e_veb *veb;
9228 struct i40e_pf *pf = vsi->back;
9229
9230
9231 if (vsi->veb_idx == I40E_NO_VEB)
9232 return 1;
9233
9234 veb = pf->veb[vsi->veb_idx];
9235 if (!veb) {
9236 dev_info(&pf->pdev->dev,
9237 "There is no veb associated with the bridge\n");
9238 return -ENOENT;
9239 }
9240
9241
	/* Uplink is a bridge in VEPA mode */
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		return 0;

	/* Uplink is a bridge in VEB mode */
	return 1;
9251}
9252
9253
9254
9255
9256
9257
9258
9259
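/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.
 **/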
9260static int i40e_add_vsi(struct i40e_vsi *vsi)
9261{
9262 int ret = -ENODEV;
9263 i40e_status aq_ret = 0;
9264 struct i40e_pf *pf = vsi->back;
9265 struct i40e_hw *hw = &pf->hw;
9266 struct i40e_vsi_context ctxt;
9267 struct i40e_mac_filter *f, *ftmp;
9268
9269 u8 enabled_tc = 0x1;
9270 int f_count = 0;
9271
9272 memset(&ctxt, 0, sizeof(ctxt));
9273 switch (vsi->type) {
9274 case I40E_VSI_MAIN:
9275
9276
9277
9278
9279
9280 ctxt.seid = pf->main_vsi_seid;
9281 ctxt.pf_num = pf->hw.pf_id;
9282 ctxt.vf_num = 0;
9283 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9284 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9285 if (ret) {
9286 dev_info(&pf->pdev->dev,
9287 "couldn't get PF vsi config, err %s aq_err %s\n",
9288 i40e_stat_str(&pf->hw, ret),
9289 i40e_aq_str(&pf->hw,
9290 pf->hw.aq.asq_last_status));
9291 return -ENOENT;
9292 }
9293 vsi->info = ctxt.info;
9294 vsi->info.valid_sections = 0;
9295
9296 vsi->seid = ctxt.seid;
9297 vsi->id = ctxt.vsi_number;
9298
9299 enabled_tc = i40e_pf_get_tc_map(pf);
9300
9301
9302 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
9303 !(pf->hw.func_caps.iscsi)) {
9304 memset(&ctxt, 0, sizeof(ctxt));
9305 ctxt.seid = pf->main_vsi_seid;
9306 ctxt.pf_num = pf->hw.pf_id;
9307 ctxt.vf_num = 0;
9308 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
9309 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
9310 if (ret) {
9311 dev_info(&pf->pdev->dev,
9312 "update vsi failed, err %s aq_err %s\n",
9313 i40e_stat_str(&pf->hw, ret),
9314 i40e_aq_str(&pf->hw,
9315 pf->hw.aq.asq_last_status));
9316 ret = -ENOENT;
9317 goto err;
9318 }
9319
9320 i40e_vsi_update_queue_map(vsi, &ctxt);
9321 vsi->info.valid_sections = 0;
9322 } else {
9323
9324
9325
9326
9327
9328
9329 ret = i40e_vsi_config_tc(vsi, enabled_tc);
9330 if (ret) {
9331 dev_info(&pf->pdev->dev,
9332 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
9333 enabled_tc,
9334 i40e_stat_str(&pf->hw, ret),
9335 i40e_aq_str(&pf->hw,
9336 pf->hw.aq.asq_last_status));
9337 ret = -ENOENT;
9338 }
9339 }
9340 break;
9341
9342 case I40E_VSI_FDIR:
9343 ctxt.pf_num = hw->pf_id;
9344 ctxt.vf_num = 0;
9345 ctxt.uplink_seid = vsi->uplink_seid;
9346 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9347 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9348 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
9349 (i40e_is_vsi_uplink_mode_veb(vsi))) {
9350 ctxt.info.valid_sections |=
9351 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9352 ctxt.info.switch_id =
9353 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9354 }
9355 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9356 break;
9357
9358 case I40E_VSI_VMDQ2:
9359 ctxt.pf_num = hw->pf_id;
9360 ctxt.vf_num = 0;
9361 ctxt.uplink_seid = vsi->uplink_seid;
9362 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9363 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
9364
9365
9366
9367
9368 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9369 ctxt.info.valid_sections |=
9370 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9371 ctxt.info.switch_id =
9372 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9373 }
9374
9375
9376 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9377 break;
9378
9379 case I40E_VSI_SRIOV:
9380 ctxt.pf_num = hw->pf_id;
9381 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
9382 ctxt.uplink_seid = vsi->uplink_seid;
9383 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9384 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
9385
9386
9387
9388
9389 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9390 ctxt.info.valid_sections |=
9391 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9392 ctxt.info.switch_id =
9393 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9394 }
9395
9396 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
9397 ctxt.info.valid_sections |=
9398 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
9399 ctxt.info.queueing_opt_flags |=
9400 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
9401 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
9402 }
9403
9404 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
9405 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
9406 if (pf->vf[vsi->vf_id].spoofchk) {
9407 ctxt.info.valid_sections |=
9408 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
9409 ctxt.info.sec_flags |=
9410 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
9411 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
9412 }
9413
9414 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9415 break;
9416
9417#ifdef I40E_FCOE
9418 case I40E_VSI_FCOE:
9419 ret = i40e_fcoe_vsi_init(vsi, &ctxt);
9420 if (ret) {
9421 dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
9422 return ret;
9423 }
9424 break;
9425
9426#endif
9427 case I40E_VSI_IWARP:
9428
9429 break;
9430
9431 default:
9432 return -ENODEV;
9433 }
9434
9435 if (vsi->type != I40E_VSI_MAIN) {
9436 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
9437 if (ret) {
9438 dev_info(&vsi->back->pdev->dev,
9439 "add vsi failed, err %s aq_err %s\n",
9440 i40e_stat_str(&pf->hw, ret),
9441 i40e_aq_str(&pf->hw,
9442 pf->hw.aq.asq_last_status));
9443 ret = -ENOENT;
9444 goto err;
9445 }
9446 vsi->info = ctxt.info;
9447 vsi->info.valid_sections = 0;
9448 vsi->seid = ctxt.seid;
9449 vsi->id = ctxt.vsi_number;
9450 }
9451
9452 if (vsi->type != I40E_VSI_FDIR) {
9453 aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
9454 if (aq_ret) {
9455 ret = i40e_aq_rc_to_posix(aq_ret,
9456 hw->aq.asq_last_status);
9457 dev_info(&pf->pdev->dev,
9458 "set brdcast promisc failed, err %s, aq_err %s\n",
9459 i40e_stat_str(hw, aq_ret),
9460 i40e_aq_str(hw, hw->aq.asq_last_status));
9461 }
9462 }
9463
9464 vsi->active_filters = 0;
9465 clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
9466 spin_lock_bh(&vsi->mac_filter_list_lock);
9467
9468 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
9469 f->state = I40E_FILTER_NEW;
9470 f_count++;
9471 }
9472 spin_unlock_bh(&vsi->mac_filter_list_lock);
9473
9474 if (f_count) {
9475 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
9476 pf->flags |= I40E_FLAG_FILTER_SYNC;
9477 }
9478
9479
9480 ret = i40e_vsi_get_bw_info(vsi);
9481 if (ret) {
9482 dev_info(&pf->pdev->dev,
9483 "couldn't get vsi bw info, err %s aq_err %s\n",
9484 i40e_stat_str(&pf->hw, ret),
9485 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9486
9487 ret = 0;
9488 }
9489
9490err:
9491 return ret;
9492}
9493
9494
9495
9496
9497
9498
9499
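/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 **/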
9500int i40e_vsi_release(struct i40e_vsi *vsi)
9501{
9502 struct i40e_mac_filter *f, *ftmp;
9503 struct i40e_veb *veb = NULL;
9504 struct i40e_pf *pf;
9505 u16 uplink_seid;
9506 int i, n;
9507
9508 pf = vsi->back;
9509
9510
9511 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
9512 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
9513 vsi->seid, vsi->uplink_seid);
9514 return -ENODEV;
9515 }
9516 if (vsi == pf->vsi[pf->lan_vsi] &&
9517 !test_bit(__I40E_DOWN, &pf->state)) {
9518 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
9519 return -ENODEV;
9520 }
9521
9522 uplink_seid = vsi->uplink_seid;
9523 if (vsi->type != I40E_VSI_SRIOV) {
9524 if (vsi->netdev_registered) {
9525 vsi->netdev_registered = false;
9526 if (vsi->netdev) {
9527
9528 unregister_netdev(vsi->netdev);
9529 }
9530 } else {
9531 i40e_vsi_close(vsi);
9532 }
9533 i40e_vsi_disable_irq(vsi);
9534 }
9535
9536 spin_lock_bh(&vsi->mac_filter_list_lock);
9537 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
9538 i40e_del_filter(vsi, f->macaddr, f->vlan,
9539 f->is_vf, f->is_netdev);
9540 spin_unlock_bh(&vsi->mac_filter_list_lock);
9541
9542 i40e_sync_vsi_filters(vsi);
9543
9544 i40e_vsi_delete(vsi);
9545 i40e_vsi_free_q_vectors(vsi);
9546 if (vsi->netdev) {
9547 free_netdev(vsi->netdev);
9548 vsi->netdev = NULL;
9549 }
9550 i40e_vsi_clear_rings(vsi);
9551 i40e_vsi_clear(vsi);
9552
9553
9554
9555
9556
9557
9558
9559
9560
9561 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
9562 if (pf->vsi[i] &&
9563 pf->vsi[i]->uplink_seid == uplink_seid &&
9564 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
9565 n++;
9566 }
9567 }
9568 for (i = 0; i < I40E_MAX_VEB; i++) {
9569 if (!pf->veb[i])
9570 continue;
9571 if (pf->veb[i]->uplink_seid == uplink_seid)
9572 n++;
9573 if (pf->veb[i]->seid == uplink_seid)
9574 veb = pf->veb[i];
9575 }
9576 if (n == 0 && veb && veb->uplink_seid != 0)
9577 i40e_veb_release(veb);
9578
9579 return 0;
9580}

/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after i40e_vsi_mem_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 **/
9592static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
9593{
9594 int ret = -ENOENT;
9595 struct i40e_pf *pf = vsi->back;
9596
9597 if (vsi->q_vectors[0]) {
9598 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
9599 vsi->seid);
9600 return -EEXIST;
9601 }
9602
9603 if (vsi->base_vector) {
9604 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
9605 vsi->seid, vsi->base_vector);
9606 return -EEXIST;
9607 }
9608
9609 ret = i40e_vsi_alloc_q_vectors(vsi);
9610 if (ret) {
9611 dev_info(&pf->pdev->dev,
9612 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
9613 vsi->num_q_vectors, vsi->seid, ret);
9614 vsi->num_q_vectors = 0;
9615 goto vector_setup_out;
9616 }

	/* In Legacy mode, we do not have to get any other vector since we
	 * piggyback on the misc/ICR0 vector for queue interrupts.
	 */
9621 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
9622 return ret;
9623 if (vsi->num_q_vectors)
9624 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
9625 vsi->num_q_vectors, vsi->idx);
9626 if (vsi->base_vector < 0) {
9627 dev_info(&pf->pdev->dev,
9628 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
9629 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
9630 i40e_vsi_free_q_vectors(vsi);
9631 ret = -ENOENT;
9632 goto vector_setup_out;
9633 }
9634
9635vector_setup_out:
9636 return ret;
9637}

/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the VSI being reinitialized
 *
 * This re-allocates a VSI's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
9648static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
9649{
9650 struct i40e_pf *pf;
9651 u8 enabled_tc;
9652 int ret;
9653
9654 if (!vsi)
9655 return NULL;
9656
9657 pf = vsi->back;
9658
9659 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
9660 i40e_vsi_clear_rings(vsi);
9661
9662 i40e_vsi_free_arrays(vsi, false);
9663 i40e_set_num_rings_in_vsi(vsi);
9664 ret = i40e_vsi_alloc_arrays(vsi, false);
9665 if (ret)
9666 goto err_vsi;
9667
9668 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
9669 if (ret < 0) {
9670 dev_info(&pf->pdev->dev,
9671 "failed to get tracking for %d queues for VSI %d err %d\n",
9672 vsi->alloc_queue_pairs, vsi->seid, ret);
9673 goto err_vsi;
9674 }
9675 vsi->base_queue = ret;

	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
9680 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
9681 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
9682 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
9683 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
9684 if (vsi->type == I40E_VSI_MAIN)
9685 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);

	/* assign it some queues */
9688 ret = i40e_alloc_rings(vsi);
9689 if (ret)
9690 goto err_rings;

	/* map all of the rings to the q_vectors */
9693 i40e_vsi_map_rings_to_vectors(vsi);
9694 return vsi;
9695
9696err_rings:
9697 i40e_vsi_free_q_vectors(vsi);
9698 if (vsi->netdev_registered) {
9699 vsi->netdev_registered = false;
9700 unregister_netdev(vsi->netdev);
9701 free_netdev(vsi->netdev);
9702 vsi->netdev = NULL;
9703 }
9704 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
9705err_vsi:
9706 i40e_vsi_clear(vsi);
9707 return NULL;
9708}

/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then adds a
 * VSI to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
9723struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
9724 u16 uplink_seid, u32 param1)
9725{
9726 struct i40e_vsi *vsi = NULL;
9727 struct i40e_veb *veb = NULL;
9728 int ret, i;
9729 int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
9744 for (i = 0; i < I40E_MAX_VEB; i++) {
9745 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
9746 veb = pf->veb[i];
9747 break;
9748 }
9749 }
9750
9751 if (!veb && uplink_seid != pf->mac_seid) {
		/* make sure there is such a vsi and uplink */
9753 for (i = 0; i < pf->num_alloc_vsi; i++) {
9754 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
9755 vsi = pf->vsi[i];
9756 break;
9757 }
9758 }
9759 if (!vsi) {
9760 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
9761 uplink_seid);
9762 return NULL;
9763 }
9764
9765 if (vsi->uplink_seid == pf->mac_seid)
9766 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
9767 vsi->tc_config.enabled_tc);
9768 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
9769 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
9770 vsi->tc_config.enabled_tc);
9771 if (veb) {
9772 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
9773 dev_info(&vsi->back->pdev->dev,
9774 "New VSI creation error, uplink seid of LAN VSI expected.\n");
9775 return NULL;
9776 }
9777
			/* We come up by default in VEPA mode if SRIOV is not
			 * already enabled, in which case we can't force VEPA
			 * mode.
			 */
9781 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
9782 veb->bridge_mode = BRIDGE_MODE_VEPA;
9783 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
9784 }
9785 i40e_config_bridge_mode(veb);
9786 }
9787 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9788 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9789 veb = pf->veb[i];
9790 }
9791 if (!veb) {
9792 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
9793 return NULL;
9794 }
9795
9796 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
9797 uplink_seid = veb->seid;
9798 }
9799
	/* get vsi sw struct */
9801 v_idx = i40e_vsi_mem_alloc(pf, type);
9802 if (v_idx < 0)
9803 goto err_alloc;
9804 vsi = pf->vsi[v_idx];
9805 if (!vsi)
9806 goto err_alloc;
9807 vsi->type = type;
9808 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
9809
9810 if (type == I40E_VSI_MAIN)
9811 pf->lan_vsi = v_idx;
9812 else if (type == I40E_VSI_SRIOV)
9813 vsi->vf_id = param1;
	/* assign it some queues */
9815 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
9816 vsi->idx);
9817 if (ret < 0) {
9818 dev_info(&pf->pdev->dev,
9819 "failed to get tracking for %d queues for VSI %d err=%d\n",
9820 vsi->alloc_queue_pairs, vsi->seid, ret);
9821 goto err_vsi;
9822 }
9823 vsi->base_queue = ret;
9824
9825
9826 vsi->uplink_seid = uplink_seid;
9827 ret = i40e_add_vsi(vsi);
9828 if (ret)
9829 goto err_vsi;
9830
9831 switch (vsi->type) {
	/* setup the netdev if needed */
9833 case I40E_VSI_MAIN:
		/* Apply relevant filters if a platform-specific mac
		 * address was previously set on the PF
		 */
		if (pf->flags & I40E_FLAG_PF_MAC) {
9838 ret = i40e_macaddr_init(vsi, pf->hw.mac.addr);
9839 if (ret) {
9840 dev_warn(&pf->pdev->dev,
9841 "could not set up macaddr; err %d\n",
9842 ret);
9843 }
9844 }
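		/* fall through */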
9845 case I40E_VSI_VMDQ2:
9846 case I40E_VSI_FCOE:
9847 ret = i40e_config_netdev(vsi);
9848 if (ret)
9849 goto err_netdev;
9850 ret = register_netdev(vsi->netdev);
9851 if (ret)
9852 goto err_netdev;
9853 vsi->netdev_registered = true;
9854 netif_carrier_off(vsi->netdev);
9855#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
9857 i40e_dcbnl_setup(vsi);
9858#endif
		/* fall through */

9861 case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
9863 ret = i40e_vsi_setup_vectors(vsi);
9864 if (ret)
9865 goto err_msix;
9866
9867 ret = i40e_alloc_rings(vsi);
9868 if (ret)
9869 goto err_rings;
9870
		/* map all of the rings to the q_vectors */
9872 i40e_vsi_map_rings_to_vectors(vsi);
9873
9874 i40e_vsi_reset_stats(vsi);
9875 break;
9876
9877 default:
		/* no netdev or rings for the other VSI types */
9879 break;
9880 }
9881
9882 if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
9883 (vsi->type == I40E_VSI_VMDQ2)) {
9884 ret = i40e_vsi_config_rss(vsi);
9885 }
9886 return vsi;
9887
9888err_rings:
9889 i40e_vsi_free_q_vectors(vsi);
9890err_msix:
9891 if (vsi->netdev_registered) {
9892 vsi->netdev_registered = false;
9893 unregister_netdev(vsi->netdev);
9894 free_netdev(vsi->netdev);
9895 vsi->netdev = NULL;
9896 }
9897err_netdev:
9898 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
9899err_vsi:
9900 i40e_vsi_clear(vsi);
9901err_alloc:
9902 return NULL;
9903}

/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB
 **/
9911static int i40e_veb_get_bw_info(struct i40e_veb *veb)
9912{
9913 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
9914 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
9915 struct i40e_pf *pf = veb->pf;
9916 struct i40e_hw *hw = &pf->hw;
9917 u32 tc_bw_max;
9918 int ret = 0;
9919 int i;
9920
9921 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
9922 &bw_data, NULL);
9923 if (ret) {
9924 dev_info(&pf->pdev->dev,
9925 "query veb bw config failed, err %s aq_err %s\n",
9926 i40e_stat_str(&pf->hw, ret),
9927 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9928 goto out;
9929 }
9930
9931 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
9932 &ets_data, NULL);
9933 if (ret) {
9934 dev_info(&pf->pdev->dev,
9935 "query veb bw ets config failed, err %s aq_err %s\n",
9936 i40e_stat_str(&pf->hw, ret),
9937 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9938 goto out;
9939 }
9940
9941 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
9942 veb->bw_max_quanta = ets_data.tc_bw_max;
9943 veb->is_abs_credits = bw_data.absolute_credits_enable;
9944 veb->enabled_tc = ets_data.tc_valid_bits;
9945 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
9946 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
9947 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9948 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
9949 veb->bw_tc_limit_credits[i] =
9950 le16_to_cpu(bw_data.tc_bw_limits[i]);
9951 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
9952 }
9953
9954out:
9955 return ret;
9956}

/**
 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On error: returns error code (negative)
 * On success: returns veb index in PF (positive)
 **/
9965static int i40e_veb_mem_alloc(struct i40e_pf *pf)
9966{
9967 int ret = -ENOENT;
9968 struct i40e_veb *veb;
9969 int i;
9970
	/* Need to protect the allocation of switch elements at the PF level */
9972 mutex_lock(&pf->switch_mutex);

	/* The VEB list may be fragmented if VEB creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free slots in the list.
	 *
	 * find next empty veb slot, looping back around if necessary
	 */
9980 i = 0;
9981 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
9982 i++;
9983 if (i >= I40E_MAX_VEB) {
9984 ret = -ENOMEM;
9985 goto err_alloc_veb;
9986 }
9987
9988 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
9989 if (!veb) {
9990 ret = -ENOMEM;
9991 goto err_alloc_veb;
9992 }
9993 veb->pf = pf;
9994 veb->idx = i;
9995 veb->enabled_tc = 1;
9996
9997 pf->veb[i] = veb;
9998 ret = i;
9999err_alloc_veb:
10000 mutex_unlock(&pf->switch_mutex);
10001 return ret;
10002}

/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch and then deletes
 * the branches along the way.
 **/
10011static void i40e_switch_branch_release(struct i40e_veb *branch)
10012{
10013 struct i40e_pf *pf = branch->pf;
10014 u16 branch_seid = branch->seid;
10015 u16 veb_idx = branch->idx;
10016 int i;
10017
	/* release any VEBs on this VEB - RECURSION */
10019 for (i = 0; i < I40E_MAX_VEB; i++) {
10020 if (!pf->veb[i])
10021 continue;
10022 if (pf->veb[i]->uplink_seid == branch->seid)
10023 i40e_switch_branch_release(pf->veb[i]);
10024 }
10025
	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
10031 for (i = 0; i < pf->num_alloc_vsi; i++) {
10032 if (!pf->vsi[i])
10033 continue;
10034 if (pf->vsi[i]->uplink_seid == branch_seid &&
10035 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
10036 i40e_vsi_release(pf->vsi[i]);
10037 }
10038 }
10039
	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
10045 if (pf->veb[veb_idx])
10046 i40e_veb_release(pf->veb[veb_idx]);
10047}

/**
 * i40e_veb_clear - remove veb struct
 * @veb: the veb to remove
 **/
10053static void i40e_veb_clear(struct i40e_veb *veb)
10054{
10055 if (!veb)
10056 return;
10057
10058 if (veb->pf) {
10059 struct i40e_pf *pf = veb->pf;
10060
10061 mutex_lock(&pf->switch_mutex);
10062 if (pf->veb[veb->idx] == veb)
10063 pf->veb[veb->idx] = NULL;
10064 mutex_unlock(&pf->switch_mutex);
10065 }
10066
10067 kfree(veb);
10068}

/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 **/
10074void i40e_veb_release(struct i40e_veb *veb)
10075{
10076 struct i40e_vsi *vsi = NULL;
10077 struct i40e_pf *pf;
10078 int i, n = 0;
10079
10080 pf = veb->pf;
10081
	/* find the remaining VSI and check for extras */
10083 for (i = 0; i < pf->num_alloc_vsi; i++) {
10084 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
10085 n++;
10086 vsi = pf->vsi[i];
10087 }
10088 }
10089 if (n != 1) {
10090 dev_info(&pf->pdev->dev,
10091 "can't remove VEB %d with %d VSIs left\n",
10092 veb->seid, n);
10093 return;
10094 }
10095
	/* move the remaining VSI to uplink veb */
10097 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
10098 if (veb->uplink_seid) {
10099 vsi->uplink_seid = veb->uplink_seid;
10100 if (veb->uplink_seid == pf->mac_seid)
10101 vsi->veb_idx = I40E_NO_VEB;
10102 else
10103 vsi->veb_idx = veb->veb_idx;
10104 } else {
		/* floating VEB */
10106 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10107 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
10108 }
10109
10110 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10111 i40e_veb_clear(veb);
10112}

/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 **/
10119static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
10120{
10121 struct i40e_pf *pf = veb->pf;
10122 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
10123 int ret;
10124
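	/* create the VEB in the hardware switch */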
10125 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
10126 veb->enabled_tc, false,
10127 &veb->seid, enable_stats, NULL);
10128
10129
10130 if (ret) {
10131 dev_info(&pf->pdev->dev,
10132 "couldn't add VEB, err %s aq_err %s\n",
10133 i40e_stat_str(&pf->hw, ret),
10134 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10135 return -EPERM;
10136 }
10137
10138
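	/* get statistics counter */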
10139 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
10140 &veb->stats_idx, NULL, NULL, NULL);
10141 if (ret) {
10142 dev_info(&pf->pdev->dev,
10143 "couldn't get VEB statistics idx, err %s aq_err %s\n",
10144 i40e_stat_str(&pf->hw, ret),
10145 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10146 return -EPERM;
10147 }
10148 ret = i40e_veb_get_bw_info(veb);
10149 if (ret) {
10150 dev_info(&pf->pdev->dev,
10151 "couldn't get VEB bw info, err %s aq_err %s\n",
10152 i40e_stat_str(&pf->hw, ret),
10153 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10154 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10155 return -ENOENT;
10156 }
10157
10158 vsi->uplink_seid = veb->seid;
10159 vsi->veb_idx = veb->idx;
10160 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
10161
10162 return 0;
10163}

/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch.
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB.  It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
10181struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
10182 u16 uplink_seid, u16 vsi_seid,
10183 u8 enabled_tc)
10184{
10185 struct i40e_veb *veb, *uplink_veb = NULL;
10186 int vsi_idx, veb_idx;
10187 int ret;
10188
10189
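	/* if one seid is 0, the other must be 0 to create a floating relay */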
10190 if ((uplink_seid == 0 || vsi_seid == 0) &&
10191 (uplink_seid + vsi_seid != 0)) {
10192 dev_info(&pf->pdev->dev,
			 "one, not both, SEIDs are 0: uplink=%d vsi=%d\n",
10194 uplink_seid, vsi_seid);
10195 return NULL;
10196 }
10197
	/* make sure there is such a vsi and uplink */
10199 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
10200 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
10201 break;
10202 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
10203 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
10204 vsi_seid);
10205 return NULL;
10206 }
10207
10208 if (uplink_seid && uplink_seid != pf->mac_seid) {
10209 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10210 if (pf->veb[veb_idx] &&
10211 pf->veb[veb_idx]->seid == uplink_seid) {
10212 uplink_veb = pf->veb[veb_idx];
10213 break;
10214 }
10215 }
10216 if (!uplink_veb) {
10217 dev_info(&pf->pdev->dev,
10218 "uplink seid %d not found\n", uplink_seid);
10219 return NULL;
10220 }
10221 }
10222
	/* get veb sw struct */
10224 veb_idx = i40e_veb_mem_alloc(pf);
10225 if (veb_idx < 0)
10226 goto err_alloc;
10227 veb = pf->veb[veb_idx];
10228 veb->flags = flags;
10229 veb->uplink_seid = uplink_seid;
10230 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
10231 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
10232
	/* create the VEB in the switch */
10234 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
10235 if (ret)
10236 goto err_veb;
10237 if (vsi_idx == pf->lan_vsi)
10238 pf->lan_veb = veb->idx;
10239
10240 return veb;
10241
10242err_veb:
10243 i40e_veb_clear(veb);
10244err_alloc:
10245 return NULL;
10246}

/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
10257static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
10258 struct i40e_aqc_switch_config_element_resp *ele,
10259 u16 num_reported, bool printconfig)
10260{
10261 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
10262 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
10263 u8 element_type = ele->element_type;
10264 u16 seid = le16_to_cpu(ele->seid);
10265
10266 if (printconfig)
10267 dev_info(&pf->pdev->dev,
10268 "type=%d seid=%d uplink=%d downlink=%d\n",
10269 element_type, seid, uplink_seid, downlink_seid);
10270
10271 switch (element_type) {
10272 case I40E_SWITCH_ELEMENT_TYPE_MAC:
10273 pf->mac_seid = seid;
10274 break;
10275 case I40E_SWITCH_ELEMENT_TYPE_VEB:
10276
10277 if (uplink_seid != pf->mac_seid)
10278 break;
10279 if (pf->lan_veb == I40E_NO_VEB) {
10280 int v;
10281
			/* find existing or else empty VEB */
10283 for (v = 0; v < I40E_MAX_VEB; v++) {
10284 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
10285 pf->lan_veb = v;
10286 break;
10287 }
10288 }
10289 if (pf->lan_veb == I40E_NO_VEB) {
10290 v = i40e_veb_mem_alloc(pf);
10291 if (v < 0)
10292 break;
10293 pf->lan_veb = v;
10294 }
10295 }
10296
10297 pf->veb[pf->lan_veb]->seid = seid;
10298 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
10299 pf->veb[pf->lan_veb]->pf = pf;
10300 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
10301 break;
10302 case I40E_SWITCH_ELEMENT_TYPE_VSI:
10303 if (num_reported != 1)
10304 break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
10308 pf->mac_seid = uplink_seid;
10309 pf->pf_seid = downlink_seid;
10310 pf->main_vsi_seid = seid;
10311 if (printconfig)
10312 dev_info(&pf->pdev->dev,
10313 "pf_seid=%d main_vsi_seid=%d\n",
10314 pf->pf_seid, pf->main_vsi_seid);
10315 break;
10316 case I40E_SWITCH_ELEMENT_TYPE_PF:
10317 case I40E_SWITCH_ELEMENT_TYPE_VF:
10318 case I40E_SWITCH_ELEMENT_TYPE_EMP:
10319 case I40E_SWITCH_ELEMENT_TYPE_BMC:
10320 case I40E_SWITCH_ELEMENT_TYPE_PE:
10321 case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
10323 break;
10324 default:
10325 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
10326 element_type, seid);
10327 break;
10328 }
10329}

/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/
10339int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
10340{
10341 struct i40e_aqc_get_switch_config_resp *sw_config;
10342 u16 next_seid = 0;
10343 int ret = 0;
10344 u8 *aq_buf;
10345 int i;
10346
10347 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
10348 if (!aq_buf)
10349 return -ENOMEM;
10350
10351 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
10352 do {
10353 u16 num_reported, num_total;
10354
10355 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
10356 I40E_AQ_LARGE_BUF,
10357 &next_seid, NULL);
10358 if (ret) {
10359 dev_info(&pf->pdev->dev,
10360 "get switch config failed err %s aq_err %s\n",
10361 i40e_stat_str(&pf->hw, ret),
10362 i40e_aq_str(&pf->hw,
10363 pf->hw.aq.asq_last_status));
10364 kfree(aq_buf);
10365 return -ENOENT;
10366 }
10367
10368 num_reported = le16_to_cpu(sw_config->header.num_reported);
10369 num_total = le16_to_cpu(sw_config->header.num_total);
10370
10371 if (printconfig)
10372 dev_info(&pf->pdev->dev,
10373 "header: %d reported %d total\n",
10374 num_reported, num_total);
10375
10376 for (i = 0; i < num_reported; i++) {
10377 struct i40e_aqc_switch_config_element_resp *ele =
10378 &sw_config->element[i];
10379
10380 i40e_setup_pf_switch_element(pf, ele, num_reported,
10381 printconfig);
10382 }
10383 } while (next_seid != 0);
10384
10385 kfree(aq_buf);
10386 return ret;
10387}

/**
 * i40e_setup_pf_switch - Setup the HW switch on this PF instance
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 *
 * Returns 0 on success, negative value on failure
 **/
10396static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
10397{
10398 u16 flags = 0;
10399 int ret;
10400
	/* find out what's out there already */
10402 ret = i40e_fetch_switch_configuration(pf, false);
10403 if (ret) {
10404 dev_info(&pf->pdev->dev,
10405 "couldn't fetch switch config, err %s aq_err %s\n",
10406 i40e_stat_str(&pf->hw, ret),
10407 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10408 return ret;
10409 }
10410 i40e_pf_reset_stats(pf);
10411

	/* Set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when user requests promisc. The default is limited
	 * promisc.
	 */
10418 if ((pf->hw.pf_id == 0) &&
10419 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
10420 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
10421
10422 if (pf->hw.pf_id == 0) {
10423 u16 valid_flags;
10424
10425 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
10426 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
10427 NULL);
10428 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
10429 dev_info(&pf->pdev->dev,
10430 "couldn't set switch config bits, err %s aq_err %s\n",
10431 i40e_stat_str(&pf->hw, ret),
10432 i40e_aq_str(&pf->hw,
10433 pf->hw.aq.asq_last_status));
			/* not a fatal problem, just keep going */
10435 }
10436 }
10437
	/* first time setup */
10439 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
10440 struct i40e_vsi *vsi = NULL;
10441 u16 uplink_seid;
10442
		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
10446 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
10447 uplink_seid = pf->veb[pf->lan_veb]->seid;
10448 else
10449 uplink_seid = pf->mac_seid;
10450 if (pf->lan_vsi == I40E_NO_VSI)
10451 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
10452 else if (reinit)
10453 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
10454 if (!vsi) {
10455 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
10456 i40e_fdir_teardown(pf);
10457 return -EAGAIN;
10458 }
10459 } else {
		/* force a reset of TC and queue layout configurations */
10461 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
10462
10463 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
10464 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
10465 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
10466 }
10467 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
10468
10469 i40e_fdir_sb_setup(pf);
10470
	/* Setup static PF queue filter control settings */
10472 ret = i40e_setup_pf_filter_control(pf);
10473 if (ret) {
10474 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
10475 ret);
		/* Failure here should not stop continuing other steps */
10477 }
10478
	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
10482 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
10483 i40e_pf_config_rss(pf);
10484
	/* refresh link info and fire off an initial link event */
10486 i40e_update_link_info(&pf->hw);
10487 i40e_link_event(pf);
10488
	/* Initialize user-specific link properties */
10490 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
10491 I40E_AQ_AN_COMPLETED) ? true : false);
10492
10493 i40e_ptp_init(pf);
10494
10495 return ret;
10496}

/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
10502static void i40e_determine_queue_usage(struct i40e_pf *pf)
10503{
10504 int queues_left;
10505
10506 pf->num_lan_qps = 0;
10507#ifdef I40E_FCOE
10508 pf->num_fcoe_qps = 0;
10509#endif

	/* Find the max queues to be put into basic use.  We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big RSS set.
	 */
10515 queues_left = pf->hw.func_caps.num_tx_qp;
10516
10517 if ((queues_left == 1) ||
10518 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
10520 queues_left = 0;
10521 pf->alloc_rss_size = pf->num_lan_qps = 1;
10522
		/* make sure all the fancies are disabled */
10524 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
10525 I40E_FLAG_IWARP_ENABLED |
10526#ifdef I40E_FCOE
10527 I40E_FLAG_FCOE_ENABLED |
10528#endif
10529 I40E_FLAG_FD_SB_ENABLED |
10530 I40E_FLAG_FD_ATR_ENABLED |
10531 I40E_FLAG_DCB_CAPABLE |
10532 I40E_FLAG_DCB_ENABLED |
10533 I40E_FLAG_SRIOV_ENABLED |
10534 I40E_FLAG_VMDQ_ENABLED);
10535 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
10536 I40E_FLAG_FD_SB_ENABLED |
10537 I40E_FLAG_FD_ATR_ENABLED |
10538 I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
10540 pf->alloc_rss_size = pf->num_lan_qps = 1;
10541 queues_left -= pf->num_lan_qps;
10542
10543 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
10544 I40E_FLAG_IWARP_ENABLED |
10545#ifdef I40E_FCOE
10546 I40E_FLAG_FCOE_ENABLED |
10547#endif
10548 I40E_FLAG_FD_SB_ENABLED |
10549 I40E_FLAG_FD_ATR_ENABLED |
10550 I40E_FLAG_DCB_ENABLED |
10551 I40E_FLAG_VMDQ_ENABLED);
10552 } else {
		/* Not enough queues for all TCs */
10554 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
10555 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
10556 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
10557 I40E_FLAG_DCB_ENABLED);
10558 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
10559 }
10560 pf->num_lan_qps = max_t(int, pf->rss_size_max,
10561 num_online_cpus());
10562 pf->num_lan_qps = min_t(int, pf->num_lan_qps,
10563 pf->hw.func_caps.num_tx_qp);
10564
10565 queues_left -= pf->num_lan_qps;
10566 }
10567
10568#ifdef I40E_FCOE
10569 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
10570 if (I40E_DEFAULT_FCOE <= queues_left) {
10571 pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
10572 } else if (I40E_MINIMUM_FCOE <= queues_left) {
10573 pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
10574 } else {
10575 pf->num_fcoe_qps = 0;
10576 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
10577 dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
10578 }
10579
10580 queues_left -= pf->num_fcoe_qps;
10581 }
10582
10583#endif
10584 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10585 if (queues_left > 1) {
10586 queues_left -= 1;
10587 } else {
10588 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10589 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
10590 }
10591 }
10592
10593 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10594 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
10595 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
10596 (queues_left / pf->num_vf_qps));
10597 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
10598 }
10599
10600 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
10601 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
10602 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
10603 (queues_left / pf->num_vmdq_qps));
10604 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
10605 }
10606
10607 pf->queues_left = queues_left;
10608 dev_dbg(&pf->pdev->dev,
10609 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
10610 pf->hw.func_caps.num_tx_qp,
10611 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
10612 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
10613 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
10614 queues_left);
10615#ifdef I40E_FCOE
10616 dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
10617#endif
10618}

/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF structure
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per PF
 * based filter sizes required for them. It also enables Flow director,
 * ethertype and macvlan type filter settings for the pf.
 *
 * Returns 0 on success, negative on failure
 **/
10631static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
10632{
10633 struct i40e_filter_control_settings *settings = &pf->filter_settings;
10634
10635 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
10636
	/* Flow Director is enabled */
10638 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
10639 settings->enable_fdir = true;
10640
	/* Ethtype and MACVLAN filters enabled for PF */
10642 settings->enable_ethtype = true;
10643 settings->enable_macvlan = true;
10644
10645 if (i40e_set_filter_control(&pf->hw, settings))
10646 return -ENOENT;
10647
10648 return 0;
10649}
10650
10651#define INFO_STRING_LEN 255
10652#define REMAIN(__x) (INFO_STRING_LEN - (__x))
10653static void i40e_print_features(struct i40e_pf *pf)
10654{
10655 struct i40e_hw *hw = &pf->hw;
10656 char *buf;
10657 int i;
10658
10659 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
10660 if (!buf)
10661 return;
10662
10663 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
10664#ifdef CONFIG_PCI_IOV
10665 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
10666#endif
10667 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
10668 pf->hw.func_caps.num_vsis,
10669 pf->vsi[pf->lan_vsi]->num_queue_pairs);
10670 if (pf->flags & I40E_FLAG_RSS_ENABLED)
10671 i += snprintf(&buf[i], REMAIN(i), " RSS");
10672 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
10673 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
10674 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10675 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
10676 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
10677 }
10678 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
10679 i += snprintf(&buf[i], REMAIN(i), " DCB");
10680 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
10681 i += snprintf(&buf[i], REMAIN(i), " Geneve");
10682 if (pf->flags & I40E_FLAG_PTP)
10683 i += snprintf(&buf[i], REMAIN(i), " PTP");
10684#ifdef I40E_FCOE
10685 if (pf->flags & I40E_FLAG_FCOE_ENABLED)
10686 i += snprintf(&buf[i], REMAIN(i), " FCOE");
10687#endif
10688 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
10689 i += snprintf(&buf[i], REMAIN(i), " VEB");
10690 else
10691 i += snprintf(&buf[i], REMAIN(i), " VEPA");
10692
10693 dev_info(&pf->pdev->dev, "%s\n", buf);
10694 kfree(buf);
10695 WARN_ON(i > INFO_STRING_LEN);
10696}

/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address in Open Firmware on systems that support it,
 * and use IDPROM on SPARC if no OF address is found. On return,
 * I40E_FLAG_PF_MAC will be set in pf->flags if a platform-specific value
 * has been selected.
 **/
10709static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
10710{
10711 pf->flags &= ~I40E_FLAG_PF_MAC;
10712 if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
10713 pf->flags |= I40E_FLAG_PF_MAC;
10714}

/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
10727static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10728{
10729 struct i40e_aq_get_phy_abilities_resp abilities;
10730 struct i40e_pf *pf;
10731 struct i40e_hw *hw;
10732 static u16 pfs_found;
10733 u16 wol_nvm_bits;
10734 u16 link_status;
10735 int err;
10736 u32 val;
10737 u32 i;
10738 u8 set_fc_aq_fail;
10739
10740 err = pci_enable_device_mem(pdev);
10741 if (err)
10742 return err;
10743
	/* set up for high or low dma */
10745 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10746 if (err) {
10747 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10748 if (err) {
10749 dev_err(&pdev->dev,
10750 "DMA configuration failed: 0x%x\n", err);
10751 goto err_dma;
10752 }
10753 }
10754
	/* set up pci connections */
10756 err = pci_request_mem_regions(pdev, i40e_driver_name);
10757 if (err) {
10758 dev_info(&pdev->dev,
10759 "pci_request_selected_regions failed %d\n", err);
10760 goto err_pci_reg;
10761 }
10762
10763 pci_enable_pcie_error_reporting(pdev);
10764 pci_set_master(pdev);
10765
	/* Now that we have a PCI connection, we need to do the
	 * low level device setup.  This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
10771 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
10772 if (!pf) {
10773 err = -ENOMEM;
10774 goto err_pf_alloc;
10775 }
10776 pf->next_vsi = 0;
10777 pf->pdev = pdev;
10778 set_bit(__I40E_DOWN, &pf->state);
10779
10780 hw = &pf->hw;
10781 hw->back = pf;
10782
10783 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
10784 I40E_MAX_CSR_SPACE);
10785
10786 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
10787 if (!hw->hw_addr) {
10788 err = -EIO;
10789 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
10790 (unsigned int)pci_resource_start(pdev, 0),
10791 pf->ioremap_len, err);
10792 goto err_ioremap;
10793 }
10794 hw->vendor_id = pdev->vendor;
10795 hw->device_id = pdev->device;
10796 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
10797 hw->subsystem_vendor_id = pdev->subsystem_vendor;
10798 hw->subsystem_device_id = pdev->subsystem_device;
10799 hw->bus.device = PCI_SLOT(pdev->devfn);
10800 hw->bus.func = PCI_FUNC(pdev->devfn);
10801 pf->instance = pfs_found;
10802
	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
10806 mutex_init(&hw->aq.asq_mutex);
10807 mutex_init(&hw->aq.arq_mutex);
10808
	if (debug != -1)
		pf->msg_enable = debug;
10813
	/* do a special CORER for clearing PXE mode once at init */
10815 if (hw->revision_id == 0 &&
10816 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
10817 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
10818 i40e_flush(hw);
10819 msleep(200);
10820 pf->corer_count++;
10821
10822 i40e_clear_pxe_mode(hw);
10823 }
10824
	/* Reset here to make sure all is clean and to define PF 'n' */
10826 i40e_clear_hw(hw);
10827 err = i40e_pf_reset(hw);
10828 if (err) {
10829 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
10830 goto err_pf_reset;
10831 }
10832 pf->pfr_count++;
10833
10834 hw->aq.num_arq_entries = I40E_AQ_LEN;
10835 hw->aq.num_asq_entries = I40E_AQ_LEN;
10836 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10837 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10838 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
10839
10840 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
10841 "%s-%s:misc",
10842 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
10843
10844 err = i40e_init_shared_code(hw);
10845 if (err) {
10846 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
10847 err);
10848 goto err_pf_reset;
10849 }
10850
	/* set up a default setting for link flow control */
10852 pf->hw.fc.requested_mode = I40E_FC_NONE;
10853
10854 err = i40e_init_adminq(hw);
10855 if (err) {
10856 if (err == I40E_ERR_FIRMWARE_API_VERSION)
10857 dev_info(&pdev->dev,
10858 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
10859 else
10860 dev_info(&pdev->dev,
10861 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
10862
10863 goto err_pf_reset;
10864 }
10865
	/* provide nvm, fw, api versions */
10867 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
10868 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
10869 hw->aq.api_maj_ver, hw->aq.api_min_ver,
10870 i40e_nvm_version_str(hw));
10871
10872 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
10873 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
10874 dev_info(&pdev->dev,
10875 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
10876 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
10877 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
10878 dev_info(&pdev->dev,
10879 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
10880
10881 i40e_verify_eeprom(pf);
10882
	/* Rev 0 hardware was never productized */
10884 if (hw->revision_id < 1)
10885 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
10886
10887 i40e_clear_pxe_mode(hw);
10888 err = i40e_get_capabilities(pf);
10889 if (err)
10890 goto err_adminq_setup;
10891
10892 err = i40e_sw_init(pf);
10893 if (err) {
10894 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
10895 goto err_sw_init;
10896 }
10897
10898 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10899 hw->func_caps.num_rx_qp,
10900 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
10901 if (err) {
10902 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
10903 goto err_init_lan_hmc;
10904 }
10905
10906 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10907 if (err) {
10908 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
10909 err = -ENOENT;
10910 goto err_configure_lan_hmc;
10911 }
10912
	/* Stop the firmware LLDP agent when requested; any error from the
	 * AQ call is ignored because the agent may already be stopped via
	 * hardware settings.
	 */
10917 if (pf->flags & I40E_FLAG_STOP_FW_LLDP) {
10918 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
10919 i40e_aq_stop_lldp(hw, true, NULL);
10920 }
10921
10922 i40e_get_mac_addr(hw, hw->mac.addr);
	/* allow a platform config to override the HW addr */
10924 i40e_get_platform_mac_addr(pdev, pf);
10925 if (!is_valid_ether_addr(hw->mac.addr)) {
10926 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
10927 err = -EIO;
10928 goto err_mac_addr;
10929 }
10930 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
10931 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
10932 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
10933 if (is_valid_ether_addr(hw->mac.port_addr))
10934 pf->flags |= I40E_FLAG_PORT_ID_VALID;
10935#ifdef I40E_FCOE
10936 err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
10937 if (err)
10938 dev_info(&pdev->dev,
10939 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
10940 if (!is_valid_ether_addr(hw->mac.san_addr)) {
10941 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
10942 hw->mac.san_addr);
10943 ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
10944 }
10945 dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
10946#endif
10947
10948 pci_set_drvdata(pdev, pf);
10949 pci_save_state(pdev);
10950#ifdef CONFIG_I40E_DCB
10951 err = i40e_init_pf_dcb(pf);
10952 if (err) {
10953 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
10954 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE & I40E_FLAG_DCB_ENABLED);
		/* Continue without DCB enabled */
10956 }
10957#endif
10958
	/* set up periodic task facility */
10960 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
10961 pf->service_timer_period = HZ;
10962
10963 INIT_WORK(&pf->service_task, i40e_service_task);
10964 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
10965 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
10966
	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
10970 pf->wol_en = false;
10971 else
10972 pf->wol_en = true;
10973 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
10974
	/* set up the main switch operations */
10976 i40e_determine_queue_usage(pf);
10977 err = i40e_init_interrupt_scheme(pf);
10978 if (err)
10979 goto err_switch_setup;
10980
	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
10986 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
10987 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
10988 else
10989 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
10990
	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
10992 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
10993 GFP_KERNEL);
10994 if (!pf->vsi) {
10995 err = -ENOMEM;
10996 goto err_switch_setup;
10997 }
10998
10999#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
11001 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11002 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11003 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
11004 if (pci_num_vf(pdev))
11005 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
11006 }
11007#endif
11008 err = i40e_setup_pf_switch(pf, false);
11009 if (err) {
11010 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
11011 goto err_vsis;
11012 }
11013
	/* Make sure flow control is set according to current settings */
11015 err = i40e_set_fc(hw, &set_fc_aq_fail, true);
11016 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
11017 dev_dbg(&pf->pdev->dev,
11018 "Set fc with err %s aq_err %s on get_phy_cap\n",
11019 i40e_stat_str(hw, err),
11020 i40e_aq_str(hw, hw->aq.asq_last_status));
11021 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
11022 dev_dbg(&pf->pdev->dev,
11023 "Set fc with err %s aq_err %s on set_phy_config\n",
11024 i40e_stat_str(hw, err),
11025 i40e_aq_str(hw, hw->aq.asq_last_status));
11026 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
11027 dev_dbg(&pf->pdev->dev,
11028 "Set fc with err %s aq_err %s on get_link_info\n",
11029 i40e_stat_str(hw, err),
11030 i40e_aq_str(hw, hw->aq.asq_last_status));
11031
	/* if FDIR VSI was set up, start it now */
11033 for (i = 0; i < pf->num_alloc_vsi; i++) {
11034 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
11035 i40e_vsi_open(pf->vsi[i]);
11036 break;
11037 }
11038 }
11039
	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
11043 err = i40e_aq_set_phy_int_mask(&pf->hw,
11044 ~(I40E_AQ_EVENT_LINK_UPDOWN |
11045 I40E_AQ_EVENT_MEDIA_NA |
11046 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
11047 if (err)
11048 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
11049 i40e_stat_str(&pf->hw, err),
11050 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11051
	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
11056 val = rd32(hw, I40E_REG_MSS);
11057 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
11058 val &= ~I40E_REG_MSS_MIN_MASK;
11059 val |= I40E_64BYTE_MSS;
11060 wr32(hw, I40E_REG_MSS, val);
11061 }
11062
11063 if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
11064 msleep(75);
11065 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
11066 if (err)
11067 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
11068 i40e_stat_str(&pf->hw, err),
11069 i40e_aq_str(&pf->hw,
11070 pf->hw.aq.asq_last_status));
11071 }
11072
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
11076 clear_bit(__I40E_DOWN, &pf->state);
11077
	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
11083 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11084 err = i40e_setup_misc_vector(pf);
11085 if (err) {
11086 dev_info(&pdev->dev,
11087 "setup of misc vector failed: %d\n", err);
11088 goto err_vsis;
11089 }
11090 }
11091
11092#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
11094 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11095 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11096 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
		/* disable link interrupts for VFs */
11098 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
11099 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
11100 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
11101 i40e_flush(hw);
11102
11103 if (pci_num_vf(pdev)) {
11104 dev_info(&pdev->dev,
11105 "Active VFs found, allocating resources.\n");
11106 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
11107 if (err)
11108 dev_info(&pdev->dev,
11109 "Error %d allocating resources for existing VFs\n",
11110 err);
11111 }
11112 }
11113#endif
11114
11115 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11116 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
11117 pf->num_iwarp_msix,
11118 I40E_IWARP_IRQ_PILE_ID);
11119 if (pf->iwarp_base_vector < 0) {
11120 dev_info(&pdev->dev,
11121 "failed to get tracking for %d vectors for IWARP err=%d\n",
11122 pf->num_iwarp_msix, pf->iwarp_base_vector);
11123 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11124 }
11125 }
11126
11127 i40e_dbg_pf_init(pf);
11128
	/* tell the firmware that we're starting */
11130 i40e_send_version(pf);
11131
	/* since everything's happy, start the service_task timer */
11133 mod_timer(&pf->service_timer,
11134 round_jiffies(jiffies + pf->service_timer_period));
11135
	/* add this PF to client device list and launch a client service task */
11137 err = i40e_lan_add_device(pf);
11138 if (err)
11139 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
11140 err);
11141
11142#ifdef I40E_FCOE
	/* create FCoE interface */
11144 i40e_fcoe_vsi_setup(pf);
11145
11146#endif
11147#define PCI_SPEED_SIZE 8
11148#define PCI_WIDTH_SIZE 8
11149
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
11153 if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
11154 char speed[PCI_SPEED_SIZE] = "Unknown";
11155 char width[PCI_WIDTH_SIZE] = "Unknown";
11156
		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
11160 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
11161 &link_status);
11162
11163 i40e_set_pci_config_data(hw, link_status);
11164
11165 switch (hw->bus.speed) {
11166 case i40e_bus_speed_8000:
11167 strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
11168 case i40e_bus_speed_5000:
11169 strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
11170 case i40e_bus_speed_2500:
11171 strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
11172 default:
11173 break;
11174 }
11175 switch (hw->bus.width) {
11176 case i40e_bus_width_pcie_x8:
11177 strncpy(width, "8", PCI_WIDTH_SIZE); break;
11178 case i40e_bus_width_pcie_x4:
11179 strncpy(width, "4", PCI_WIDTH_SIZE); break;
11180 case i40e_bus_width_pcie_x2:
11181 strncpy(width, "2", PCI_WIDTH_SIZE); break;
11182 case i40e_bus_width_pcie_x1:
11183 strncpy(width, "1", PCI_WIDTH_SIZE); break;
11184 default:
11185 break;
11186 }
11187
11188 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
11189 speed, width);
11190
11191 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
11192 hw->bus.speed < i40e_bus_speed_8000) {
11193 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
11194 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
11195 }
11196 }
11197
	/* get the requested speeds from the fw */
11199 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
11200 if (err)
11201 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
11202 i40e_stat_str(&pf->hw, err),
11203 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11204 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
11205
	/* get the supported phy types from the fw */
11207 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
11208 if (err)
11209 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
11210 i40e_stat_str(&pf->hw, err),
11211 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11212 pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);
11213
	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * VF/VM sessions.
	 */
11220 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
11221 pf->main_vsi_seid);
11222
11223 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
11224 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
11225 pf->flags |= I40E_FLAG_HAVE_10GBASET_PHY;
11226
	/* print a string summarizing features */
11228 i40e_print_features(pf);
11229
11230 return 0;
11231
	/* Unwind what we've done if something failed in the setup */
11233err_vsis:
11234 set_bit(__I40E_DOWN, &pf->state);
11235 i40e_clear_interrupt_scheme(pf);
11236 kfree(pf->vsi);
11237err_switch_setup:
11238 i40e_reset_interrupt_capability(pf);
11239 del_timer_sync(&pf->service_timer);
11240err_mac_addr:
11241err_configure_lan_hmc:
11242 (void)i40e_shutdown_lan_hmc(hw);
11243err_init_lan_hmc:
11244 kfree(pf->qp_pile);
11245err_sw_init:
11246err_adminq_setup:
11247err_pf_reset:
11248 iounmap(hw->hw_addr);
11249err_ioremap:
11250 kfree(pf);
11251err_pf_alloc:
11252 pci_disable_pcie_error_reporting(pdev);
11253 pci_release_mem_regions(pdev);
11254err_pci_reg:
11255err_dma:
11256 pci_disable_device(pdev);
11257 return err;
11258}

/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
11269static void i40e_remove(struct pci_dev *pdev)
11270{
11271 struct i40e_pf *pf = pci_get_drvdata(pdev);
11272 struct i40e_hw *hw = &pf->hw;
11273 i40e_status ret_code;
11274 int i;
11275
11276 i40e_dbg_pf_exit(pf);
11277
11278 i40e_ptp_stop(pf);
11279
	/* Disable RSS in hw */
11281 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
11282 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
11283
	/* no more scheduling of any task */
11285 set_bit(__I40E_SUSPENDED, &pf->state);
11286 set_bit(__I40E_DOWN, &pf->state);
11287 if (pf->service_timer.data)
11288 del_timer_sync(&pf->service_timer);
11289 if (pf->service_task.func)
11290 cancel_work_sync(&pf->service_task);
11291
11292 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
11293 i40e_free_vfs(pf);
11294 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
11295 }
11296
11297 i40e_fdir_teardown(pf);
11298
	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
11302 for (i = 0; i < I40E_MAX_VEB; i++) {
11303 if (!pf->veb[i])
11304 continue;
11305
11306 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
11307 pf->veb[i]->uplink_seid == 0)
11308 i40e_switch_branch_release(pf->veb[i]);
11309 }
11310
	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
11314 if (pf->vsi[pf->lan_vsi])
11315 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
11316
	/* remove attached clients */
11318 ret_code = i40e_lan_del_device(pf);
11319 if (ret_code) {
11320 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
11321 ret_code);
11322 }
11323
	/* shutdown and destroy the HMC */
11325 if (hw->hmc.hmc_obj) {
11326 ret_code = i40e_shutdown_lan_hmc(hw);
11327 if (ret_code)
11328 dev_warn(&pdev->dev,
11329 "Failed to destroy the HMC resources: %d\n",
11330 ret_code);
11331 }
11332
	/* shutdown the adminq */
11334 i40e_shutdown_adminq(hw);
11335
	/* destroy the locks only once, here */
11337 mutex_destroy(&hw->aq.arq_mutex);
11338 mutex_destroy(&hw->aq.asq_mutex);
11339
	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
11341 i40e_clear_interrupt_scheme(pf);
11342 for (i = 0; i < pf->num_alloc_vsi; i++) {
11343 if (pf->vsi[i]) {
11344 i40e_vsi_clear_rings(pf->vsi[i]);
11345 i40e_vsi_clear(pf->vsi[i]);
11346 pf->vsi[i] = NULL;
11347 }
11348 }
11349
11350 for (i = 0; i < I40E_MAX_VEB; i++) {
11351 kfree(pf->veb[i]);
11352 pf->veb[i] = NULL;
11353 }
11354
11355 kfree(pf->qp_pile);
11356 kfree(pf->vsi);
11357
11358 iounmap(hw->hw_addr);
11359 kfree(pf);
11360 pci_release_mem_regions(pdev);
11361
11362 pci_disable_pcie_error_reporting(pdev);
11363 pci_disable_device(pdev);
11364}

/**
 * i40e_pci_error_detected - warning that something went wrong
 * @pdev: PCI device information struct
 * @error: the type of PCI error
 *
 * Called to warn that something happened and the error handling steps
 * are in progress.  Allows the driver to take things down to clean.
 **/
11374static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
11375 enum pci_channel_state error)
11376{
11377 struct i40e_pf *pf = pci_get_drvdata(pdev);
11378
11379 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
11380
11381 if (!pf) {
11382 dev_info(&pdev->dev,
11383 "Cannot recover - error happened during device probe\n");
11384 return PCI_ERS_RESULT_DISCONNECT;
11385 }
11386
	/* shutdown all operations */
11388 if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
11389 rtnl_lock();
11390 i40e_prep_for_reset(pf);
11391 rtnl_unlock();
11392 }
11393
	/* Request a slot reset */
11395 return PCI_ERS_RESULT_NEED_RESET;
11396}

/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset.  If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
11407static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
11408{
11409 struct i40e_pf *pf = pci_get_drvdata(pdev);
11410 pci_ers_result_t result;
11411 int err;
11412 u32 reg;
11413
11414 dev_dbg(&pdev->dev, "%s\n", __func__);
11415 if (pci_enable_device_mem(pdev)) {
11416 dev_info(&pdev->dev,
11417 "Cannot re-enable PCI device after reset.\n");
11418 result = PCI_ERS_RESULT_DISCONNECT;
11419 } else {
11420 pci_set_master(pdev);
11421 pci_restore_state(pdev);
11422 pci_save_state(pdev);
11423 pci_wake_from_d3(pdev, false);
11424
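		/* if no global reset is still pending, consider the
		 * device recovered
		 */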
11425 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
11426 if (reg == 0)
11427 result = PCI_ERS_RESULT_RECOVERED;
11428 else
11429 result = PCI_ERS_RESULT_DISCONNECT;
11430 }
11431
11432 err = pci_cleanup_aer_uncorrect_error_status(pdev);
11433 if (err) {
11434 dev_info(&pdev->dev,
11435 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
11436 err);
		/* non-fatal, continue */
11438 }
11439
11440 return result;
11441}

/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
11450static void i40e_pci_error_resume(struct pci_dev *pdev)
11451{
11452 struct i40e_pf *pf = pci_get_drvdata(pdev);
11453
11454 dev_dbg(&pdev->dev, "%s\n", __func__);
11455 if (test_bit(__I40E_SUSPENDED, &pf->state))
11456 return;
11457
11458 rtnl_lock();
11459 i40e_handle_reset_warning(pf);
11460 rtnl_unlock();
11461}

/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
11467static void i40e_shutdown(struct pci_dev *pdev)
11468{
11469 struct i40e_pf *pf = pci_get_drvdata(pdev);
11470 struct i40e_hw *hw = &pf->hw;
11471
11472 set_bit(__I40E_SUSPENDED, &pf->state);
11473 set_bit(__I40E_DOWN, &pf->state);
11474 rtnl_lock();
11475 i40e_prep_for_reset(pf);
11476 rtnl_unlock();
11477
11478 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11479 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11480
11481 del_timer_sync(&pf->service_timer);
11482 cancel_work_sync(&pf->service_task);
11483 i40e_fdir_teardown(pf);
11484
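	/* prep for reset one more time now that the service timer and
	 * service task are quiesced and can no longer schedule new work
	 */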
11485 rtnl_lock();
11486 i40e_prep_for_reset(pf);
11487 rtnl_unlock();
11488
11489 wr32(hw, I40E_PFPM_APM,
11490 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11491 wr32(hw, I40E_PFPM_WUFC,
11492 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11493
11494 i40e_clear_interrupt_scheme(pf);
11495
11496 if (system_state == SYSTEM_POWER_OFF) {
11497 pci_wake_from_d3(pdev, pf->wol_en);
11498 pci_set_power_state(pdev, PCI_D3hot);
11499 }
11500}
11501
11502#ifdef CONFIG_PM
/**
 * i40e_suspend - PCI callback for moving to D3
 * @pdev: PCI device information struct
 * @state: the PM state we are entering
 **/
11507static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
11508{
11509 struct i40e_pf *pf = pci_get_drvdata(pdev);
11510 struct i40e_hw *hw = &pf->hw;
11511 int retval = 0;
11512
11513 set_bit(__I40E_SUSPENDED, &pf->state);
11514 set_bit(__I40E_DOWN, &pf->state);
11515
11516 rtnl_lock();
11517 i40e_prep_for_reset(pf);
11518 rtnl_unlock();
11519
11520 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11521 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11522
11523 i40e_stop_misc_vector(pf);
11524
11525 retval = pci_save_state(pdev);
11526 if (retval)
11527 return retval;
11528
11529 pci_wake_from_d3(pdev, pf->wol_en);
11530 pci_set_power_state(pdev, PCI_D3hot);
11531
11532 return retval;
11533}

/**
 * i40e_resume - PCI callback for waking up from D3
 * @pdev: PCI device information struct
 **/
11539static int i40e_resume(struct pci_dev *pdev)
11540{
11541 struct i40e_pf *pf = pci_get_drvdata(pdev);
11542 u32 err;
11543
11544 pci_set_power_state(pdev, PCI_D0);
11545 pci_restore_state(pdev);
11546
11547
11548
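	/* pci_restore_state() clears dev->state_saved, so
	 * call pci_save_state() again to restore it.
	 */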
11549 pci_save_state(pdev);
11550
11551 err = pci_enable_device_mem(pdev);
11552 if (err) {
11553 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
11554 return err;
11555 }
11556 pci_set_master(pdev);
11557
	/* no wakeup events while running */
11559 pci_wake_from_d3(pdev, false);
11560
11561
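	/* handling the reset will rebuild the device state */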
11562 if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
11563 clear_bit(__I40E_DOWN, &pf->state);
11564 rtnl_lock();
11565 i40e_reset_and_rebuild(pf, false);
11566 rtnl_unlock();
11567 }
11568
11569 return 0;
11570}
11571
11572#endif
11573static const struct pci_error_handlers i40e_err_handler = {
11574 .error_detected = i40e_pci_error_detected,
11575 .slot_reset = i40e_pci_error_slot_reset,
11576 .resume = i40e_pci_error_resume,
11577};
11578
11579static struct pci_driver i40e_driver = {
11580 .name = i40e_driver_name,
11581 .id_table = i40e_pci_tbl,
11582 .probe = i40e_probe,
11583 .remove = i40e_remove,
11584#ifdef CONFIG_PM
11585 .suspend = i40e_suspend,
11586 .resume = i40e_resume,
11587#endif
11588 .shutdown = i40e_shutdown,
11589 .err_handler = &i40e_err_handler,
11590 .sriov_configure = i40e_pci_sriov_configure,
11591};

/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
11599static int __init i40e_init_module(void)
11600{
11601 pr_info("%s: %s - version %s\n", i40e_driver_name,
11602 i40e_driver_string, i40e_driver_version_str);
11603 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
11604
	/* There is no need to throttle the number of active tasks because
	 * each device limits its own task using a state bit for scheduling
	 * the service task, and the device tasks do not interfere with each
	 * other, so we don't set a max task limit.  We must set WQ_MEM_RECLAIM
	 * since we need to be able to guarantee forward progress even under
	 * memory pressure.
	 */
11609 i40e_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
11610 i40e_driver_name);
11611 if (!i40e_wq) {
11612 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
11613 return -ENOMEM;
11614 }
11615
11616 i40e_dbg_init();
11617 return pci_register_driver(&i40e_driver);
11618}
11619module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
11627static void __exit i40e_exit_module(void)
11628{
11629 pci_unregister_driver(&i40e_driver);
11630 destroy_workqueue(i40e_wq);
11631 i40e_dbg_exit();
11632}
11633module_exit(i40e_exit_module);
11634