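/* Intel(R) Ethernet Connection XL710 Network Driver
 *
 * i40e physical function (PF) driver: driver registration, VSI
 * statistics, MAC/VLAN filter management, and queue/interrupt setup.
 */
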
#include "i40e.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
		"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 11
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." \
	__stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation.";

static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
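
/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */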
static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
	{PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
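
/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/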
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}
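
/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:  pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/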
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}
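
/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/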
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}
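
/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:  pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/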
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}
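
/**
 * i40e_get_lump - find a lump of free generic ring
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/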
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		} else {
			/* not enough, so skip over it and continue looking */
			i += j;
		}
	}

	return ret;
}
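
/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/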
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
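
/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/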
static void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		schedule_work(&pf->service_task);
}
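
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * The recovery level escalates on repeated timeouts, from a VSI
 * reinit up through PF, core, and global resets.
 **/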
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	pf->tx_timeout_count++;

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 0;
	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d\n",
		    pf->tx_timeout_recovery_level);

	switch (pf->tx_timeout_recovery_level) {
	case 0:
		/* disable and re-enable queues for the VSI */
		if (in_interrupt()) {
			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
		} else {
			i40e_vsi_reinit_locked(vsi);
		}
		break;
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		i40e_down(vsi);
		break;
	}
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
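
/**
 * i40e_release_rx_desc - Store the new tail value
 * @rx_ring: ring to bump
 * @val: new tail index
 **/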
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
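
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/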
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}
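
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: storage for the stats to report
 *
 * Sums the per-ring packet/byte counters under RCU and fills in the
 * error counters kept up to date by the service task.
 **/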
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		struct i40e_ring *tx_ring, *rx_ring;
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		do {
			start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;

		/* the Rx ring is stored directly after the Tx ring */
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* the following stats are filled in by the service task */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}
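
/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/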
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings)
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	vsi->stat_offsets_loaded = false;
}
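
/**
 * i40e_pf_reset_stats - Reset all of the stats for the given pf
 * @pf: the PF to be reset
 **/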
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;
}
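
/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/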
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_QEMU_DEVICE_ID) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
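
/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/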
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}
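
/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/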
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}
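
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/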
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;
	int idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	veb->stat_offsets_loaded = true;
}
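
/**
 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
 **/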
static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u64 xoff = 0;
	u16 i, v;

	if ((hw->fc.current_mode != I40E_FC_FULL) &&
	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
		return;

	xoff = nsd->link_xoff_rx;
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);

	/* No new LFC xoff rx */
	if (!(nsd->link_xoff_rx - xoff))
		return;

	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi)
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];
			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
		}
	}
}
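
/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
 **/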
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u16 i, v;
	u8 tc;

	dcb_cfg = &hw->local_dcbx_config;

	/* See if DCB enabled with PFC TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
	    !(dcb_cfg->pfc.pfcenable)) {
		i40e_update_link_xoff_rx(pf);
		return;
	}

	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		u64 prio_xoff = nsd->priority_xoff_rx[i];
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);

		/* No new PFC xoff rx */
		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
			continue;
		/* Get the TC for given user priority */
		tc = dcb_cfg->etscfg.prioritytable[i];
		xoff[tc] = true;
	}

	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings on xoff'd TCs */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi)
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			tc = ring->dcb_tc;
			if (xoff[tc])
				clear_bit(__I40E_HANG_CHECK_ARMED,
					  &ring->state);
		}
	}
}
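
/**
 * i40e_update_stats - Update the board statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it all out here in a central place.
 **/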
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;
	u32 tx_restart, tx_busy;
	u32 rx_page, rx_buf;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	int i;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		struct i40e_ring *p;
		u64 bytes, packets;
		unsigned int start;

		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;

		/* Rx queue is part of the same block as Tx ring */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_rx_buff_failed;
		rx_page += p->rx_stats.alloc_rx_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	i40e_update_eth_stats(vsi);
	/* update netdev stats from eth stats */
	ons->rx_errors = oes->rx_errors;
	ns->rx_errors = es->rx_errors;
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* Get the port data only if this is the main PF VSI */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		struct i40e_hw_port_stats *nsd = &pf->stats;
		struct i40e_hw_port_stats *osd = &pf->stats_offsets;

		i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
				   I40E_GLPRT_GORCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
		i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
				   I40E_GLPRT_GOTCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
		i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_discards,
				   &nsd->eth.rx_discards);
		i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.tx_discards,
				   &nsd->eth.tx_discards);
		i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
				   I40E_GLPRT_MPRCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_multicast,
				   &nsd->eth.rx_multicast);

		i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_dropped_link_down,
				   &nsd->tx_dropped_link_down);

		i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->crc_errors, &nsd->crc_errors);
		ns->rx_crc_errors = nsd->crc_errors;

		i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->illegal_bytes, &nsd->illegal_bytes);
		ns->rx_errors = nsd->crc_errors + nsd->illegal_bytes;

		i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->mac_local_faults,
				   &nsd->mac_local_faults);
		i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->mac_remote_faults,
				   &nsd->mac_remote_faults);

		i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_length_errors,
				   &nsd->rx_length_errors);
		ns->rx_length_errors = nsd->rx_length_errors;

		i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xon_rx, &nsd->link_xon_rx);
		i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xon_tx, &nsd->link_xon_tx);
		i40e_update_prio_xoff_rx(pf);  /* handles LXOFFRXC as well */
		i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xoff_tx, &nsd->link_xoff_tx);

		for (i = 0; i < 8; i++) {
			i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_rx[i],
					   &nsd->priority_xon_rx[i]);
			i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_tx[i],
					   &nsd->priority_xon_tx[i]);
			i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xoff_tx[i],
					   &nsd->priority_xoff_tx[i]);
			i40e_stat_update32(hw,
					   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_2_xoff[i],
					   &nsd->priority_xon_2_xoff[i]);
		}

		i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
				   I40E_GLPRT_PRC64L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_64, &nsd->rx_size_64);
		i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
				   I40E_GLPRT_PRC127L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_127, &nsd->rx_size_127);
		i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
				   I40E_GLPRT_PRC255L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_255, &nsd->rx_size_255);
		i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
				   I40E_GLPRT_PRC511L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_511, &nsd->rx_size_511);
		i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
				   I40E_GLPRT_PRC1023L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_1023, &nsd->rx_size_1023);
		i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
				   I40E_GLPRT_PRC1522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_1522, &nsd->rx_size_1522);
		i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
				   I40E_GLPRT_PRC9522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_big, &nsd->rx_size_big);

		i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
				   I40E_GLPRT_PTC64L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_64, &nsd->tx_size_64);
		i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
				   I40E_GLPRT_PTC127L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_127, &nsd->tx_size_127);
		i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
				   I40E_GLPRT_PTC255L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_255, &nsd->tx_size_255);
		i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
				   I40E_GLPRT_PTC511L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_511, &nsd->tx_size_511);
		i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
				   I40E_GLPRT_PTC1023L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_1023, &nsd->tx_size_1023);
		i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
				   I40E_GLPRT_PTC1522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_1522, &nsd->tx_size_1522);
		i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
				   I40E_GLPRT_PTC9522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_big, &nsd->tx_size_big);

		i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_undersize, &nsd->rx_undersize);
		i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_fragments, &nsd->rx_fragments);
		i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_oversize, &nsd->rx_oversize);
		i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_jabber, &nsd->rx_jabber);
	}

	pf->stat_offsets_loaded = true;
}
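
/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure its a vf filter, else doesn't matter
 * @is_netdev: make sure its a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/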
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}
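
/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure its a vf filter, else doesn't matter
 * @is_netdev: make sure its a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/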
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}
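
/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/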
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* Only -1 for all the filters denotes not in vlan mode
	 * so we have to go through all the list in order to make sure
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0)
			return true;
	}

	return false;
}
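
/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a vf
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/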
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}
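
/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure its a vf filter, else doesn't matter
 * @is_netdev: make sure its a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/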
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		memcpy(f->macaddr, macaddr, ETH_ALEN);
		f->vlan = vlan;
		f->changed = true;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	/* changed tells sync_filters_subtask to
	 * account for filters that are still in
	 * the changed state
	 */
	if (f->changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}
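
/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 **/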
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by vf or netdev */
		int min_f = 0;
		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		f->changed = true;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}
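
/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/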
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;
		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_ONLY,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}

		memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
	}

	/* In order to be sure to not drop any packets, add the new address
	 * first, then delete the old one.
	 */
	f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
	if (!f)
		return -ENOMEM;

	i40e_sync_vsi_filters(vsi);
	i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
	i40e_sync_vsi_filters(vsi);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return 0;
}
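
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/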
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & (1 << i))
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & (1 << i)) {
			int pow, num_qps;

			vsi->tc_config.tc_info[i].qoffset = offset;
			switch (vsi->type) {
			case I40E_VSI_MAIN:
				if (i == 0)
					qcount = pf->rss_size;
				else
					qcount = pf->num_tc_qps;
				vsi->tc_config.tc_info[i].qcount = qcount;
				break;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = vsi->alloc_queue_pairs;
				vsi->tc_config.tc_info[i].qcount = qcount;
				WARN_ON(i != 0);
				break;
			}

			/* find the power-of-2 of the number of queue pairs */
			num_qps = vsi->tc_config.tc_info[i].qcount;
			pow = 0;
			while (num_qps &&
			      ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += vsi->tc_config.tc_info[i].qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					       cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
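
/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/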
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		bool found = false;

		if (!f->is_netdev)
			continue;

		if (is_multicast_ether_addr(f->macaddr)) {
			netdev_for_each_mc_addr(mca, netdev) {
				if (ether_addr_equal(mca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		} else {
			netdev_for_each_uc_addr(uca, netdev) {
				if (ether_addr_equal(uca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}

			for_each_dev_addr(netdev, ha) {
				if (ether_addr_equal(ha->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		}
		if (!found)
			i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					false, true);
	}

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}
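
/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/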
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	bool promisc_forced_on = false;
	bool add_happened = false;
	int filter_list_len = 0;
	u32 changed_flags = 0;
	i40e_status aq_ret = 0;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	u16 cmd_flags;

	/* empty array typed pointers, kcalloc later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		filter_list_len = pf->hw.aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kcalloc(filter_list_len,
			    sizeof(struct i40e_aqc_remove_macvlan_element_data),
			    GFP_KERNEL);
		if (!del_list)
			return -ENOMEM;

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter != 0)
				continue;
			f->changed = false;
			cmd_flags = 0;

			/* add to delete list */
			memcpy(del_list[num_del].mac_addr,
			       f->macaddr, ETH_ALEN);
			del_list[num_del].vlan_tag =
				cpu_to_le16((u16)(f->vlan ==
					    I40E_VLAN_ANY ? 0 : f->vlan));

			/* vlan0 as wild card to allow packets from all vlans */
			if (f->vlan == I40E_VLAN_ANY ||
			    (vsi->netdev && !(vsi->netdev->features &
					      NETIF_F_HW_VLAN_CTAG_FILTER)))
				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* unlink from filter list */
			list_del(&f->list);
			kfree(f);

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
					    vsi->seid, del_list, num_del,
					    NULL);
				num_del = 0;
				memset(del_list, 0, sizeof(*del_list));

				if (aq_ret)
					dev_info(&pf->pdev->dev,
						 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
						 aq_ret,
						 pf->hw.aq.asq_last_status);
			}
		}
		if (num_del) {
			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
							del_list, num_del,
							NULL);
			num_del = 0;

			if (aq_ret)
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error, err %d, aq_err %d\n",
					 aq_ret, pf->hw.aq.asq_last_status);
		}

		kfree(del_list);
		del_list = NULL;

		/* do all the adds now */
		filter_list_len = pf->hw.aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kcalloc(filter_list_len,
			       sizeof(struct i40e_aqc_add_macvlan_element_data),
			       GFP_KERNEL);
		if (!add_list)
			return -ENOMEM;

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter == 0)
				continue;
			f->changed = false;
			add_happened = true;
			cmd_flags = 0;

			/* add to add array */
			memcpy(add_list[num_add].mac_addr,
			       f->macaddr, ETH_ALEN);
			add_list[num_add].vlan_tag =
				cpu_to_le16(
				 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
			add_list[num_add].queue_number = 0;

			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;

			/* vlan0 as wild card to allow packets from all vlans */
			if (f->vlan == I40E_VLAN_ANY || (vsi->netdev &&
			    !(vsi->netdev->features &
			      NETIF_F_HW_VLAN_CTAG_FILTER)))
				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
							     add_list, num_add,
							     NULL);
				num_add = 0;

				if (aq_ret)
					break;
				memset(add_list, 0, sizeof(*add_list));
			}
		}
		if (num_add) {
			aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
						     add_list, num_add, NULL);
			num_add = 0;
		}
		kfree(add_list);
		add_list = NULL;

		if (add_happened && (!aq_ret)) {
			/* do nothing */;
		} else if (add_happened && (aq_ret)) {
			dev_info(&pf->pdev->dev,
				 "add filter failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
			if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
			    !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
				      &vsi->state)) {
				promisc_forced_on = true;
				set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state);
				dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
			}
		}
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;
		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}
	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
		bool cur_promisc;
		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state));
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
							     vsi->seid,
							     cur_promisc, NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set uni promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}

	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
	return 0;
}
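
/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/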
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
		return;
	pf->flags &= ~I40E_FLAG_FILTER_SYNC;

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
			i40e_sync_vsi_filters(pf->vsi[v]);
	}
}
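
/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/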
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	struct i40e_vsi *vsi = np->vsi;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
		return -EINVAL;

	netdev_info(netdev, "changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		i40e_vsi_reinit_locked(vsi);

	return 0;
}
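
/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/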
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
		return;  /* already enabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;

	ctxt.seid = vsi->seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "%s: update vsi failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}
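
/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/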
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
		return;  /* already disabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	ctxt.seid = vsi->seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "%s: update vsi failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}
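
/**
 * i40e_vlan_rx_register - Setup or shutdown vlan offload
 * @netdev: network interface to be adjusted
 * @features: netdev features to test if VLAN offload is enabled or not
 **/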
static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);
}
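
/**
 * i40e_vsi_add_vlan - Add vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only, -1 = any)
 **/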
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *add_f;
	bool is_netdev, is_vf;
	int ret;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(vsi->netdev);

	if (is_netdev) {
		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
					is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, vsi->netdev->dev_addr);
			return -ENOMEM;
		}
	}

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			return -ENOMEM;
		}
	}

	ret = i40e_sync_vsi_filters(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Could not sync filters for vid %d\n", vid);
		return ret;
	}

	/* Now if we add a vlan tag, make sure to check if it is the first
	 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
	 * with 0, so we now accept untagged and specified tagged traffic
	 * (and not any tagged and untagged)
	 */
	if (vid > 0) {
		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
						  I40E_VLAN_ANY,
						  is_vf, is_netdev)) {
			i40e_del_filter(vsi, vsi->netdev->dev_addr,
					I40E_VLAN_ANY, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 vsi->netdev->dev_addr);
				return -ENOMEM;
			}
		}

		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					     is_vf, is_netdev)) {
				i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
						is_vf, is_netdev);
				add_f = i40e_add_filter(vsi, f->macaddr,
							0, is_vf, is_netdev);
				if (!add_f) {
					dev_info(&vsi->back->pdev->dev,
						 "Could not add filter 0 for %pM\n",
						 f->macaddr);
					return -ENOMEM;
				}
			}
		}
		ret = i40e_sync_vsi_filters(vsi);
	}

	return ret;
}
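
/**
 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
 *
 * Return: 0 on success or negative otherwise
 **/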
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_mac_filter *f, *add_f;
	bool is_vf, is_netdev;
	int filter_count = 0;
	int ret;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(netdev);

	if (is_netdev)
		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);

	list_for_each_entry(f, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);

	ret = i40e_sync_vsi_filters(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
		return ret;
	}

	/* go through all the filters for this VSI and if there is only
	 * vid == 0 it means there are no other filters, so vid 0 must
	 * be replaced with -1. This signifies that we should from now
	 * on accept any traffic (with any tag present, or untagged)
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (is_netdev) {
			if (f->vlan &&
			    ether_addr_equal(netdev->dev_addr, f->macaddr))
				filter_count++;
		}

		if (f->vlan)
			filter_count++;
	}

	if (!filter_count && is_netdev) {
		i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
		f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				    is_vf, is_netdev);
		if (!f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add filter %d for %pM\n",
				 I40E_VLAN_ANY, netdev->dev_addr);
			return -ENOMEM;
		}
	}

	if (!filter_count) {
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter %d for %pM\n",
					 I40E_VLAN_ANY, f->macaddr);
				return -ENOMEM;
			}
		}
	}

	return i40e_sync_vsi_filters(vsi);
}
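
/**
 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol value
 * @vid: vlan id to be added
 *
 * Return: 0 on success or negative otherwise
 **/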
static int i40e_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	int ret = 0;

	if (vid > 4095)
		return -EINVAL;

	netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);

	/* If the network stack called us with vid = 0, map it to
	 * I40E_VLAN_ANY so the filter accepts any vlan id, tagged
	 * or untagged
	 */
	ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);

	if (!ret && (vid < VLAN_N_VID))
		set_bit(vid, vsi->active_vlans);

	return ret;
}
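
/**
 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol value
 * @vid: vlan id to be removed
 *
 * Return: 0 on success or negative otherwise
 **/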
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);

	/* return code is ignored as there is nothing a user
	 * can do about failure to remove and a log message was
	 * already printed from the other function
	 */
	i40e_vsi_kill_vlan(vsi, vid);

	clear_bit(vid, vsi->active_vlans);

	return 0;
}
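
/**
 * i40e_restore_vlan - Reload the active vlan filters after reset
 * @vsi: the vsi being adjusted
 **/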
static void i40e_restore_vlan(struct i40e_vsi *vsi)
{
	u16 vid;

	if (!vsi->netdev)
		return;

	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);

	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
		i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
				     vid);
}
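
/**
 * i40e_vsi_add_pvid - Add pvid for the VSI
 * @vsi: the vsi being adjusted
 * @vid: the vlan id to set as a PVID
 *
 * Return: 0 on success or negative otherwise
 **/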
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
	struct i40e_vsi_context ctxt;
	i40e_status aq_ret;

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.pvid = cpu_to_le16(vid);
	vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
	vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;

	ctxt.seid = vsi->seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (aq_ret) {
		dev_info(&vsi->back->pdev->dev,
			 "%s: update vsi failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
		return -ENOENT;
	}

	return 0;
}
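
/**
 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
 * @vsi: the vsi being adjusted
 *
 * Just use the vlan_rx_register() service to put it back to normal
 **/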
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
	vsi->info.pvid = 0;
	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
}
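
/**
 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/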
static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);

	return err;
}
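
/**
 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free all transmit software resources
 **/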
static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->tx_rings[i]->desc)
			i40e_free_tx_resources(vsi->tx_rings[i]);
}
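
/**
 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/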
static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
	return err;
}
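
/**
 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free all receive software resources
 **/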
static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->rx_rings[i]->desc)
			i40e_free_rx_resources(vsi->rx_rings[i]);
}
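
/**
 * i40e_configure_tx_ring - Configure a transmit ring context and tail
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
 **/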
static int i40e_configure_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	i40e_status err = 0;
	u32 qtx_ctl = 0;

	/* some ATR related tx ring init */
	if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) {
		ring->atr_sample_rate = vsi->back->atr_sample_rate;
		ring->atr_count = 0;
	} else {
		ring->atr_sample_rate = 0;
	}

	/* initialize XPS */
	if (ring->q_vector && ring->netdev &&
	    !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
		netif_set_xps_queue(ring->netdev,
				    &ring->q_vector->affinity_mask,
				    ring->queue_index);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));

	tx_ctx.new_context = 1;
	tx_ctx.base = (ring->dma / 128);
	tx_ctx.qlen = ring->count;
	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED |
					       I40E_FLAG_FDIR_ATR_ENABLED));

	/* As part of VSI creation/update, FW allocates certain
	 * Tx arbitration queue sets for each TC enabled for
	 * the VSI. The FW returns the handles to these queue
	 * sets as part of the response buffer to Add VSI,
	 * Update VSI, etc. AQ commands. It is expected that
	 * these queue set handles be associated with the Tx
	 * queues by the driver as part of the TX queue context
	 * initialization. This has to be done regardless of
	 * DCB as by default everything is mapped to TC0.
	 */
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* Now associate this queue with this PCI function */
	qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
	i40e_flush(hw);

	clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);

	/* cache tail off for easier writes later */
	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);

	return 0;
}
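
/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
 **/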
static int i40e_configure_rx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	i40e_status err = 0;

	ring->state = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(rx_ctx));

	ring->rx_buf_len = vsi->rx_buf_len;
	ring->rx_hdr_len = vsi->rx_hdr_len;

	rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;

	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;

	if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
		set_ring_16byte_desc_enabled(ring);
		rx_ctx.dsize = 0;
	} else {
		rx_ctx.dsize = 1;
	}

	rx_ctx.dtype = vsi->dtype;
	if (vsi->dtype) {
		set_ring_ps_enabled(ring);
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
	} else {
		rx_ctx.hsplit_0 = 0;
	}

	rx_ctx.rxmax = min_t(u16, vsi->max_frame,
			     (chain_len * ring->rx_buf_len));
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;
	rx_ctx.l2tsel = 1;
	rx_ctx.showiv = 1;

	/* clear the context in the HMC */
	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* cache tail for quicker writes, and clear the reg before use */
	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
	writel(0, ring->tail);

	i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));

	return 0;
}
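
/**
 * i40e_vsi_configure_tx - Configure the VSI for Tx
 * @vsi: VSI structure describing this set of rings and resources
 *
 * Configure the Tx VSI for operation.
 **/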
static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
		err = i40e_configure_tx_ring(vsi->tx_rings[i]);

	return err;
}
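
/**
 * i40e_vsi_configure_rx - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Configure the Rx VSI for operation.
 **/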
static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
		vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
			       + ETH_FCS_LEN + VLAN_HLEN;
	else
		vsi->max_frame = I40E_RXBUFFER_2048;

	/* figure out correct receive buffer length */
	switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
				    I40E_FLAG_RX_PS_ENABLED)) {
	case I40E_FLAG_RX_1BUF_ENABLED:
		vsi->rx_hdr_len = 0;
		vsi->rx_buf_len = vsi->max_frame;
		vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
		break;
	case I40E_FLAG_RX_PS_ENABLED:
		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
		vsi->rx_buf_len = I40E_RXBUFFER_2048;
		vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
		break;
	default:
		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
		vsi->rx_buf_len = I40E_RXBUFFER_2048;
		vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
		break;
	}

	/* round up for the chip's needs */
	vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
				(1 << I40E_RXQ_CTX_HBUFF_SHIFT));
	vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
				(1 << I40E_RXQ_CTX_DBUFF_SHIFT));

	/* set up individual rings */
	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_configure_rx_ring(vsi->rx_rings[i]);

	return err;
}
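
/**
 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
 * @vsi: ptr to the VSI
 **/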
static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
{
	u16 qoffset, qcount;
	int i, n;

	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
		return;

	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
		if (!(vsi->tc_config.enabled_tc & (1 << n)))
			continue;

		qoffset = vsi->tc_config.tc_info[n].qoffset;
		qcount = vsi->tc_config.tc_info[n].qcount;
		for (i = qoffset; i < (qoffset + qcount); i++) {
			struct i40e_ring *rx_ring = vsi->rx_rings[i];
			struct i40e_ring *tx_ring = vsi->tx_rings[i];
			rx_ring->dcb_tc = n;
			tx_ring->dcb_tc = n;
		}
	}
}
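
/**
 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
 * @vsi: ptr to the VSI
 **/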
static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
{
	if (vsi->netdev)
		i40e_set_rx_mode(vsi->netdev);
}
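
/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 **/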
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int err;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);
	err = i40e_vsi_configure_tx(vsi);
	if (!err)
		err = i40e_vsi_configure_rx(vsi);

	return err;
}
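
/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 **/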
2404static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2405{
2406 struct i40e_pf *pf = vsi->back;
2407 struct i40e_q_vector *q_vector;
2408 struct i40e_hw *hw = &pf->hw;
2409 u16 vector;
2410 int i, q;
2411 u32 val;
2412 u32 qp;
2413
2414
2415
2416
2417
2418 qp = vsi->base_queue;
2419 vector = vsi->base_vector;
2420 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2421 q_vector = vsi->q_vectors[i];
2422 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2423 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2424 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2425 q_vector->rx.itr);
2426 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2427 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2428 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2429 q_vector->tx.itr);
2430
2431
2432 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
2433 for (q = 0; q < q_vector->num_ringpairs; q++) {
2434 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2435 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2436 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2438 (I40E_QUEUE_TYPE_TX
2439 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2440
2441 wr32(hw, I40E_QINT_RQCTL(qp), val);
2442
2443 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2444 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2445 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2447 (I40E_QUEUE_TYPE_RX
2448 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2449
2450
2451 if (q == (q_vector->num_ringpairs - 1))
2452 val |= (I40E_QUEUE_END_OF_LIST
2453 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2454
2455 wr32(hw, I40E_QINT_TQCTL(qp), val);
2456 qp++;
2457 }
2458 }
2459
2460 i40e_flush(hw);
2461}
2462
2463
2464
2465
2466
2467static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
2468{
2469 u32 val;
2470
2471
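 /* clear things first */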
2472 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
2473 rd32(hw, I40E_PFINT_ICR0);
2474
2475 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2476 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2477 I40E_PFINT_ICR0_ENA_GRST_MASK |
2478 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2479 I40E_PFINT_ICR0_ENA_GPIO_MASK |
2480 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
2481 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2482 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2483 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2484
2485 wr32(hw, I40E_PFINT_ICR0_ENA, val);
2486
2487
2488 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2489 I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2490
2491
2492 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2493}
2494
2495
2496
2497
2498
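/**
 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
 * @vsi: the VSI being configured
 **/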
2499static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2500{
2501 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
2502 struct i40e_pf *pf = vsi->back;
2503 struct i40e_hw *hw = &pf->hw;
2504 u32 val;
2505
2506
2507 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2508 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2509 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2510 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2511 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2512 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2513
2514 i40e_enable_misc_int_causes(hw);
2515
2516
2517 wr32(hw, I40E_PFINT_LNKLST0, 0);
2518
2519
 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2523
2524 wr32(hw, I40E_QINT_RQCTL(0), val);
2525
2526 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2527 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2528 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2529
2530 wr32(hw, I40E_QINT_TQCTL(0), val);
2531 i40e_flush(hw);
2532}
2533
2534
2535
2536
2537
2538void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2539{
2540 struct i40e_hw *hw = &pf->hw;
2541 u32 val;
2542
2543 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2544 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2545 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2546
2547 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2548 i40e_flush(hw);
2549}
2550
2551
2552
2553
2554
2555
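/**
 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
 * @vsi: pointer to a vsi
 * @vector: enable the particular HW interrupt vector
 **/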
2556void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2557{
2558 struct i40e_pf *pf = vsi->back;
2559 struct i40e_hw *hw = &pf->hw;
2560 u32 val;
2561
2562 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2563 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2564 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2565 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
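 /* skip the flush */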
2566
2567}
2568
2569
2570
2571
2572
2573
2574static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
2575{
2576 struct i40e_q_vector *q_vector = data;
2577
2578 if (!q_vector->tx.ring && !q_vector->rx.ring)
2579 return IRQ_HANDLED;
2580
2581 napi_schedule(&q_vector->napi);
2582
2583 return IRQ_HANDLED;
2584}
2585
2586
2587
2588
2589
2590
2591static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
2592{
2593 struct i40e_q_vector *q_vector = data;
2594
2595 if (!q_vector->tx.ring && !q_vector->rx.ring)
2596 return IRQ_HANDLED;
2597
2598 pr_info("fdir ring cleaning needed\n");
2599
2600 return IRQ_HANDLED;
2601}
2602
2603
2604
2605
2606
2607
2608
2609
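/**
 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
 * @vsi: the VSI being configured
 * @basename: name for the vector
 *
 * Requests an interrupt from the kernel for each in-use q_vector,
 * naming it after the VSI and the rings it services.
 **/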
2610static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
2611{
2612 int q_vectors = vsi->num_q_vectors;
2613 struct i40e_pf *pf = vsi->back;
2614 int base = vsi->base_vector;
2615 int rx_int_idx = 0;
2616 int tx_int_idx = 0;
2617 int vector, err;
2618
2619 for (vector = 0; vector < q_vectors; vector++) {
2620 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
2621
2622 if (q_vector->tx.ring && q_vector->rx.ring) {
2623 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2624 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2625 tx_int_idx++;
2626 } else if (q_vector->rx.ring) {
2627 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2628 "%s-%s-%d", basename, "rx", rx_int_idx++);
2629 } else if (q_vector->tx.ring) {
2630 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2631 "%s-%s-%d", basename, "tx", tx_int_idx++);
2632 } else {
2633
2634 continue;
2635 }
2636 err = request_irq(pf->msix_entries[base + vector].vector,
2637 vsi->irq_handler,
2638 0,
2639 q_vector->name,
2640 q_vector);
2641 if (err) {
2642 dev_info(&pf->pdev->dev,
2643 "%s: request_irq failed, error: %d\n",
2644 __func__, err);
2645 goto free_queue_irqs;
2646 }
2647
2648 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2649 &q_vector->affinity_mask);
2650 }
2651
2652 return 0;
2653
2654free_queue_irqs:
2655 while (vector) {
2656 vector--;
2657 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2658 NULL);
 free_irq(pf->msix_entries[base + vector].vector,
 vsi->q_vectors[vector]);
2661 }
2662 return err;
2663}
2664
2665
2666
2667
2668
2669static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
2670{
2671 struct i40e_pf *pf = vsi->back;
2672 struct i40e_hw *hw = &pf->hw;
2673 int base = vsi->base_vector;
2674 int i;
2675
2676 for (i = 0; i < vsi->num_queue_pairs; i++) {
2677 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
2678 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
2679 }
2680
2681 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2682 for (i = vsi->base_vector;
2683 i < (vsi->num_q_vectors + vsi->base_vector); i++)
2684 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
2685
2686 i40e_flush(hw);
2687 for (i = 0; i < vsi->num_q_vectors; i++)
2688 synchronize_irq(pf->msix_entries[i + base].vector);
2689 } else {
2690
2691 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
2692 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
2693 i40e_flush(hw);
2694 synchronize_irq(pf->pdev->irq);
2695 }
2696}
2697
2698
2699
2700
2701
2702static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
2703{
2704 struct i40e_pf *pf = vsi->back;
2705 int i;
2706
2707 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2708 for (i = vsi->base_vector;
2709 i < (vsi->num_q_vectors + vsi->base_vector); i++)
2710 i40e_irq_dynamic_enable(vsi, i);
2711 } else {
2712 i40e_irq_dynamic_enable_icr0(pf);
2713 }
2714
2715 i40e_flush(&pf->hw);
2716 return 0;
2717}
2718
2719
2720
2721
2722
2723static void i40e_stop_misc_vector(struct i40e_pf *pf)
2724{
2725
2726 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
2727 i40e_flush(&pf->hw);
2728}
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
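/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts.  This is also used in
 * MSIX mode to handle the non-queue interrupts.
 **/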
2739static irqreturn_t i40e_intr(int irq, void *data)
2740{
2741 struct i40e_pf *pf = (struct i40e_pf *)data;
2742 struct i40e_hw *hw = &pf->hw;
2743 u32 icr0, icr0_remaining;
2744 u32 val, ena_mask;
2745
2746 icr0 = rd32(hw, I40E_PFINT_ICR0);
2747
2748 val = rd32(hw, I40E_PFINT_DYN_CTL0);
 val |= I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
2750 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2751
2752
2753 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
2754 return IRQ_NONE;
2755
2756 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
2757
2758
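 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */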
2759 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
2760
2761
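 /* temporarily disable queue cause for NAPI processing */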
2762 u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
2763 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2764 wr32(hw, I40E_QINT_RQCTL(0), qval);
2765
2766 qval = rd32(hw, I40E_QINT_TQCTL(0));
2767 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
2768 wr32(hw, I40E_QINT_TQCTL(0), qval);
2769
2770 if (!test_bit(__I40E_DOWN, &pf->state))
2771 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
2772 }
2773
2774 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
2775 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2776 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
2777 }
2778
2779 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
2780 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2781 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
2782 }
2783
2784 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
2785 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
2786 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
2787 }
2788
2789 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
2790 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
2791 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
2792 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
2793 val = rd32(hw, I40E_GLGEN_RSTAT);
2794 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
2795 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
 if (val == I40E_RESET_CORER)
 pf->corer_count++;
 else if (val == I40E_RESET_GLOBR)
 pf->globr_count++;
 else if (val == I40E_RESET_EMPR)
 pf->empr_count++;
2802 }
2803
2804
2805
2806
2807
2808 icr0_remaining = icr0 & ena_mask;
2809 if (icr0_remaining) {
2810 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
2811 icr0_remaining);
2812 if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) ||
2813 (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
2814 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
2815 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
2816 (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
2817 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
2818 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
2819 } else {
2820 dev_info(&pf->pdev->dev, "device will be reset\n");
2821 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
2822 i40e_service_event_schedule(pf);
2823 }
2824 }
2825 ena_mask &= ~icr0_remaining;
2826 }
2827
2828
2829 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
2830 if (!test_bit(__I40E_DOWN, &pf->state)) {
2831 i40e_service_event_schedule(pf);
2832 i40e_irq_dynamic_enable_icr0(pf);
2833 }
2834
2835 return IRQ_HANDLED;
2836}
2837
2838
2839
2840
2841
2842
2843
2844static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
2845{
2846 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
2847 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
2848 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
2849
2850 tx_ring->q_vector = q_vector;
2851 tx_ring->next = q_vector->tx.ring;
2852 q_vector->tx.ring = tx_ring;
2853 q_vector->tx.count++;
2854
2855 rx_ring->q_vector = q_vector;
2856 rx_ring->next = q_vector->rx.ring;
2857 q_vector->rx.ring = rx_ring;
2858 q_vector->rx.count++;
2859}
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
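/**
 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per queue pair, but on a constrained vector budget, we
 * group the queue pairs as "efficiently" as possible.
 **/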
2870static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
2871{
2872 int qp_remaining = vsi->num_queue_pairs;
2873 int q_vectors = vsi->num_q_vectors;
2874 int num_ringpairs;
2875 int v_start = 0;
2876 int qp_idx = 0;
2877
2878
2879
2880
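 /* If we don't have enough vectors for a 1-to-1 mapping, spread
  * the remaining ring pairs as evenly as possible across the
  * remaining vectors; earlier vectors may get one extra pair.
  */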
2881 for (; v_start < q_vectors && qp_remaining; v_start++) {
2882 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
2883
2884 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
2885
2886 q_vector->num_ringpairs = num_ringpairs;
2887
2888 q_vector->rx.count = 0;
2889 q_vector->tx.count = 0;
2890 q_vector->rx.ring = NULL;
2891 q_vector->tx.ring = NULL;
2892
2893 while (num_ringpairs--) {
2894 map_vector_to_qp(vsi, v_start, qp_idx);
2895 qp_idx++;
2896 qp_remaining--;
2897 }
2898 }
2899}
2900
2901
2902
2903
2904
2905
2906static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
2907{
2908 struct i40e_pf *pf = vsi->back;
2909 int err;
2910
2911 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
2912 err = i40e_vsi_request_irq_msix(vsi, basename);
2913 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
2914 err = request_irq(pf->pdev->irq, i40e_intr, 0,
2915 pf->misc_int_name, pf);
2916 else
2917 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
2918 pf->misc_int_name, pf);
2919
2920 if (err)
2921 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
2922
2923 return err;
2924}
2925
2926#ifdef CONFIG_NET_POLL_CONTROLLER
2927
2928
2929
2930
2931
2932
2933
2934static void i40e_netpoll(struct net_device *netdev)
2935{
2936 struct i40e_netdev_priv *np = netdev_priv(netdev);
2937 struct i40e_vsi *vsi = np->vsi;
2938 struct i40e_pf *pf = vsi->back;
2939 int i;
2940
2941
2942 if (test_bit(__I40E_DOWN, &vsi->state))
2943 return;
2944
2945 pf->flags |= I40E_FLAG_IN_NETPOLL;
2946 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2947 for (i = 0; i < vsi->num_q_vectors; i++)
2948 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
2949 } else {
2950 i40e_intr(pf->pdev->irq, netdev);
2951 }
2952 pf->flags &= ~I40E_FLAG_IN_NETPOLL;
2953}
2954#endif
2955
2956
2957
2958
2959
2960
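/**
 * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/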
2961static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
2962{
2963 struct i40e_pf *pf = vsi->back;
2964 struct i40e_hw *hw = &pf->hw;
2965 int i, j, pf_q;
2966 u32 tx_reg;
2967
2968 pf_q = vsi->base_queue;
2969 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
2970 j = 1000;
2971 do {
2972 usleep_range(1000, 2000);
2973 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
2974 } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
2975 ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
2976
2977 if (enable) {
2978
2979 if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
2980 dev_info(&pf->pdev->dev,
2981 "Tx %d already enabled\n", i);
2982 continue;
2983 }
2984 } else {
2985
2986 if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
2987 dev_info(&pf->pdev->dev,
2988 "Tx %d already disabled\n", i);
2989 continue;
2990 }
2991 }
2992
2993
2994 if (enable)
2995 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
2996 I40E_QTX_ENA_QENA_STAT_MASK;
2997 else
2998 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2999
3000 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
3001
3002
3003 for (j = 0; j < 10; j++) {
3004 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3005 if (enable) {
3006 if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3007 break;
3008 } else {
3009 if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3010 break;
3011 }
3012
3013 udelay(10);
3014 }
3015 if (j >= 10) {
3016 dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
3017 pf_q, (enable ? "en" : "dis"));
3018 return -ETIMEDOUT;
3019 }
3020 }
3021
3022 return 0;
3023}
3024
3025
3026
3027
3028
3029
3030static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3031{
3032 struct i40e_pf *pf = vsi->back;
3033 struct i40e_hw *hw = &pf->hw;
3034 int i, j, pf_q;
3035 u32 rx_reg;
3036
3037 pf_q = vsi->base_queue;
3038 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3039 j = 1000;
3040 do {
3041 usleep_range(1000, 2000);
3042 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3043 } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT)
3044 ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1);
3045
3046 if (enable) {
3047
3048 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3049 continue;
3050 } else {
3051
3052 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3053 continue;
3054 }
3055
3056
3057 if (enable)
3058 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3059 I40E_QRX_ENA_QENA_STAT_MASK;
3060 else
3061 rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK |
3062 I40E_QRX_ENA_QENA_STAT_MASK);
3063 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3064
3065
3066 for (j = 0; j < 10; j++) {
3067 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3068
3069 if (enable) {
3070 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3071 break;
3072 } else {
3073 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3074 break;
3075 }
3076
3077 udelay(10);
3078 }
3079 if (j >= 10) {
3080 dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
3081 pf_q, (enable ? "en" : "dis"));
3082 return -ETIMEDOUT;
3083 }
3084 }
3085
3086 return 0;
3087}
3088
3089
3090
3091
3092
3093
3094static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3095{
3096 int ret;
3097
3098
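 /* do Rx first for enable and last for disable */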
3099 if (request) {
3100 ret = i40e_vsi_control_rx(vsi, request);
3101 if (ret)
3102 return ret;
3103 ret = i40e_vsi_control_tx(vsi, request);
3104 } else {
3105 ret = i40e_vsi_control_tx(vsi, request);
3106 if (ret)
3107 return ret;
3108 ret = i40e_vsi_control_rx(vsi, request);
3109 }
3110
3111 return ret;
3112}
3113
3114
3115
3116
3117
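/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 **/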
3118static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3119{
3120 struct i40e_pf *pf = vsi->back;
3121 struct i40e_hw *hw = &pf->hw;
3122 int base = vsi->base_vector;
3123 u32 val, qp;
3124 int i;
3125
3126 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3127 if (!vsi->q_vectors)
3128 return;
3129
3130 for (i = 0; i < vsi->num_q_vectors; i++) {
3131 u16 vector = i + base;
3132
3133
3134 if (vsi->q_vectors[i]->num_ringpairs == 0)
3135 continue;
3136
3137
3138 irq_set_affinity_hint(pf->msix_entries[vector].vector,
3139 NULL);
3140 free_irq(pf->msix_entries[vector].vector,
3141 vsi->q_vectors[i]);
3142
3143
3144
3145
3146
3147
3148
3149
3150 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3151 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3152 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3153 val |= I40E_QUEUE_END_OF_LIST
3154 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3155 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3156
3157 while (qp != I40E_QUEUE_END_OF_LIST) {
3158 u32 next;
3159
3160 val = rd32(hw, I40E_QINT_RQCTL(qp));
3161
3162 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3163 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3164 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3165 I40E_QINT_RQCTL_INTEVENT_MASK);
3166
3167 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3168 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3169
3170 wr32(hw, I40E_QINT_RQCTL(qp), val);
3171
3172 val = rd32(hw, I40E_QINT_TQCTL(qp));
3173
3174 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3175 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3176
3177 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3178 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3179 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3180 I40E_QINT_TQCTL_INTEVENT_MASK);
3181
3182 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3183 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3184
3185 wr32(hw, I40E_QINT_TQCTL(qp), val);
3186 qp = next;
3187 }
3188 }
3189 } else {
3190 free_irq(pf->pdev->irq, pf);
3191
3192 val = rd32(hw, I40E_PFINT_LNKLST0);
3193 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3194 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3195 val |= I40E_QUEUE_END_OF_LIST
3196 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3197 wr32(hw, I40E_PFINT_LNKLST0, val);
3198
3199 val = rd32(hw, I40E_QINT_RQCTL(qp));
3200 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3201 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3202 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3203 I40E_QINT_RQCTL_INTEVENT_MASK);
3204
3205 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3206 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3207
3208 wr32(hw, I40E_QINT_RQCTL(qp), val);
3209
3210 val = rd32(hw, I40E_QINT_TQCTL(qp));
3211
3212 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3213 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3214 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3215 I40E_QINT_TQCTL_INTEVENT_MASK);
3216
3217 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3218 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3219
3220 wr32(hw, I40E_QINT_TQCTL(qp), val);
3221 }
3222}
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3234{
3235 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3236 struct i40e_ring *ring;
3237
3238 if (!q_vector)
3239 return;
3240
3241
3242 i40e_for_each_ring(ring, q_vector->tx)
3243 ring->q_vector = NULL;
3244
3245 i40e_for_each_ring(ring, q_vector->rx)
3246 ring->q_vector = NULL;
3247
3248
3249 if (vsi->netdev)
3250 netif_napi_del(&q_vector->napi);
3251
3252 vsi->q_vectors[v_idx] = NULL;
3253
3254 kfree_rcu(q_vector, rcu);
3255}
3256
3257
3258
3259
3260
3261
3262
3263
3264static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3265{
3266 int v_idx;
3267
3268 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3269 i40e_free_q_vector(vsi, v_idx);
3270}
3271
3272
3273
3274
3275
3276static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3277{
3278
3279 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3280 pci_disable_msix(pf->pdev);
3281 kfree(pf->msix_entries);
3282 pf->msix_entries = NULL;
3283 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3284 pci_disable_msi(pf->pdev);
3285 }
3286 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3287}
3288
3289
3290
3291
3292
3293
3294
3295
3296static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3297{
3298 int i;
3299
 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT - 1);
3301 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
3302 if (pf->vsi[i])
3303 i40e_vsi_free_q_vectors(pf->vsi[i]);
3304 i40e_reset_interrupt_capability(pf);
3305}
3306
3307
3308
3309
3310
3311static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3312{
3313 int q_idx;
3314
3315 if (!vsi->netdev)
3316 return;
3317
3318 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3319 napi_enable(&vsi->q_vectors[q_idx]->napi);
3320}
3321
3322
3323
3324
3325
3326static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3327{
3328 int q_idx;
3329
3330 if (!vsi->netdev)
3331 return;
3332
3333 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3334 napi_disable(&vsi->q_vectors[q_idx]->napi);
3335}
3336
3337
3338
3339
3340
3341static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3342{
3343 if (test_bit(__I40E_DOWN, &vsi->state))
3344 return;
3345
3346 set_bit(__I40E_NEEDS_RESTART, &vsi->state);
3347 if (vsi->netdev && netif_running(vsi->netdev)) {
3348 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3349 } else {
3350 set_bit(__I40E_DOWN, &vsi->state);
3351 i40e_down(vsi);
3352 }
3353}
3354
3355
3356
3357
3358
3359static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
3360{
3361 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
3362 return;
3363
3364 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
3365 if (vsi->netdev && netif_running(vsi->netdev))
3366 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3367 else
3368 i40e_up(vsi);
3369}
3370
3371
3372
3373
3374
3375static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3376{
3377 int v;
3378
3379 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3380 if (pf->vsi[v])
3381 i40e_quiesce_vsi(pf->vsi[v]);
3382 }
3383}
3384
3385
3386
3387
3388
3389static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3390{
3391 int v;
3392
3393 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3394 if (pf->vsi[v])
3395 i40e_unquiesce_vsi(pf->vsi[v]);
3396 }
3397}
3398
3399
3400
3401
3402
3403
3404
3405static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3406{
3407 u8 num_tc = 0;
3408 int i;
3409
3410
3411
3412
3413
3414
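 /* Scan the ETS priority table to find the highest TC in use */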
3415 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3416 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
3417 num_tc = dcbcfg->etscfg.prioritytable[i];
3418 }
3419
3420
3421
3422
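 /* TC index is zero-based, so the TC count is highest index + 1 */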
3423 return num_tc + 1;
3424}
3425
3426
3427
3428
3429
3430
3431
3432
3433static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
3434{
3435 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
3436 u8 enabled_tc = 1;
3437 u8 i;
3438
3439 for (i = 0; i < num_tc; i++)
3440 enabled_tc |= 1 << i;
3441
3442 return enabled_tc;
3443}
3444
3445
3446
3447
3448
3449
3450
3451static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
3452{
3453 struct i40e_hw *hw = &pf->hw;
3454 u8 i, enabled_tc;
3455 u8 num_tc = 0;
3456 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3457
3458
3459 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3460 return 1;
3461
3462
3463 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3464 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3465 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3466 if (enabled_tc & (1 << i))
3467 num_tc++;
3468 }
3469 return num_tc;
3470 }
3471
3472
3473 return i40e_dcb_get_num_tc(dcbcfg);
3474}
3475
3476
3477
3478
3479
3480
3481
3482static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
3483{
3484 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3485 u8 i = 0;
3486
3487 if (!enabled_tc)
3488 return 0x1;
3489
3490
3491 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3492 if (enabled_tc & (1 << i))
3493 break;
3494 }
3495
3496 return 1 << i;
3497}
3498
3499
3500
3501
3502
3503
3504
3505static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
3506{
3507
3508 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3509 return i40e_pf_get_default_tc(pf);
3510
3511
3512 if (pf->flags & I40E_FLAG_MFP_ENABLED)
3513 return pf->hw.func_caps.enabled_tcmap;
3514
3515
3516 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
3517}
3518
3519
3520
3521
3522
3523
3524
3525static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3526{
3527 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
3528 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
3529 struct i40e_pf *pf = vsi->back;
3530 struct i40e_hw *hw = &pf->hw;
3531 i40e_status aq_ret;
3532 u32 tc_bw_max;
3533 int i;
3534
3535
3536 aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
3537 if (aq_ret) {
3538 dev_info(&pf->pdev->dev,
3539 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
3540 aq_ret, pf->hw.aq.asq_last_status);
3541 return -EINVAL;
3542 }
3543
3544
3545 aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
3546 NULL);
3547 if (aq_ret) {
3548 dev_info(&pf->pdev->dev,
3549 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
3550 aq_ret, pf->hw.aq.asq_last_status);
3551 return -EINVAL;
3552 }
3553
3554 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
3555 dev_info(&pf->pdev->dev,
3556 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
3557 bw_config.tc_valid_bits,
3558 bw_ets_config.tc_valid_bits);
3559
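 /* Still continuing */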
3560 }
3561
3562 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
3563 vsi->bw_max_quanta = bw_config.max_bw;
3564 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
3565 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
3566 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3567 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
3568 vsi->bw_ets_limit_credits[i] =
3569 le16_to_cpu(bw_ets_config.credits[i]);
3570
3571 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
3572 }
3573
3574 return 0;
3575}
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
3586 u8 *bw_share)
3587{
3588 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
3589 i40e_status aq_ret;
3590 int i;
3591
3592 bw_data.tc_valid_bits = enabled_tc;
3593 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3594 bw_data.tc_bw_credits[i] = bw_share[i];
3595
3596 aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
3597 NULL);
3598 if (aq_ret) {
3599 dev_info(&vsi->back->pdev->dev,
3600 "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
3601 __func__, vsi->back->hw.aq.asq_last_status);
3602 return -EINVAL;
3603 }
3604
3605 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3606 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
3607
3608 return 0;
3609}
3610
3611
3612
3613
3614
3615
3616
3617static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
3618{
3619 struct net_device *netdev = vsi->netdev;
3620 struct i40e_pf *pf = vsi->back;
3621 struct i40e_hw *hw = &pf->hw;
3622 u8 netdev_tc = 0;
3623 int i;
3624 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3625
3626 if (!netdev)
3627 return;
3628
3629 if (!enabled_tc) {
3630 netdev_reset_tc(netdev);
3631 return;
3632 }
3633
3634
3635 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
3636 return;
3637
3638
3639 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3640
3641
3642
3643
3644
3645
3646
3647 if (vsi->tc_config.enabled_tc & (1 << i))
3648 netdev_set_tc_queue(netdev,
3649 vsi->tc_config.tc_info[i].netdev_tc,
3650 vsi->tc_config.tc_info[i].qcount,
3651 vsi->tc_config.tc_info[i].qoffset);
3652 }
3653
3654
3655 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3656
3657 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
3658
3659 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
3660 netdev_set_prio_tc_map(netdev, i, netdev_tc);
3661 }
3662}
3663
3664
3665
3666
3667
3668
3669static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
3670 struct i40e_vsi_context *ctxt)
3671{
3672
3673
3674
3675
3676 vsi->info.mapping_flags = ctxt->info.mapping_flags;
3677 memcpy(&vsi->info.queue_mapping,
3678 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
3679 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
3680 sizeof(vsi->info.tc_mapping));
3681}
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
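/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap.  It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 **/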
3696static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
3697{
3698 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
3699 struct i40e_vsi_context ctxt;
3700 int ret = 0;
3701 int i;
3702
3703
3704 if (vsi->tc_config.enabled_tc == enabled_tc)
3705 return ret;
3706
3707
3708 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3709 if (enabled_tc & (1 << i))
3710 bw_share[i] = 1;
3711 }
3712
3713 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
3714 if (ret) {
3715 dev_info(&vsi->back->pdev->dev,
3716 "Failed configuring TC map %d for VSI %d\n",
3717 enabled_tc, vsi->seid);
3718 goto out;
3719 }
3720
3721
3722 ctxt.seid = vsi->seid;
3723 ctxt.pf_num = vsi->back->hw.pf_id;
3724 ctxt.vf_num = 0;
3725 ctxt.uplink_seid = vsi->uplink_seid;
3726 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3727 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
3728
3729
3730 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3731 if (ret) {
3732 dev_info(&vsi->back->pdev->dev,
3733 "update vsi failed, aq_err=%d\n",
3734 vsi->back->hw.aq.asq_last_status);
3735 goto out;
3736 }
3737
3738 i40e_vsi_update_queue_map(vsi, &ctxt);
3739 vsi->info.valid_sections = 0;
3740
3741
3742 ret = i40e_vsi_get_bw_info(vsi);
3743 if (ret) {
3744 dev_info(&vsi->back->pdev->dev,
3745 "Failed updating vsi bw info, aq_err=%d\n",
3746 vsi->back->hw.aq.asq_last_status);
3747 goto out;
3748 }
3749
3750
3751 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
3752out:
3753 return ret;
3754}
3755
3756
3757
3758
3759
3760static int i40e_up_complete(struct i40e_vsi *vsi)
3761{
3762 struct i40e_pf *pf = vsi->back;
3763 int err;
3764
3765 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3766 i40e_vsi_configure_msix(vsi);
3767 else
3768 i40e_configure_msi_and_legacy(vsi);
3769
3770
3771 err = i40e_vsi_control_rings(vsi, true);
3772 if (err)
3773 return err;
3774
3775 clear_bit(__I40E_DOWN, &vsi->state);
3776 i40e_napi_enable_all(vsi);
3777 i40e_vsi_enable_irq(vsi);
3778
3779 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
3780 (vsi->netdev)) {
3781 netdev_info(vsi->netdev, "NIC Link is Up\n");
3782 netif_tx_start_all_queues(vsi->netdev);
3783 netif_carrier_on(vsi->netdev);
3784 } else if (vsi->netdev) {
3785 netdev_info(vsi->netdev, "NIC Link is Down\n");
3786 }
3787 i40e_service_event_schedule(pf);
3788
3789 return 0;
3790}
3791
3792
3793
3794
3795
3796
3797
3798
3799static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
3800{
3801 struct i40e_pf *pf = vsi->back;
3802
3803 WARN_ON(in_interrupt());
3804 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
3805 usleep_range(1000, 2000);
3806 i40e_down(vsi);
3807
3808
3809
3810
3811
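 /* Give a VF some time to respond to the reset.  The two second
  * wait is presumably tied to the watchdog cycle in the VF driver.
  */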
3812 if (vsi->type == I40E_VSI_SRIOV)
3813 msleep(2000);
3814 i40e_up(vsi);
3815 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
3816}
3817
3818
3819
3820
3821
3822int i40e_up(struct i40e_vsi *vsi)
3823{
3824 int err;
3825
3826 err = i40e_vsi_configure(vsi);
3827 if (!err)
3828 err = i40e_up_complete(vsi);
3829
3830 return err;
3831}
3832
3833
3834
3835
3836
3837void i40e_down(struct i40e_vsi *vsi)
3838{
3839 int i;
3840
3841
3842
3843
3844 if (vsi->netdev) {
3845 netif_carrier_off(vsi->netdev);
3846 netif_tx_disable(vsi->netdev);
3847 }
3848 i40e_vsi_disable_irq(vsi);
3849 i40e_vsi_control_rings(vsi, false);
3850 i40e_napi_disable_all(vsi);
3851
3852 for (i = 0; i < vsi->num_queue_pairs; i++) {
3853 i40e_clean_tx_ring(vsi->tx_rings[i]);
3854 i40e_clean_rx_ring(vsi->rx_rings[i]);
3855 }
3856}
3857
3858
3859
3860
3861
3862
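/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 **/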
3863static int i40e_setup_tc(struct net_device *netdev, u8 tc)
3864{
3865 struct i40e_netdev_priv *np = netdev_priv(netdev);
3866 struct i40e_vsi *vsi = np->vsi;
3867 struct i40e_pf *pf = vsi->back;
3868 u8 enabled_tc = 0;
3869 int ret = -EINVAL;
3870 int i;
3871
3872
3873 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
3874 netdev_info(netdev, "DCB is not enabled for adapter\n");
3875 goto exit;
3876 }
3877
3878
3879 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3880 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
3881 goto exit;
3882 }
3883
3884
3885 if (tc > i40e_pf_get_num_tc(pf)) {
3886 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
3887 goto exit;
3888 }
3889
3890
3891 for (i = 0; i < tc; i++)
3892 enabled_tc |= (1 << i);
3893
3894
3895 if (enabled_tc == vsi->tc_config.enabled_tc)
3896 return 0;
3897
3898
3899 i40e_quiesce_vsi(vsi);
3900
3901
3902 ret = i40e_vsi_config_tc(vsi, enabled_tc);
3903 if (ret) {
3904 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
3905 vsi->seid);
3906 goto exit;
3907 }
3908
3909
3910 i40e_unquiesce_vsi(vsi);
3911
3912exit:
3913 return ret;
3914}
3915
3916
3917
3918
3919
3920
3921
3922
3923
3924
3925
3926
3927
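/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, and the netdev watchdog subtask
 * is enabled.
 *
 * Returns 0 on success, negative value on failure
 **/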
3928static int i40e_open(struct net_device *netdev)
3929{
3930 struct i40e_netdev_priv *np = netdev_priv(netdev);
3931 struct i40e_vsi *vsi = np->vsi;
3932 struct i40e_pf *pf = vsi->back;
 char int_name[IFNAMSIZ + 9];
3934 int err;
3935
3936
3937 if (test_bit(__I40E_TESTING, &pf->state))
3938 return -EBUSY;
3939
3940 netif_carrier_off(netdev);
3941
3942
3943 err = i40e_vsi_setup_tx_resources(vsi);
3944 if (err)
3945 goto err_setup_tx;
3946 err = i40e_vsi_setup_rx_resources(vsi);
3947 if (err)
3948 goto err_setup_rx;
3949
3950 err = i40e_vsi_configure(vsi);
3951 if (err)
3952 goto err_setup_rx;
3953
3954 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
3955 dev_driver_string(&pf->pdev->dev), netdev->name);
3956 err = i40e_vsi_request_irq(vsi, int_name);
3957 if (err)
3958 goto err_setup_rx;
3959
3960 err = i40e_up_complete(vsi);
3961 if (err)
3962 goto err_up_complete;
3963
3964 if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
3965 err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL);
3966 if (err)
3967 netdev_info(netdev,
3968 "couldn't set broadcast err %d aq_err %d\n",
3969 err, pf->hw.aq.asq_last_status);
3970 }
3971
3972 return 0;
3973
3974err_up_complete:
3975 i40e_down(vsi);
3976 i40e_vsi_free_irq(vsi);
3977err_setup_rx:
3978 i40e_vsi_free_rx_resources(vsi);
3979err_setup_tx:
3980 i40e_vsi_free_tx_resources(vsi);
3981 if (vsi == pf->vsi[pf->lan_vsi])
3982 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
3983
3984 return err;
3985}
3986
3987
3988
3989
3990
3991
3992
3993
3994
3995
3996
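/**
 * i40e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled; all transmit and receive resources are freed.
 *
 * Returns 0, this is not allowed to fail
 **/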
3997static int i40e_close(struct net_device *netdev)
3998{
3999 struct i40e_netdev_priv *np = netdev_priv(netdev);
4000 struct i40e_vsi *vsi = np->vsi;
4001
4002 if (test_and_set_bit(__I40E_DOWN, &vsi->state))
4003 return 0;
4004
4005 i40e_down(vsi);
4006 i40e_vsi_free_irq(vsi);
4007
4008 i40e_vsi_free_tx_resources(vsi);
4009 i40e_vsi_free_rx_resources(vsi);
4010
4011 return 0;
4012}
4013
4014
4015
4016
4017
4018
4019
4020
4021
4022
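/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 **/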
4023void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
4024{
4025 u32 val;
4026
4027 WARN_ON(in_interrupt());
4028
4029
4030 if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
4031
4032
4033
4034
4035
4036
4037
4038
4039
4040 dev_info(&pf->pdev->dev, "GlobalR requested\n");
4041 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4042 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
4043 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
4044
4045 } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
4046
4047
4048
4049
4050
4051 dev_info(&pf->pdev->dev, "CoreR requested\n");
4052 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4053 val |= I40E_GLGEN_RTRIG_CORER_MASK;
4054 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
4055 i40e_flush(&pf->hw);
4056
4057 } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
4058
4059
4060
4061
4062
4063
4064
4065
4066
4067 dev_info(&pf->pdev->dev, "PFR requested\n");
4068 i40e_handle_reset_warning(pf);
4069
4070 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
4071 int v;
4072
4073
4074 dev_info(&pf->pdev->dev,
4075 "VSI reinit requested\n");
4076 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4077 struct i40e_vsi *vsi = pf->vsi[v];
4078 if (vsi != NULL &&
4079 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
4080 i40e_vsi_reinit_locked(pf->vsi[v]);
4081 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
4082 }
4083 }
4084
4085
4086 return;
4087 } else {
4088 dev_info(&pf->pdev->dev,
4089 "bad reset request 0x%08x\n", reset_flags);
4090 return;
4091 }
4092}
4093
4094
4095
4096
4097
4098
4099
4100
4101
4102static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
4103 struct i40e_arq_event_info *e)
4104{
4105 struct i40e_aqc_lan_overflow *data =
4106 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
4107 u32 queue = le32_to_cpu(data->prtdcb_rupto);
4108 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
4109 struct i40e_hw *hw = &pf->hw;
4110 struct i40e_vf *vf;
4111 u16 vf_id;
4112
4113 dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n",
4114 __func__, queue, qtx_ctl);
4115
4116
4117 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
4118 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
4119 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
4120 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
4121 vf_id -= hw->func_caps.vf_base_id;
4122 vf = &pf->vf[vf_id];
4123 i40e_vc_notify_vf_reset(vf);
4124
4125 msleep(20);
4126 i40e_reset_vf(vf, false);
4127 }
4128}
4129
4130
4131
4132
4133
4134static void i40e_service_event_complete(struct i40e_pf *pf)
4135{
4136 BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
4137
4138
4139 smp_mb__before_clear_bit();
4140 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
4141}
4142
4143
4144
4145
4146
4147static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
4148{
4149 if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
4150 return;
4151
4152 pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
4153
4154
4155 if (test_bit(__I40E_DOWN, &pf->state))
4156 return;
4157}
4158
4159
4160
4161
4162
4163
4164static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
4165{
4166 if (!vsi)
4167 return;
4168
4169 switch (vsi->type) {
4170 case I40E_VSI_MAIN:
4171 if (!vsi->netdev || !vsi->netdev_registered)
4172 break;
4173
4174 if (link_up) {
4175 netif_carrier_on(vsi->netdev);
4176 netif_tx_wake_all_queues(vsi->netdev);
4177 } else {
4178 netif_carrier_off(vsi->netdev);
4179 netif_tx_stop_all_queues(vsi->netdev);
4180 }
4181 break;
4182
4183 case I40E_VSI_SRIOV:
4184 break;
4185
4186 case I40E_VSI_VMDQ2:
4187 case I40E_VSI_CTRL:
4188 case I40E_VSI_MIRROR:
4189 default:
4190
4191 break;
4192 }
4193}
4194
4195
4196
4197
4198
4199
4200static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
4201{
4202 struct i40e_pf *pf;
4203 int i;
4204
4205 if (!veb || !veb->pf)
4206 return;
4207 pf = veb->pf;
4208
4209
4210 for (i = 0; i < I40E_MAX_VEB; i++)
4211 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
4212 i40e_veb_link_event(pf->veb[i], link_up);
4213
4214
4215 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4216 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
4217 i40e_vsi_link_event(pf->vsi[i], link_up);
4218}
4219
4220
4221
4222
4223
4224static void i40e_link_event(struct i40e_pf *pf)
4225{
4226 bool new_link, old_link;
4227
4228 new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
4229 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
4230
4231 if (new_link == old_link)
4232 return;
4233
4234 if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
4235 netdev_info(pf->vsi[pf->lan_vsi]->netdev,
4236 "NIC Link is %s\n", (new_link ? "Up" : "Down"));
4237
4238
4239
4240
4241 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
4242 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
4243 else
4244 i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link);
4245
4246 if (pf->vf)
4247 i40e_vc_notify_link_state(pf);
4248}
4249
4250
4251
4252
4253
4254
4255
4256
4257static void i40e_check_hang_subtask(struct i40e_pf *pf)
4258{
4259 int i, v;
4260
4261
4262 if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
4263 return;
4264
4265
4266
4267
4268
4269
4270
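 /* Arm the Tx hang check on every ring of each active VSI; if any
  * rings were already armed from a previous pass, trigger a software
  * interrupt so their queues get serviced.
  */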
4271 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4272 struct i40e_vsi *vsi = pf->vsi[v];
4273 int armed = 0;
4274
4275 if (!pf->vsi[v] ||
4276 test_bit(__I40E_DOWN, &vsi->state) ||
4277 (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
4278 continue;
4279
4280 for (i = 0; i < vsi->num_queue_pairs; i++) {
4281 set_check_for_tx_hang(vsi->tx_rings[i]);
4282 if (test_bit(__I40E_HANG_CHECK_ARMED,
4283 &vsi->tx_rings[i]->state))
4284 armed++;
4285 }
4286
4287 if (armed) {
4288 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
4289 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
4290 (I40E_PFINT_DYN_CTL0_INTENA_MASK |
4291 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
4292 } else {
4293 u16 vec = vsi->base_vector - 1;
4294 u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
4295 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
4296 for (i = 0; i < vsi->num_q_vectors; i++, vec++)
4297 wr32(&vsi->back->hw,
4298 I40E_PFINT_DYN_CTLN(vec), val);
4299 }
4300 i40e_flush(&vsi->back->hw);
4301 }
4302 }
4303}
4304
4305
4306
4307
4308
4309static void i40e_watchdog_subtask(struct i40e_pf *pf)
4310{
4311 int i;
4312
4313
4314 if (test_bit(__I40E_DOWN, &pf->state) ||
4315 test_bit(__I40E_CONFIG_BUSY, &pf->state))
4316 return;
4317
4318
4319
4320
4321 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4322 if (pf->vsi[i] && pf->vsi[i]->netdev)
4323 i40e_update_stats(pf->vsi[i]);
4324
4325
4326 for (i = 0; i < I40E_MAX_VEB; i++)
4327 if (pf->veb[i])
4328 i40e_update_veb_stats(pf->veb[i]);
4329}
4330
4331
4332
4333
4334
4335static void i40e_reset_subtask(struct i40e_pf *pf)
4336{
4337 u32 reset_flags = 0;
4338
4339 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
4340 reset_flags |= (1 << __I40E_REINIT_REQUESTED);
4341 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
4342 }
4343 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
4344 reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
4345 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4346 }
4347 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
4348 reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
4349 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
4350 }
4351 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
4352 reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
4353 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
4354 }
4355
4356
4357
4358
4359 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
4360 i40e_handle_reset_warning(pf);
4361 return;
4362 }
4363
4364
4365 if (reset_flags &&
4366 !test_bit(__I40E_DOWN, &pf->state) &&
4367 !test_bit(__I40E_CONFIG_BUSY, &pf->state))
4368 i40e_do_reset(pf, reset_flags);
4369}
4370
4371
4372
4373
4374
4375
4376static void i40e_handle_link_event(struct i40e_pf *pf,
4377 struct i40e_arq_event_info *e)
4378{
4379 struct i40e_hw *hw = &pf->hw;
4380 struct i40e_aqc_get_link_status *status =
4381 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
4382 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
4383
4384
4385 memcpy(&pf->hw.phy.link_info_old, hw_link_info,
4386 sizeof(pf->hw.phy.link_info_old));
4387
4388
4389 hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
4390 hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
4391 hw_link_info->link_info = status->link_info;
4392 hw_link_info->an_info = status->an_info;
4393 hw_link_info->ext_info = status->ext_info;
4394 hw_link_info->lse_enable =
4395 le16_to_cpu(status->command_flags) &
4396 I40E_AQ_LSE_ENABLE;
4397
4398
4399 i40e_link_event(pf);
4400
4401
4402
4403
4404
4405
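 /* Do a new status request to re-enable LSE reporting and load new
  * status information into the hw struct, then see if the status
  * changed while processing the initial event.
  */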
4406 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
4407 i40e_link_event(pf);
4408}
4409
4410
4411
4412
4413
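/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 **/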
4414static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
4415{
4416 struct i40e_arq_event_info event;
4417 struct i40e_hw *hw = &pf->hw;
4418 u16 pending, i = 0;
4419 i40e_status ret;
4420 u16 opcode;
4421 u32 val;
4422
4423 if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
4424 return;
4425
4426 event.msg_size = I40E_MAX_AQ_BUF_SIZE;
4427 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
4428 if (!event.msg_buf)
4429 return;
4430
4431 do {
4432 ret = i40e_clean_arq_element(hw, &event, &pending);
4433 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
4434 dev_info(&pf->pdev->dev, "No ARQ event found\n");
4435 break;
4436 } else if (ret) {
4437 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
4438 break;
4439 }
4440
4441 opcode = le16_to_cpu(event.desc.opcode);
4442 switch (opcode) {
4443
4444 case i40e_aqc_opc_get_link_status:
4445 i40e_handle_link_event(pf, &event);
4446 break;
4447 case i40e_aqc_opc_send_msg_to_pf:
4448 ret = i40e_vc_process_vf_msg(pf,
4449 le16_to_cpu(event.desc.retval),
4450 le32_to_cpu(event.desc.cookie_high),
4451 le32_to_cpu(event.desc.cookie_low),
4452 event.msg_buf,
4453 event.msg_size);
4454 break;
4455 case i40e_aqc_opc_lldp_update_mib:
4456 dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
4457 break;
4458 case i40e_aqc_opc_event_lan_overflow:
4459 dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
4460 i40e_handle_lan_overflow_event(pf, &event);
4461 break;
4462 default:
 dev_info(&pf->pdev->dev,
 "ARQ Error: Unknown event 0x%04x received\n",
 opcode);
4466 break;
4467 }
4468 } while (pending && (i++ < pf->adminq_work_limit));
4469
4470 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
4471
4472 val = rd32(hw, I40E_PFINT_ICR0_ENA);
4473 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4474 wr32(hw, I40E_PFINT_ICR0_ENA, val);
4475 i40e_flush(hw);
4476
4477 kfree(event.msg_buf);
4478}
4479
4480
4481
4482
4483
4484
4485
4486
4487
4488
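/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * through our own index numbers because the seid's from the HW could
 * change across the reset.
 **/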
4489static int i40e_reconstitute_veb(struct i40e_veb *veb)
4490{
4491 struct i40e_vsi *ctl_vsi = NULL;
4492 struct i40e_pf *pf = veb->pf;
4493 int v, veb_idx;
4494 int ret;
4495
4496
4497 for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
4498 if (pf->vsi[v] &&
4499 pf->vsi[v]->veb_idx == veb->idx &&
4500 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
4501 ctl_vsi = pf->vsi[v];
4502 break;
4503 }
4504 }
4505 if (!ctl_vsi) {
4506 dev_info(&pf->pdev->dev,
4507 "missing owner VSI for veb_idx %d\n", veb->idx);
4508 ret = -ENOENT;
4509 goto end_reconstitute;
4510 }
4511 if (ctl_vsi != pf->vsi[pf->lan_vsi])
4512 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
4513 ret = i40e_add_vsi(ctl_vsi);
4514 if (ret) {
4515 dev_info(&pf->pdev->dev,
4516 "rebuild of owner VSI failed: %d\n", ret);
4517 goto end_reconstitute;
4518 }
4519 i40e_vsi_reset_stats(ctl_vsi);
4520
4521
4522 ret = i40e_add_veb(veb, ctl_vsi);
4523 if (ret)
4524 goto end_reconstitute;
4525
4526
4527 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4528 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
4529 continue;
4530
4531 if (pf->vsi[v]->veb_idx == veb->idx) {
4532 struct i40e_vsi *vsi = pf->vsi[v];
4533 vsi->uplink_seid = veb->seid;
4534 ret = i40e_add_vsi(vsi);
4535 if (ret) {
4536 dev_info(&pf->pdev->dev,
4537 "rebuild of vsi_idx %d failed: %d\n",
4538 v, ret);
4539 goto end_reconstitute;
4540 }
4541 i40e_vsi_reset_stats(vsi);
4542 }
4543 }
4544
4545
4546 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
4547 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
4548 pf->veb[veb_idx]->uplink_seid = veb->seid;
4549 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
4550 if (ret)
4551 break;
4552 }
4553 }
4554
4555end_reconstitute:
4556 return ret;
4557}
4558
4559
4560
4561
4562
4563static int i40e_get_capabilities(struct i40e_pf *pf)
4564{
4565 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
4566 u16 data_size;
4567 int buf_len;
4568 int err;
4569
4570 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
4571 do {
4572 cap_buf = kzalloc(buf_len, GFP_KERNEL);
4573 if (!cap_buf)
4574 return -ENOMEM;
4575
4576
4577 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
4578 &data_size,
4579 i40e_aqc_opc_list_func_capabilities,
4580 NULL);
4581
4582 kfree(cap_buf);
4583
4584 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
4585
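 /* retry with a larger buffer */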
4586 buf_len = data_size;
4587 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
4588 dev_info(&pf->pdev->dev,
4589 "capability discovery failed: aq=%d\n",
4590 pf->hw.aq.asq_last_status);
4591 return -ENODEV;
4592 }
4593 } while (err);
4594
4595 if (pf->hw.debug_mask & I40E_DEBUG_USER)
4596 dev_info(&pf->pdev->dev,
4597 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
4598 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
4599 pf->hw.func_caps.num_msix_vectors,
4600 pf->hw.func_caps.num_msix_vectors_vf,
4601 pf->hw.func_caps.fd_filters_guaranteed,
4602 pf->hw.func_caps.fd_filters_best_effort,
4603 pf->hw.func_caps.num_tx_qp,
4604 pf->hw.func_caps.num_vsis);
4605
4606 return 0;
4607}
4608
4609
4610
4611
4612
4613static void i40e_fdir_setup(struct i40e_pf *pf)
4614{
4615 struct i40e_vsi *vsi;
4616 bool new_vsi = false;
4617 int err, i;
4618
4619 if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
4620 I40E_FLAG_FDIR_ATR_ENABLED)))
4621 return;
4622
4623 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
4624
4625
4626 vsi = NULL;
4627 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4628 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
4629 vsi = pf->vsi[i];
4630 if (!vsi) {
4631 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0);
4632 if (!vsi) {
4633 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
4634 pf->flags &= ~I40E_FLAG_FDIR_ENABLED;
4635 return;
4636 }
4637 new_vsi = true;
4638 }
4639 WARN_ON(vsi->base_queue != I40E_FDIR_RING);
4640 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings);
4641
4642 err = i40e_vsi_setup_tx_resources(vsi);
4643 if (!err)
4644 err = i40e_vsi_setup_rx_resources(vsi);
4645 if (!err)
4646 err = i40e_vsi_configure(vsi);
4647 if (!err && new_vsi) {
4648 char int_name[IFNAMSIZ + 9];
4649 snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
4650 dev_driver_string(&pf->pdev->dev));
4651 err = i40e_vsi_request_irq(vsi, int_name);
4652 }
4653 if (!err)
4654 err = i40e_up_complete(vsi);
4655
4656 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4657}
4658
4659
4660
4661
4662
4663static void i40e_fdir_teardown(struct i40e_pf *pf)
4664{
4665 int i;
4666
4667 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
4668 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
4669 i40e_vsi_release(pf->vsi[i]);
4670 break;
4671 }
4672 }
4673}
4674
4675
4676
4677
4678
4679
4680
4681
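/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/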
4682static void i40e_handle_reset_warning(struct i40e_pf *pf)
4683{
4684 struct i40e_driver_version dv;
4685 struct i40e_hw *hw = &pf->hw;
4686 i40e_status ret;
4687 u32 v;
4688
4689 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
4690 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
4691 return;
4692
4693 dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
4694
4695 i40e_vc_notify_reset(pf);
4696
4697
4698 i40e_pf_quiesce_all_vsi(pf);
4699
4700 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4701 if (pf->vsi[v])
4702 pf->vsi[v]->seid = 0;
4703 }
4704
4705 i40e_shutdown_adminq(&pf->hw);
4706
4707
4708
4709
4710
4711 ret = i40e_pf_reset(hw);
4712 if (ret)
4713 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
4714 pf->pfr_count++;
4715
4716 if (test_bit(__I40E_DOWN, &pf->state))
4717 goto end_core_reset;
4718 dev_info(&pf->pdev->dev, "Rebuilding internal switch\n");
4719
4720
4721 ret = i40e_init_adminq(&pf->hw);
4722 if (ret) {
4723 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
4724 goto end_core_reset;
4725 }
4726
4727 ret = i40e_get_capabilities(pf);
4728 if (ret) {
4729 dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
4730 ret);
4731 goto end_core_reset;
4732 }
4733
4734
4735 ret = i40e_shutdown_lan_hmc(hw);
4736 if (ret) {
4737 dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
4738 goto end_core_reset;
4739 }
4740
4741 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
4742 hw->func_caps.num_rx_qp,
4743 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
4744 if (ret) {
4745 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
4746 goto end_core_reset;
4747 }
4748 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
4749 if (ret) {
4750 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
4751 goto end_core_reset;
4752 }
4753
4754
4755 ret = i40e_setup_pf_switch(pf);
4756 if (ret)
4757 goto end_core_reset;
4758
4759
4760
4761
4762
4763
4764
4765
4766 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
4767 dev_info(&pf->pdev->dev, "attempting to rebuild switch\n");
4768
4769 for (v = 0; v < I40E_MAX_VEB; v++) {
4770 if (!pf->veb[v])
4771 continue;
4772
4773 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
4774 pf->veb[v]->uplink_seid == 0) {
4775 ret = i40e_reconstitute_veb(pf->veb[v]);
4776
4777 if (!ret)
4778 continue;
4779
4780
4781
4782
4783
4784
4785
4786 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
4787 dev_info(&pf->pdev->dev,
4788 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
4789 ret);
4790 pf->vsi[pf->lan_vsi]->uplink_seid
4791 = pf->mac_seid;
4792 break;
4793 } else if (pf->veb[v]->uplink_seid == 0) {
4794 dev_info(&pf->pdev->dev,
4795 "rebuild of orphan VEB failed: %d\n",
4796 ret);
4797 }
4798 }
4799 }
4800 }
4801
4802 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
4803 dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
4804
4805 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
4806 if (ret) {
4807 dev_info(&pf->pdev->dev,
4808 "rebuild of Main VSI failed: %d\n", ret);
4809 goto end_core_reset;
4810 }
4811 }
4812
4813
4814 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4815 ret = i40e_setup_misc_vector(pf);
4816
4817
4818 i40e_pf_unquiesce_all_vsi(pf);
4819
4820
4821 dv.major_version = DRV_VERSION_MAJOR;
4822 dv.minor_version = DRV_VERSION_MINOR;
4823 dv.build_version = DRV_VERSION_BUILD;
4824 dv.subbuild_version = 0;
4825 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
4826
4827 dev_info(&pf->pdev->dev, "PF reset done\n");
4828
4829end_core_reset:
4830 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
4831}
4832
4833
4834
4835
4836
4837
4838
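/**
 * i40e_handle_mdd_event - handle a Malicious Driver Detection event
 * @pf: pointer to the pf structure
 *
 * Called from the MDD irq handler to identify possibly malicious VFs
 **/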
4839static void i40e_handle_mdd_event(struct i40e_pf *pf)
4840{
4841 struct i40e_hw *hw = &pf->hw;
4842 bool mdd_detected = false;
4843 struct i40e_vf *vf;
4844 u32 reg;
4845 int i;
4846
4847 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
4848 return;
4849
4850
4851 reg = rd32(hw, I40E_GL_MDET_TX);
4852 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
4853 u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
4854 >> I40E_GL_MDET_TX_FUNCTION_SHIFT;
 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK)
 >> I40E_GL_MDET_TX_EVENT_SHIFT;
4857 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
4858 >> I40E_GL_MDET_TX_QUEUE_SHIFT;
4859 dev_info(&pf->pdev->dev,
4860 "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n",
4861 event, queue, func);
4862 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
4863 mdd_detected = true;
4864 }
4865 reg = rd32(hw, I40E_GL_MDET_RX);
4866 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
4867 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
4868 >> I40E_GL_MDET_RX_FUNCTION_SHIFT;
 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK)
 >> I40E_GL_MDET_RX_EVENT_SHIFT;
4871 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
4872 >> I40E_GL_MDET_RX_QUEUE_SHIFT;
4873 dev_info(&pf->pdev->dev,
4874 "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n",
4875 event, queue, func);
4876 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
4877 mdd_detected = true;
4878 }
4879
4880
4881 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
4882 vf = &(pf->vf[i]);
4883 reg = rd32(hw, I40E_VP_MDET_TX(i));
4884 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
4885 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
4886 vf->num_mdd_events++;
4887 dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
4888 }
4889
4890 reg = rd32(hw, I40E_VP_MDET_RX(i));
4891 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
4892 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
4893 vf->num_mdd_events++;
4894 dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
4895 }
4896
4897 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
4898 dev_info(&pf->pdev->dev,
4899 "Too many MDD events on VF %d, disabled\n", i);
4900 dev_info(&pf->pdev->dev,
4901 "Use PF Control I/F to re-enable the VF\n");
4902 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
4903 }
4904 }
4905
4906
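	/* re-enable the MDD interrupt cause */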
4907 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
4908 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4909 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4910 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4911 i40e_flush(hw);
4912}
4913
4914
4915
4916
4917
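/**
 * i40e_service_task - Run the driver's periodic subtasks
 * @work: pointer to work_struct containing our data
 *
 * Runs the reset, MDD, VFLR, watchdog, and admin queue subtasks, then
 * reschedules itself if more work arrived while it was running.
 **/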
4918static void i40e_service_task(struct work_struct *work)
4919{
4920 struct i40e_pf *pf = container_of(work,
4921 struct i40e_pf,
4922 service_task);
4923 unsigned long start_time = jiffies;
4924
4925 i40e_reset_subtask(pf);
4926 i40e_handle_mdd_event(pf);
4927 i40e_vc_process_vflr_event(pf);
4928 i40e_watchdog_subtask(pf);
4929 i40e_fdir_reinit_subtask(pf);
4930 i40e_check_hang_subtask(pf);
4931 i40e_sync_filters_subtask(pf);
4932 i40e_clean_adminq_subtask(pf);
4933
4934 i40e_service_event_complete(pf);
4935
4936
4937
4938
4939
4940 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
4941 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
4942 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
4943 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
4944 i40e_service_event_schedule(pf);
4945}
4946
4947
4948
4949
4950
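/**
 * i40e_service_timer - timer callback that kicks the service task
 * @data: pointer to the pf structure, cast to unsigned long
 **/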
4951static void i40e_service_timer(unsigned long data)
4952{
4953 struct i40e_pf *pf = (struct i40e_pf *)data;
4954
4955 mod_timer(&pf->service_timer,
4956 round_jiffies(jiffies + pf->service_timer_period));
4957 i40e_service_event_schedule(pf);
4958}
4959
4960
4961
4962
4963
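/**
 * i40e_set_num_rings_in_vsi - set queue pair, descriptor and vector counts
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative on an unknown VSI type.
 **/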
4964static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
4965{
4966 struct i40e_pf *pf = vsi->back;
4967
4968 switch (vsi->type) {
4969 case I40E_VSI_MAIN:
4970 vsi->alloc_queue_pairs = pf->num_lan_qps;
4971 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
4972 I40E_REQ_DESCRIPTOR_MULTIPLE);
4973 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4974 vsi->num_q_vectors = pf->num_lan_msix;
4975 else
4976 vsi->num_q_vectors = 1;
4977
4978 break;
4979
4980 case I40E_VSI_FDIR:
4981 vsi->alloc_queue_pairs = 1;
4982 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
4983 I40E_REQ_DESCRIPTOR_MULTIPLE);
4984 vsi->num_q_vectors = 1;
4985 break;
4986
4987 case I40E_VSI_VMDQ2:
4988 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
4989 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
4990 I40E_REQ_DESCRIPTOR_MULTIPLE);
4991 vsi->num_q_vectors = pf->num_vmdq_msix;
4992 break;
4993
4994 case I40E_VSI_SRIOV:
4995 vsi->alloc_queue_pairs = pf->num_vf_qps;
4996 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
4997 I40E_REQ_DESCRIPTOR_MULTIPLE);
4998 break;
4999
5000 default:
5001 WARN_ON(1);
5002 return -ENODATA;
5003 }
5004
5005 return 0;
5006}
5007
5008
5009
5010
5011
5012
5013
5014
5015
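/**
 * i40e_vsi_mem_alloc - allocate and initialize a VSI struct in the PF
 * @pf: board private structure
 * @type: type of VSI to allocate
 *
 * Takes the switch_mutex and returns the index of the new VSI in
 * pf->vsi[] on success, otherwise a negative error code.
 **/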
5016static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
5017{
5018 int ret = -ENODEV;
5019 struct i40e_vsi *vsi;
5020 int sz_vectors;
5021 int sz_rings;
5022 int vsi_idx;
5023 int i;
5024
5025
5026 mutex_lock(&pf->switch_mutex);
5027
5028
5029
5030
5031
5032
5033
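	/* The VSI list may be fragmented if VSIs have come and gone, so
	 * search from the next slot forward and wrap around if needed.
	 */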
5034 i = pf->next_vsi;
5035 while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
5036 i++;
5037 if (i >= pf->hw.func_caps.num_vsis) {
5038 i = 0;
5039 while (i < pf->next_vsi && pf->vsi[i])
5040 i++;
5041 }
5042
5043 if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
5044 vsi_idx = i;
5045 } else {
5046 ret = -ENODEV;
5047 goto unlock_pf;
5048 }
5049 pf->next_vsi = ++i;
5050
5051 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
5052 if (!vsi) {
5053 ret = -ENOMEM;
5054 goto unlock_pf;
5055 }
5056 vsi->type = type;
5057 vsi->back = pf;
5058 set_bit(__I40E_DOWN, &vsi->state);
5059 vsi->flags = 0;
5060 vsi->idx = vsi_idx;
5061 vsi->rx_itr_setting = pf->rx_itr_default;
5062 vsi->tx_itr_setting = pf->tx_itr_default;
5063 vsi->netdev_registered = false;
5064 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
5065 INIT_LIST_HEAD(&vsi->mac_filter_list);
5066
5067 ret = i40e_set_num_rings_in_vsi(vsi);
5068 if (ret)
5069 goto err_rings;
5070
5071
5072 sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
5073 vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
5074 if (!vsi->tx_rings) {
5075 ret = -ENOMEM;
5076 goto err_rings;
5077 }
5078 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
5079
5080
	sz_vectors = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
5082 vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
5083 if (!vsi->q_vectors) {
5084 ret = -ENOMEM;
5085 goto err_vectors;
5086 }
5087
5088
5089 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
5090
5091 pf->vsi[vsi_idx] = vsi;
5092 ret = vsi_idx;
5093 goto unlock_pf;
5094
5095err_vectors:
5096 kfree(vsi->tx_rings);
5097err_rings:
5098 pf->next_vsi = i - 1;
5099 kfree(vsi);
5100unlock_pf:
5101 mutex_unlock(&pf->switch_mutex);
5102 return ret;
5103}
5104
5105
5106
5107
5108
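/**
 * i40e_vsi_clear - free the VSI struct and release its slot in the PF
 * @vsi: the VSI being freed
 **/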
5109static int i40e_vsi_clear(struct i40e_vsi *vsi)
5110{
5111 struct i40e_pf *pf;
5112
5113 if (!vsi)
5114 return 0;
5115
5116 if (!vsi->back)
5117 goto free_vsi;
5118 pf = vsi->back;
5119
5120 mutex_lock(&pf->switch_mutex);
5121 if (!pf->vsi[vsi->idx]) {
5122 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
5123 vsi->idx, vsi->idx, vsi, vsi->type);
5124 goto unlock_vsi;
5125 }
5126
5127 if (pf->vsi[vsi->idx] != vsi) {
5128 dev_err(&pf->pdev->dev,
5129 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
5130 pf->vsi[vsi->idx]->idx,
5131 pf->vsi[vsi->idx],
5132 pf->vsi[vsi->idx]->type,
5133 vsi->idx, vsi, vsi->type);
5134 goto unlock_vsi;
5135 }
5136
5137
5138 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
5139 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
5140
5141
5142 kfree(vsi->q_vectors);
5143 kfree(vsi->tx_rings);
5144
5145 pf->vsi[vsi->idx] = NULL;
5146 if (vsi->idx < pf->next_vsi)
5147 pf->next_vsi = vsi->idx;
5148
5149unlock_vsi:
5150 mutex_unlock(&pf->switch_mutex);
5151free_vsi:
5152 kfree(vsi);
5153
5154 return 0;
5155}
5156
5157
5158
5159
5160
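/**
 * i40e_vsi_clear_rings - free the ring structs attached to a VSI
 * @vsi: the VSI being cleaned
 *
 * Each queue pair was allocated as a single chunk anchored at the Tx
 * ring, so freeing the Tx ring releases its Rx ring as well.
 **/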
5161static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
5162{
5163 int i;
5164
	if (vsi->tx_rings[0]) {
		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
			kfree_rcu(vsi->tx_rings[i], rcu);
			vsi->tx_rings[i] = NULL;
			vsi->rx_rings[i] = NULL;
		}
	}
5171
5172 return 0;
5173}
5174
5175
5176
5177
5178
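/**
 * i40e_alloc_rings - allocate the Tx and Rx ring structs for a VSI
 * @vsi: the VSI being configured
 *
 * Each queue pair is one allocation: the Tx ring followed immediately
 * by its Rx ring.
 **/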
5179static int i40e_alloc_rings(struct i40e_vsi *vsi)
5180{
5181 struct i40e_pf *pf = vsi->back;
5182 int i;
5183
5184
5185 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
5186 struct i40e_ring *tx_ring;
5187 struct i40e_ring *rx_ring;
5188
5189 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
5190 if (!tx_ring)
5191 goto err_out;
5192
5193 tx_ring->queue_index = i;
5194 tx_ring->reg_idx = vsi->base_queue + i;
5195 tx_ring->ring_active = false;
5196 tx_ring->vsi = vsi;
5197 tx_ring->netdev = vsi->netdev;
5198 tx_ring->dev = &pf->pdev->dev;
5199 tx_ring->count = vsi->num_desc;
5200 tx_ring->size = 0;
5201 tx_ring->dcb_tc = 0;
5202 vsi->tx_rings[i] = tx_ring;
5203
5204 rx_ring = &tx_ring[1];
5205 rx_ring->queue_index = i;
5206 rx_ring->reg_idx = vsi->base_queue + i;
5207 rx_ring->ring_active = false;
5208 rx_ring->vsi = vsi;
5209 rx_ring->netdev = vsi->netdev;
5210 rx_ring->dev = &pf->pdev->dev;
5211 rx_ring->count = vsi->num_desc;
5212 rx_ring->size = 0;
5213 rx_ring->dcb_tc = 0;
5214 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
5215 set_ring_16byte_desc_enabled(rx_ring);
5216 else
5217 clear_ring_16byte_desc_enabled(rx_ring);
5218 vsi->rx_rings[i] = rx_ring;
5219 }
5220
5221 return 0;
5222
5223err_out:
5224 i40e_vsi_clear_rings(vsi);
5225 return -ENOMEM;
5226}
5227
5228
5229
5230
5231
5232
5233
5234
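/**
 * i40e_reserve_msix_vectors - reserve MSI-X vectors from the kernel
 * @pf: board private structure
 * @vectors: number of vectors requested
 *
 * Retries with the count suggested by pci_enable_msix() until the
 * reservation succeeds or fewer than I40E_MIN_MSIX remain.  Returns
 * the number of vectors reserved, or 0 on failure.
 **/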
5235static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
5236{
5237 int err = 0;
5238
5239 pf->num_msix_entries = 0;
5240 while (vectors >= I40E_MIN_MSIX) {
5241 err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
5242 if (err == 0) {
5243
5244 pf->num_msix_entries = vectors;
5245 break;
5246 } else if (err < 0) {
5247
5248 dev_info(&pf->pdev->dev,
5249 "MSI-X vector reservation failed: %d\n", err);
5250 vectors = 0;
5251 break;
5252 } else {
5253
5254 dev_info(&pf->pdev->dev,
5255 "MSI-X vectors wanted %d, retrying with %d\n",
5256 vectors, err);
5257 vectors = err;
5258 }
5259 }
5260
5261 if (vectors > 0 && vectors < I40E_MIN_MSIX) {
5262 dev_info(&pf->pdev->dev,
5263 "Couldn't get enough vectors, only %d available\n",
5264 vectors);
5265 vectors = 0;
5266 }
5267
5268 return vectors;
5269}
5270
5271
5272
5273
5274
5275
5276
5277
5278
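/**
 * i40e_init_msix - size the MSI-X vector budget and reserve the vectors
 * @pf: board private structure
 *
 * Sizes the request from the LAN, VMDq, and Flow Director needs, then
 * trims the feature set if fewer vectors are granted than requested.
 **/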
5279static int i40e_init_msix(struct i40e_pf *pf)
5280{
5281 i40e_status err = 0;
5282 struct i40e_hw *hw = &pf->hw;
5283 int v_budget, i;
5284 int vec;
5285
5286 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
5287 return -ENODEV;
5288
5289
5290
5291
5292
5293
5294
5295
5296
5297
5298
5299
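	/* The vector budget is one vector for the "other" causes plus
	 * one per LAN queue pair, one per queue on each VMDq VSI, and
	 * one more for Flow Director if it is enabled.
	 */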
5300 pf->num_lan_msix = pf->num_lan_qps;
5301 pf->num_vmdq_msix = pf->num_vmdq_qps;
5302 v_budget = 1 + pf->num_lan_msix;
5303 v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
5304 if (pf->flags & I40E_FLAG_FDIR_ENABLED)
5305 v_budget++;
5306
5307
5308 v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);
5309
5310 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
5311 GFP_KERNEL);
5312 if (!pf->msix_entries)
5313 return -ENOMEM;
5314
5315 for (i = 0; i < v_budget; i++)
5316 pf->msix_entries[i].entry = i;
5317 vec = i40e_reserve_msix_vectors(pf, v_budget);
5318 if (vec < I40E_MIN_MSIX) {
5319 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
5320 kfree(pf->msix_entries);
5321 pf->msix_entries = NULL;
5322 return -ENODEV;
5323
5324 } else if (vec == I40E_MIN_MSIX) {
5325
5326 dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n");
5327 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
5328 pf->num_vmdq_vsis = 0;
5329 pf->num_vmdq_qps = 0;
5330 pf->num_vmdq_msix = 0;
5331 pf->num_lan_qps = 1;
5332 pf->num_lan_msix = 1;
5333
5334 } else if (vec != v_budget) {
5335
5336 pf->num_vmdq_msix = 1;
5337 vec--;
5338
5339
5340 switch (vec) {
5341 case 2:
5342 pf->num_vmdq_vsis = 1;
5343 pf->num_lan_msix = 1;
5344 break;
5345 case 3:
5346 pf->num_vmdq_vsis = 1;
5347 pf->num_lan_msix = 2;
5348 break;
5349 default:
5350 pf->num_lan_msix = min_t(int, (vec / 2),
5351 pf->num_lan_qps);
5352 pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
5353 I40E_DEFAULT_NUM_VMDQ_VSI);
5354 break;
5355 }
5356 }
5357
5358 return err;
5359}
5360
5361
5362
5363
5364
5365
5366
5367
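/**
 * i40e_alloc_q_vector - allocate and initialize a single q_vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the VSI struct
 *
 * Allocates one q_vector and registers its NAPI polling routine.
 **/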
5368static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
5369{
5370 struct i40e_q_vector *q_vector;
5371
5372
5373 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
5374 if (!q_vector)
5375 return -ENOMEM;
5376
5377 q_vector->vsi = vsi;
5378 q_vector->v_idx = v_idx;
5379 cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
5380 if (vsi->netdev)
5381 netif_napi_add(vsi->netdev, &q_vector->napi,
5382 i40e_napi_poll, vsi->work_limit);
5383
5384 q_vector->rx.latency_range = I40E_LOW_LATENCY;
5385 q_vector->tx.latency_range = I40E_LOW_LATENCY;
5386
5387
5388 vsi->q_vectors[v_idx] = q_vector;
5389
5390 return 0;
5391}
5392
5393
5394
5395
5396
5397
5398
5399
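/**
 * i40e_alloc_q_vectors - allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative on failure; any vectors allocated
 * before a failure are freed again.
 **/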
5400static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
5401{
5402 struct i40e_pf *pf = vsi->back;
5403 int v_idx, num_q_vectors;
5404 int err;
5405
5406
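	/* without MSI-X, the one vector goes only to the LAN VSI */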
5407 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5408 num_q_vectors = vsi->num_q_vectors;
5409 else if (vsi == pf->vsi[pf->lan_vsi])
5410 num_q_vectors = 1;
5411 else
5412 return -EINVAL;
5413
5414 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
5415 err = i40e_alloc_q_vector(vsi, v_idx);
5416 if (err)
5417 goto err_out;
5418 }
5419
5420 return 0;
5421
5422err_out:
5423 while (v_idx--)
5424 i40e_free_q_vector(vsi, v_idx);
5425
5426 return err;
5427}
5428
5429
5430
5431
5432
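/**
 * i40e_init_interrupt_scheme - set up MSI-X, MSI, or legacy interrupts
 * @pf: board private structure to initialize
 *
 * Falls back from MSI-X to MSI to legacy IRQ, clearing the feature
 * flags that each downgrade can no longer support.
 **/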
5433static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
5434{
5435 int err = 0;
5436
5437 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
5438 err = i40e_init_msix(pf);
5439 if (err) {
5440 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
5441 I40E_FLAG_RSS_ENABLED |
5442 I40E_FLAG_MQ_ENABLED |
5443 I40E_FLAG_DCB_ENABLED |
5444 I40E_FLAG_SRIOV_ENABLED |
5445 I40E_FLAG_FDIR_ENABLED |
5446 I40E_FLAG_FDIR_ATR_ENABLED |
5447 I40E_FLAG_VMDQ_ENABLED);
5448
5449
5450 i40e_determine_queue_usage(pf);
5451 }
5452 }
5453
5454 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
5455 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
5456 dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
5457 err = pci_enable_msi(pf->pdev);
5458 if (err) {
5459 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
5460 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
5461 }
5462 }
5463
5464 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
5465 dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
5466
5467
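	/* track first vector for misc interrupts */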
5468 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
5469}
5470
5471
5472
5473
5474
5475
5476
5477
5478
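/**
 * i40e_setup_misc_vector - set up vector 0 for the "other" causes
 * @pf: board private structure
 *
 * Requests MSI-X vector 0 for the non-queue interrupts such as Admin
 * Queue events and link changes, then enables those causes.  Only
 * used when the device is in MSI-X mode.
 **/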
5479static int i40e_setup_misc_vector(struct i40e_pf *pf)
5480{
5481 struct i40e_hw *hw = &pf->hw;
5482 int err = 0;
5483
5484
5485
5486
5487 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
5488 err = request_irq(pf->msix_entries[0].vector,
5489 i40e_intr, 0, pf->misc_int_name, pf);
5490 if (err) {
5491 dev_info(&pf->pdev->dev,
5492 "request_irq for msix_misc failed: %d\n", err);
5493 return -EFAULT;
5494 }
5495 }
5496
5497 i40e_enable_misc_int_causes(hw);
5498
5499
5500 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
5501 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
5502
5503 i40e_flush(hw);
5504
5505 i40e_irq_dynamic_enable_icr0(pf);
5506
5507 return err;
5508}
5509
5510
5511
5512
5513
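/**
 * i40e_config_rss - prepare for RSS if used
 * @pf: board private structure
 *
 * Writes the RSS hash key, enables hashing on the common TCP/UDP and
 * fragment packet types, and fills the lookup table round robin across
 * the first pf->rss_size queues.
 **/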
5514static int i40e_config_rss(struct i40e_pf *pf)
5515{
5516 struct i40e_hw *hw = &pf->hw;
5517 u32 lut = 0;
5518 int i, j;
5519 u64 hena;
5520
5521 static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
5522 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
5523 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
5524 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
5525
5526
5527 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
5528 wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
5529
5530
5531 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
5532 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
5533 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
5534 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
5535 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
5536 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
5537 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
5538 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
5539 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
5540 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
5541 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)|
5542 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
5543 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
5544 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
5545
5546
5547 for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
5548
5549
5550
5551
5552
5553
5554
5555
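		/* wrap the queue index so flows are spread round robin
		 * across the first pf->rss_size queues; lut is a 4-entry
		 * sliding window flushed to HW every fourth iteration
		 */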
5556 if (j == pf->rss_size)
5557 j = 0;
5558
5559 lut = (lut << 8) | (j &
5560 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
5561
5562 if ((i & 3) == 3)
5563 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
5564 }
5565 i40e_flush(hw);
5566
5567 return 0;
5568}
5569
5570
5571
5572
5573
5574
5575
5576
5577
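/**
 * i40e_sw_init - initialize filters, flags, and capability-based settings
 * @pf: board private structure to initialize
 *
 * Derives the feature flags from the reported hardware capabilities
 * and allocates the queue pair and interrupt tracking piles.
 **/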
5578static int i40e_sw_init(struct i40e_pf *pf)
5579{
5580 int err = 0;
5581 int size;
5582
5583 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
5584 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
5585 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
5586 if (I40E_DEBUG_USER & debug)
5587 pf->hw.debug_mask = debug;
5588 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
5589 I40E_DEFAULT_MSG_ENABLE);
5590 }
5591
5592
5593 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
5594 I40E_FLAG_MSI_ENABLED |
5595 I40E_FLAG_MSIX_ENABLED |
5596 I40E_FLAG_RX_PS_ENABLED |
5597 I40E_FLAG_MQ_ENABLED |
5598 I40E_FLAG_RX_1BUF_ENABLED;
5599
5600 pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
5601 if (pf->hw.func_caps.rss) {
5602 pf->flags |= I40E_FLAG_RSS_ENABLED;
5603 pf->rss_size = min_t(int, pf->rss_size_max,
5604 nr_cpus_node(numa_node_id()));
5605 } else {
5606 pf->rss_size = 1;
5607 }
5608
5609 if (pf->hw.func_caps.dcb)
5610 pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC;
5611 else
5612 pf->num_tc_qps = 0;
5613
5614 if (pf->hw.func_caps.fd) {
5615
5616 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
5617 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
5618 pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED;
5619 dev_info(&pf->pdev->dev,
5620 "Flow Director ATR mode Enabled\n");
5621 pf->flags |= I40E_FLAG_FDIR_ENABLED;
5622 dev_info(&pf->pdev->dev,
5623 "Flow Director Side Band mode Enabled\n");
5624 pf->fdir_pf_filter_count =
5625 pf->hw.func_caps.fd_filters_guaranteed;
5626 }
5627 } else {
5628 pf->fdir_pf_filter_count = 0;
5629 }
5630
5631 if (pf->hw.func_caps.vmdq) {
5632 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
5633 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
5634 pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
5635 }
5636
5637
5638 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
5639 pf->flags |= I40E_FLAG_MFP_ENABLED;
5640 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
5641 }
5642
5643#ifdef CONFIG_PCI_IOV
5644 if (pf->hw.func_caps.num_vfs) {
5645 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
5646 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
5647 pf->num_req_vfs = min_t(int,
5648 pf->hw.func_caps.num_vfs,
5649 I40E_MAX_VF_COUNT);
5650 }
5651#endif
5652 pf->eeprom_version = 0xDEAD;
5653 pf->lan_veb = I40E_NO_VEB;
5654 pf->lan_vsi = I40E_NO_VSI;
5655
5656
5657 size = sizeof(struct i40e_lump_tracking)
5658 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
5659 pf->qp_pile = kzalloc(size, GFP_KERNEL);
5660 if (!pf->qp_pile) {
5661 err = -ENOMEM;
5662 goto sw_init_done;
5663 }
5664 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
5665 pf->qp_pile->search_hint = 0;
5666
5667
5668 size = sizeof(struct i40e_lump_tracking)
5669 + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
5670 pf->irq_pile = kzalloc(size, GFP_KERNEL);
5671 if (!pf->irq_pile) {
5672 kfree(pf->qp_pile);
5673 err = -ENOMEM;
5674 goto sw_init_done;
5675 }
5676 pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
5677 pf->irq_pile->search_hint = 0;
5678
5679 mutex_init(&pf->switch_mutex);
5680
5681sw_init_done:
5682 return err;
5683}
5684
5685
5686
5687
5688
5689
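/**
 * i40e_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 **/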
5690static int i40e_set_features(struct net_device *netdev,
5691 netdev_features_t features)
5692{
5693 struct i40e_netdev_priv *np = netdev_priv(netdev);
5694 struct i40e_vsi *vsi = np->vsi;
5695
5696 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5697 i40e_vlan_stripping_enable(vsi);
5698 else
5699 i40e_vlan_stripping_disable(vsi);
5700
5701 return 0;
5702}
5703
5704static const struct net_device_ops i40e_netdev_ops = {
5705 .ndo_open = i40e_open,
5706 .ndo_stop = i40e_close,
5707 .ndo_start_xmit = i40e_lan_xmit_frame,
5708 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
5709 .ndo_set_rx_mode = i40e_set_rx_mode,
5710 .ndo_validate_addr = eth_validate_addr,
5711 .ndo_set_mac_address = i40e_set_mac,
5712 .ndo_change_mtu = i40e_change_mtu,
5713 .ndo_tx_timeout = i40e_tx_timeout,
5714 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
5715 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
5716#ifdef CONFIG_NET_POLL_CONTROLLER
5717 .ndo_poll_controller = i40e_netpoll,
5718#endif
5719 .ndo_setup_tc = i40e_setup_tc,
5720 .ndo_set_features = i40e_set_features,
5721 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
5722 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
5723 .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
5724 .ndo_get_vf_config = i40e_ndo_get_vf_config,
5725};
5726
5727
5728
5729
5730
5731
5732
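/**
 * i40e_config_netdev - allocate and configure the netdev for a VSI
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative value on failure.
 **/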
5733static int i40e_config_netdev(struct i40e_vsi *vsi)
5734{
5735 struct i40e_pf *pf = vsi->back;
5736 struct i40e_hw *hw = &pf->hw;
5737 struct i40e_netdev_priv *np;
5738 struct net_device *netdev;
5739 u8 mac_addr[ETH_ALEN];
5740 int etherdev_size;
5741
5742 etherdev_size = sizeof(struct i40e_netdev_priv);
5743 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
5744 if (!netdev)
5745 return -ENOMEM;
5746
5747 vsi->netdev = netdev;
5748 np = netdev_priv(netdev);
5749 np->vsi = vsi;
5750
5751 netdev->hw_enc_features = NETIF_F_IP_CSUM |
5752 NETIF_F_GSO_UDP_TUNNEL |
5753 NETIF_F_TSO |
5754 NETIF_F_SG;
5755
5756 netdev->features = NETIF_F_SG |
5757 NETIF_F_IP_CSUM |
5758 NETIF_F_SCTP_CSUM |
5759 NETIF_F_HIGHDMA |
5760 NETIF_F_GSO_UDP_TUNNEL |
5761 NETIF_F_HW_VLAN_CTAG_TX |
5762 NETIF_F_HW_VLAN_CTAG_RX |
5763 NETIF_F_HW_VLAN_CTAG_FILTER |
5764 NETIF_F_IPV6_CSUM |
5765 NETIF_F_TSO |
5766 NETIF_F_TSO6 |
5767 NETIF_F_RXCSUM |
5768 NETIF_F_RXHASH |
5769 0;
5770
5771
5772 netdev->hw_features |= netdev->features;
5773
5774 if (vsi->type == I40E_VSI_MAIN) {
5775 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
5776 memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
5777 } else {
5778
5779 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
5780 pf->vsi[pf->lan_vsi]->netdev->name);
5781 random_ether_addr(mac_addr);
5782 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
5783 }
5784
5785 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
5786 memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
5787
5788
5789
5790 netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
5791 NETIF_F_HW_VLAN_CTAG_RX |
5792 NETIF_F_HW_VLAN_CTAG_FILTER);
5793 netdev->priv_flags |= IFF_UNICAST_FLT;
5794 netdev->priv_flags |= IFF_SUPP_NOFCS;
5795
5796 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
5797
5798 netdev->netdev_ops = &i40e_netdev_ops;
5799 netdev->watchdog_timeo = 5 * HZ;
5800 i40e_set_ethtool_ops(netdev);
5801
5802 return 0;
5803}
5804
5805
5806
5807
5808
5809
5810
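/**
 * i40e_vsi_delete - delete a VSI from the switch
 * @vsi: the VSI being removed
 *
 * The main LAN VSI and the FDIR VSI are never deleted from the switch.
 **/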
5811static void i40e_vsi_delete(struct i40e_vsi *vsi)
5812{
5813
5814 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
5815 return;
5816
5817
5818 if (vsi->type == I40E_VSI_FDIR)
5819 return;
5820
5821 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
5823}
5824
5825
5826
5827
5828
5829
5830
5831
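/**
 * i40e_add_vsi - add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * Builds the VSI context for the given type and adds it to (or, for
 * the main VSI, updates it in) the firmware switch configuration.
 **/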
5832static int i40e_add_vsi(struct i40e_vsi *vsi)
5833{
5834 int ret = -ENODEV;
5835 struct i40e_mac_filter *f, *ftmp;
5836 struct i40e_pf *pf = vsi->back;
5837 struct i40e_hw *hw = &pf->hw;
5838 struct i40e_vsi_context ctxt;
5839 u8 enabled_tc = 0x1;
5840 int f_count = 0;
5841
5842 memset(&ctxt, 0, sizeof(ctxt));
5843 switch (vsi->type) {
5844 case I40E_VSI_MAIN:
5845
5846
5847
5848
5849
5850 ctxt.seid = pf->main_vsi_seid;
5851 ctxt.pf_num = pf->hw.pf_id;
5852 ctxt.vf_num = 0;
5853 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
5854 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5855 if (ret) {
5856 dev_info(&pf->pdev->dev,
5857 "couldn't get pf vsi config, err %d, aq_err %d\n",
5858 ret, pf->hw.aq.asq_last_status);
5859 return -ENOENT;
5860 }
5861 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5862 vsi->info.valid_sections = 0;
5863
5864 vsi->seid = ctxt.seid;
5865 vsi->id = ctxt.vsi_number;
5866
5867 enabled_tc = i40e_pf_get_tc_map(pf);
5868
5869
5870 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
5871 memset(&ctxt, 0, sizeof(ctxt));
5872 ctxt.seid = pf->main_vsi_seid;
5873 ctxt.pf_num = pf->hw.pf_id;
5874 ctxt.vf_num = 0;
5875 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5876 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5877 if (ret) {
5878 dev_info(&pf->pdev->dev,
5879 "update vsi failed, aq_err=%d\n",
5880 pf->hw.aq.asq_last_status);
5881 ret = -ENOENT;
5882 goto err;
5883 }
5884
5885 i40e_vsi_update_queue_map(vsi, &ctxt);
5886 vsi->info.valid_sections = 0;
5887 } else {
5888
5889
5890
5891
5892 ret = i40e_vsi_config_tc(vsi, enabled_tc);
5893 if (ret) {
5894 dev_info(&pf->pdev->dev,
5895 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
5896 enabled_tc, ret,
5897 pf->hw.aq.asq_last_status);
5898 ret = -ENOENT;
5899 }
5900 }
5901 break;
5902
5903 case I40E_VSI_FDIR:
5904
5905 vsi->info.valid_sections = 0;
5906 vsi->seid = 0;
5907 vsi->id = 0;
5908 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
5909 return 0;
5911
5912 case I40E_VSI_VMDQ2:
5913 ctxt.pf_num = hw->pf_id;
5914 ctxt.vf_num = 0;
5915 ctxt.uplink_seid = vsi->uplink_seid;
5916 ctxt.connection_type = 0x1;
5917 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5918
5919 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5920
5921
5922
5923
5924 ctxt.info.switch_id = 0;
5925 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5926 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5927
5928
5929 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
5930 break;
5931
5932 case I40E_VSI_SRIOV:
5933 ctxt.pf_num = hw->pf_id;
5934 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
5935 ctxt.uplink_seid = vsi->uplink_seid;
5936 ctxt.connection_type = 0x1;
5937 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5938
5939 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5940
5941
5942
5943
5944 ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5945
5946 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
5947 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5948
5949 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
5950 break;
5951
5952 default:
5953 return -ENODEV;
5954 }
5955
5956 if (vsi->type != I40E_VSI_MAIN) {
5957 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5958 if (ret) {
5959 dev_info(&vsi->back->pdev->dev,
5960 "add vsi failed, aq_err=%d\n",
5961 vsi->back->hw.aq.asq_last_status);
5962 ret = -ENOENT;
5963 goto err;
5964 }
5965 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5966 vsi->info.valid_sections = 0;
5967 vsi->seid = ctxt.seid;
5968 vsi->id = ctxt.vsi_number;
5969 }
5970
5971
5972 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
5973 f->changed = true;
5974 f_count++;
5975 }
5976 if (f_count) {
5977 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
5978 pf->flags |= I40E_FLAG_FILTER_SYNC;
5979 }
5980
5981
5982 ret = i40e_vsi_get_bw_info(vsi);
5983 if (ret) {
5984 dev_info(&pf->pdev->dev,
5985 "couldn't get vsi bw info, err %d, aq_err %d\n",
5986 ret, pf->hw.aq.asq_last_status);
5987
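		/* the VSI itself is already in the switch, don't fail out */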
5988 ret = 0;
5989 }
5990
5991err:
5992 return ret;
5993}
5994
5995
5996
5997
5998
5999
6000
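/**
 * i40e_vsi_release - delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error.
 **/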
6001int i40e_vsi_release(struct i40e_vsi *vsi)
6002{
6003 struct i40e_mac_filter *f, *ftmp;
6004 struct i40e_veb *veb = NULL;
6005 struct i40e_pf *pf;
6006 u16 uplink_seid;
6007 int i, n;
6008
6009 pf = vsi->back;
6010
6011
6012 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
6013 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
6014 vsi->seid, vsi->uplink_seid);
6015 return -ENODEV;
6016 }
6017 if (vsi == pf->vsi[pf->lan_vsi] &&
6018 !test_bit(__I40E_DOWN, &pf->state)) {
6019 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
6020 return -ENODEV;
6021 }
6022
6023 uplink_seid = vsi->uplink_seid;
6024 if (vsi->type != I40E_VSI_SRIOV) {
6025 if (vsi->netdev_registered) {
6026 vsi->netdev_registered = false;
6027 if (vsi->netdev) {
6028
6029 unregister_netdev(vsi->netdev);
6030 free_netdev(vsi->netdev);
6031 vsi->netdev = NULL;
6032 }
6033 } else {
6034 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
6035 i40e_down(vsi);
6036 i40e_vsi_free_irq(vsi);
6037 i40e_vsi_free_tx_resources(vsi);
6038 i40e_vsi_free_rx_resources(vsi);
6039 }
6040 i40e_vsi_disable_irq(vsi);
6041 }
6042
6043 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
6044 i40e_del_filter(vsi, f->macaddr, f->vlan,
6045 f->is_vf, f->is_netdev);
6046 i40e_sync_vsi_filters(vsi);
6047
6048 i40e_vsi_delete(vsi);
6049 i40e_vsi_free_q_vectors(vsi);
6050 i40e_vsi_clear_rings(vsi);
6051 i40e_vsi_clear(vsi);
6052
6053
6054
6055
6056
6057
6058
6059
6060
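	/* If this was the last thing on the VEB, except for the
	 * controlling VSI, remove the VEB, which puts the controlling
	 * VSI onto the next level down in the switch.
	 */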
6061 for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6062 if (pf->vsi[i] &&
6063 pf->vsi[i]->uplink_seid == uplink_seid &&
6064 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
6065 n++;
6066 }
6067 }
6068 for (i = 0; i < I40E_MAX_VEB; i++) {
6069 if (!pf->veb[i])
6070 continue;
6071 if (pf->veb[i]->uplink_seid == uplink_seid)
6072 n++;
6073 if (pf->veb[i]->seid == uplink_seid)
6074 veb = pf->veb[i];
6075 }
6076 if (n == 0 && veb && veb->uplink_seid != 0)
6077 i40e_veb_release(veb);
6078
6079 return 0;
6080}
6081
6082
6083
6084
6085
6086
6087
6088
6089
6090
6091
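/**
 * i40e_vsi_setup_vectors - allocate and set up the q_vectors for a VSI
 * @vsi: VSI to be configured
 *
 * Allocates the q_vectors and reserves a contiguous block of entries
 * in the PF's IRQ tracking pile for them.
 **/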
6092static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
6093{
6094 int ret = -ENOENT;
6095 struct i40e_pf *pf = vsi->back;
6096
6097 if (vsi->q_vectors[0]) {
6098 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
6099 vsi->seid);
6100 return -EEXIST;
6101 }
6102
6103 if (vsi->base_vector) {
6104 dev_info(&pf->pdev->dev,
6105 "VSI %d has non-zero base vector %d\n",
6106 vsi->seid, vsi->base_vector);
6107 return -EEXIST;
6108 }
6109
6110 ret = i40e_alloc_q_vectors(vsi);
6111 if (ret) {
6112 dev_info(&pf->pdev->dev,
6113 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
6114 vsi->num_q_vectors, vsi->seid, ret);
6115 vsi->num_q_vectors = 0;
6116 goto vector_setup_out;
6117 }
6118
6119 if (vsi->num_q_vectors)
6120 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
6121 vsi->num_q_vectors, vsi->idx);
6122 if (vsi->base_vector < 0) {
6123 dev_info(&pf->pdev->dev,
6124 "failed to get q tracking for VSI %d, err=%d\n",
6125 vsi->seid, vsi->base_vector);
6126 i40e_vsi_free_q_vectors(vsi);
6127 ret = -ENOENT;
6128 goto vector_setup_out;
6129 }
6130
6131vector_setup_out:
6132 return ret;
6133}
6134
6135
6136
6137
6138
6139
6140
6141
6142
6143
6144
6145
6146
6147
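/**
 * i40e_vsi_setup - set up a new VSI in the driver and the switch
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage-dependent parameter (the VF id for an SRIOV VSI)
 *
 * Returns pointer to the successfully allocated and configured VSI sw
 * struct on success, otherwise returns NULL on failure.
 **/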
6148struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
6149 u16 uplink_seid, u32 param1)
6150{
6151 struct i40e_vsi *vsi = NULL;
6152 struct i40e_veb *veb = NULL;
6153 int ret, i;
6154 int v_idx;
6155
6156
6157
6158
6159
6160
6161
6162
6163
6164
6165
6166
6167
6168
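	/* The uplink is either the MAC, an existing VEB, or a VSI that
	 * needs a VEB inserted above it; look for the VEB first.
	 */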
6169 for (i = 0; i < I40E_MAX_VEB; i++) {
6170 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
6171 veb = pf->veb[i];
6172 break;
6173 }
6174 }
6175
6176 if (!veb && uplink_seid != pf->mac_seid) {
6177
6178 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6179 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
6180 vsi = pf->vsi[i];
6181 break;
6182 }
6183 }
6184 if (!vsi) {
6185 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
6186 uplink_seid);
6187 return NULL;
6188 }
6189
6190 if (vsi->uplink_seid == pf->mac_seid)
6191 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
6192 vsi->tc_config.enabled_tc);
6193 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
6194 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
6195 vsi->tc_config.enabled_tc);
6196
6197 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
6198 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
6199 veb = pf->veb[i];
6200 }
6201 if (!veb) {
6202 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
6203 return NULL;
6204 }
6205
6206 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
6207 uplink_seid = veb->seid;
6208 }
6209
6210
6211 v_idx = i40e_vsi_mem_alloc(pf, type);
6212 if (v_idx < 0)
6213 goto err_alloc;
6214 vsi = pf->vsi[v_idx];
6215 vsi->type = type;
6216 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
6217
6218 if (type == I40E_VSI_MAIN)
6219 pf->lan_vsi = v_idx;
6220 else if (type == I40E_VSI_SRIOV)
6221 vsi->vf_id = param1;
6222
6223 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
6224 if (ret < 0) {
6225 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
6226 vsi->seid, ret);
6227 goto err_vsi;
6228 }
6229 vsi->base_queue = ret;
6230
6231
6232 vsi->uplink_seid = uplink_seid;
6233 ret = i40e_add_vsi(vsi);
6234 if (ret)
6235 goto err_vsi;
6236
6237 switch (vsi->type) {
6238
6239 case I40E_VSI_MAIN:
6240 case I40E_VSI_VMDQ2:
6241 ret = i40e_config_netdev(vsi);
6242 if (ret)
6243 goto err_netdev;
6244 ret = register_netdev(vsi->netdev);
6245 if (ret)
6246 goto err_netdev;
6247 vsi->netdev_registered = true;
6248 netif_carrier_off(vsi->netdev);
6249
6250
6251 case I40E_VSI_FDIR:
6252
6253 ret = i40e_vsi_setup_vectors(vsi);
6254 if (ret)
6255 goto err_msix;
6256
6257 ret = i40e_alloc_rings(vsi);
6258 if (ret)
6259 goto err_rings;
6260
6261
6262 i40e_vsi_map_rings_to_vectors(vsi);
6263
6264 i40e_vsi_reset_stats(vsi);
6265 break;
6266
6267 default:
6268
6269 break;
6270 }
6271
6272 return vsi;
6273
6274err_rings:
6275 i40e_vsi_free_q_vectors(vsi);
6276err_msix:
6277 if (vsi->netdev_registered) {
6278 vsi->netdev_registered = false;
6279 unregister_netdev(vsi->netdev);
6280 free_netdev(vsi->netdev);
6281 vsi->netdev = NULL;
6282 }
6283err_netdev:
6284 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
6285err_vsi:
6286 i40e_vsi_clear(vsi);
6287err_alloc:
6288 return NULL;
6289}
6290
6291
6292
6293
6294
6295
6296
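/**
 * i40e_veb_get_bw_info - query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for the given VEB.
 **/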
6297static int i40e_veb_get_bw_info(struct i40e_veb *veb)
6298{
6299 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
6300 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
6301 struct i40e_pf *pf = veb->pf;
6302 struct i40e_hw *hw = &pf->hw;
6303 u32 tc_bw_max;
6304 int ret = 0;
6305 int i;
6306
6307 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
6308 &bw_data, NULL);
6309 if (ret) {
6310 dev_info(&pf->pdev->dev,
6311 "query veb bw config failed, aq_err=%d\n",
6312 hw->aq.asq_last_status);
6313 goto out;
6314 }
6315
6316 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
6317 &ets_data, NULL);
6318 if (ret) {
6319 dev_info(&pf->pdev->dev,
6320 "query veb bw ets config failed, aq_err=%d\n",
6321 hw->aq.asq_last_status);
6322 goto out;
6323 }
6324
6325 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
6326 veb->bw_max_quanta = ets_data.tc_bw_max;
6327 veb->is_abs_credits = bw_data.absolute_credits_enable;
6328 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
6329 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
6330 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6331 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
6332 veb->bw_tc_limit_credits[i] =
6333 le16_to_cpu(bw_data.tc_bw_limits[i]);
6334 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
6335 }
6336
6337out:
6338 return ret;
6339}
6340
6341
6342
6343
6344
6345
6346
6347
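/**
 * i40e_veb_mem_alloc - allocate and initialize a VEB struct in the PF
 * @pf: board private structure
 *
 * Takes the switch_mutex and returns the index of the new VEB in
 * pf->veb[] on success, otherwise -ENOMEM.
 **/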
6348static int i40e_veb_mem_alloc(struct i40e_pf *pf)
6349{
6350 int ret = -ENOENT;
6351 struct i40e_veb *veb;
6352 int i;
6353
6354
6355 mutex_lock(&pf->switch_mutex);
6356
6357
6358
6359
6360
6361
6362
6363 i = 0;
6364 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
6365 i++;
6366 if (i >= I40E_MAX_VEB) {
6367 ret = -ENOMEM;
6368 goto err_alloc_veb;
6369 }
6370
6371 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
6372 if (!veb) {
6373 ret = -ENOMEM;
6374 goto err_alloc_veb;
6375 }
6376 veb->pf = pf;
6377 veb->idx = i;
6378 veb->enabled_tc = 1;
6379
6380 pf->veb[i] = veb;
6381 ret = i;
6382err_alloc_veb:
6383 mutex_unlock(&pf->switch_mutex);
6384 return ret;
6385}
6386
6387
6388
6389
6390
6391
6392
6393
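/**
 * i40e_switch_branch_release - release a whole branch of the switch tree
 * @branch: the VEB at the root of the branch
 *
 * Recursively releases any child VEBs, then the VSIs hanging off this
 * VEB, and finally the VEB itself.
 **/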
6394static void i40e_switch_branch_release(struct i40e_veb *branch)
6395{
6396 struct i40e_pf *pf = branch->pf;
6397 u16 branch_seid = branch->seid;
6398 u16 veb_idx = branch->idx;
6399 int i;
6400
6401
6402 for (i = 0; i < I40E_MAX_VEB; i++) {
6403 if (!pf->veb[i])
6404 continue;
6405 if (pf->veb[i]->uplink_seid == branch->seid)
6406 i40e_switch_branch_release(pf->veb[i]);
6407 }
6408
6409
6410
6411
6412
6413
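	/* Release the VSIs on this VEB, but not the owner VSI.  Removing
	 * the last VSI can remove the VEB itself as a side effect, so the
	 * VEB is re-checked through pf->veb[] below instead of reusing
	 * the branch pointer.
	 */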
6414 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6415 if (!pf->vsi[i])
6416 continue;
6417 if (pf->vsi[i]->uplink_seid == branch_seid &&
6418 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
6419 i40e_vsi_release(pf->vsi[i]);
6420 }
6421 }
6422
6423
6424
6425
6426
6427
6428 if (pf->veb[veb_idx])
6429 i40e_veb_release(pf->veb[veb_idx]);
6430}
6431
6432
6433
6434
6435
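/**
 * i40e_veb_clear - remove the VEB struct from the PF and free it
 * @veb: the VEB being removed
 **/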
6436static void i40e_veb_clear(struct i40e_veb *veb)
6437{
6438 if (!veb)
6439 return;
6440
6441 if (veb->pf) {
6442 struct i40e_pf *pf = veb->pf;
6443
6444 mutex_lock(&pf->switch_mutex);
6445 if (pf->veb[veb->idx] == veb)
6446 pf->veb[veb->idx] = NULL;
6447 mutex_unlock(&pf->switch_mutex);
6448 }
6449
6450 kfree(veb);
6451}
6452
6453
6454
6455
6456
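/**
 * i40e_veb_release - delete a VEB from the switch
 * @veb: the VEB being removed
 *
 * A VEB can only be removed when exactly one VSI is left hanging off
 * it; that VSI is reconnected to the VEB's old uplink.
 **/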
6457void i40e_veb_release(struct i40e_veb *veb)
6458{
6459 struct i40e_vsi *vsi = NULL;
6460 struct i40e_pf *pf;
6461 int i, n = 0;
6462
6463 pf = veb->pf;
6464
6465
6466 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6467 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
6468 n++;
6469 vsi = pf->vsi[i];
6470 }
6471 }
6472 if (n != 1) {
6473 dev_info(&pf->pdev->dev,
6474 "can't remove VEB %d with %d VSIs left\n",
6475 veb->seid, n);
6476 return;
6477 }
6478
6479
6480 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
6481 if (veb->uplink_seid) {
6482 vsi->uplink_seid = veb->uplink_seid;
6483 if (veb->uplink_seid == pf->mac_seid)
6484 vsi->veb_idx = I40E_NO_VEB;
6485 else
6486 vsi->veb_idx = veb->veb_idx;
6487 } else {
6488
6489 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6490 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
6491 }
6492
6493 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
6494 i40e_veb_clear(veb);
6497}
6498
6499
6500
6501
6502
6503
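/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 *
 * Adds the VEB through the AQ, retrieves its statistics index and BW
 * configuration, and marks the VSI as the VEB owner.
 **/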
6504static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
6505{
6506 bool is_default = (vsi->idx == vsi->back->lan_vsi);
6507 int ret;
6508
6509
6510 ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
6511 veb->enabled_tc, is_default, &veb->seid, NULL);
6512 if (ret) {
6513 dev_info(&veb->pf->pdev->dev,
6514 "couldn't add VEB, err %d, aq_err %d\n",
6515 ret, veb->pf->hw.aq.asq_last_status);
6516 return -EPERM;
6517 }
6518
6519
6520 ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
6521 &veb->stats_idx, NULL, NULL, NULL);
6522 if (ret) {
6523 dev_info(&veb->pf->pdev->dev,
6524 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
6525 ret, veb->pf->hw.aq.asq_last_status);
6526 return -EPERM;
6527 }
6528 ret = i40e_veb_get_bw_info(veb);
6529 if (ret) {
6530 dev_info(&veb->pf->pdev->dev,
6531 "couldn't get VEB bw info, err %d, aq_err %d\n",
6532 ret, veb->pf->hw.aq.asq_last_status);
6533 i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
6534 return -ENOENT;
6535 }
6536
6537 vsi->uplink_seid = veb->seid;
6538 vsi->veb_idx = veb->idx;
6539 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
6540
6541 return 0;
6542}
6543
6544
6545
6546
6547
6548
6549
6550
6551
6552
6553
6554
6555
6556
6557
6558
6559
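/**
 * i40e_veb_setup - set up a new VEB in the driver and the switch
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: enabled traffic class bitmap
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/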
6560struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
6561 u16 uplink_seid, u16 vsi_seid,
6562 u8 enabled_tc)
6563{
6564 struct i40e_veb *veb, *uplink_veb = NULL;
6565 int vsi_idx, veb_idx;
6566 int ret;
6567
6568
6569 if ((uplink_seid == 0 || vsi_seid == 0) &&
6570 (uplink_seid + vsi_seid != 0)) {
6571 dev_info(&pf->pdev->dev,
			 "one, not both seids are 0: uplink=%d vsi=%d\n",
6573 uplink_seid, vsi_seid);
6574 return NULL;
6575 }
6576
6577
6578 for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
6579 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
6580 break;
6581 if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
6582 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
6583 vsi_seid);
6584 return NULL;
6585 }
6586
6587 if (uplink_seid && uplink_seid != pf->mac_seid) {
6588 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6589 if (pf->veb[veb_idx] &&
6590 pf->veb[veb_idx]->seid == uplink_seid) {
6591 uplink_veb = pf->veb[veb_idx];
6592 break;
6593 }
6594 }
6595 if (!uplink_veb) {
6596 dev_info(&pf->pdev->dev,
6597 "uplink seid %d not found\n", uplink_seid);
6598 return NULL;
6599 }
6600 }
6601
6602
6603 veb_idx = i40e_veb_mem_alloc(pf);
6604 if (veb_idx < 0)
6605 goto err_alloc;
6606 veb = pf->veb[veb_idx];
6607 veb->flags = flags;
6608 veb->uplink_seid = uplink_seid;
6609 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
6610 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
6611
6612
6613 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
6614 if (ret)
6615 goto err_veb;
6616
6617 return veb;
6618
6619err_veb:
6620 i40e_veb_clear(veb);
6621err_alloc:
6622 return NULL;
6623}
6624
6625
6626
6627
6628
6629
6630
6631
6632
6633
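/**
 * i40e_setup_pf_switch_element - record a switch config element in the PF
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * Helper that extracts the useful SEID values from a reported element.
 **/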
6634static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
6635 struct i40e_aqc_switch_config_element_resp *ele,
6636 u16 num_reported, bool printconfig)
6637{
6638 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
6639 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
6640 u8 element_type = ele->element_type;
6641 u16 seid = le16_to_cpu(ele->seid);
6642
6643 if (printconfig)
6644 dev_info(&pf->pdev->dev,
6645 "type=%d seid=%d uplink=%d downlink=%d\n",
6646 element_type, seid, uplink_seid, downlink_seid);
6647
6648 switch (element_type) {
6649 case I40E_SWITCH_ELEMENT_TYPE_MAC:
6650 pf->mac_seid = seid;
6651 break;
6652 case I40E_SWITCH_ELEMENT_TYPE_VEB:
6653
6654 if (uplink_seid != pf->mac_seid)
6655 break;
6656 if (pf->lan_veb == I40E_NO_VEB) {
6657 int v;
6658
6659
6660 for (v = 0; v < I40E_MAX_VEB; v++) {
6661 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
6662 pf->lan_veb = v;
6663 break;
6664 }
6665 }
6666 if (pf->lan_veb == I40E_NO_VEB) {
6667 v = i40e_veb_mem_alloc(pf);
6668 if (v < 0)
6669 break;
6670 pf->lan_veb = v;
6671 }
6672 }
6673
6674 pf->veb[pf->lan_veb]->seid = seid;
6675 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
6676 pf->veb[pf->lan_veb]->pf = pf;
6677 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
6678 break;
6679 case I40E_SWITCH_ELEMENT_TYPE_VSI:
6680 if (num_reported != 1)
6681 break;
6682
6683
6684
6685 pf->mac_seid = uplink_seid;
6686 pf->pf_seid = downlink_seid;
6687 pf->main_vsi_seid = seid;
6688 if (printconfig)
6689 dev_info(&pf->pdev->dev,
6690 "pf_seid=%d main_vsi_seid=%d\n",
6691 pf->pf_seid, pf->main_vsi_seid);
6692 break;
6693 case I40E_SWITCH_ELEMENT_TYPE_PF:
6694 case I40E_SWITCH_ELEMENT_TYPE_VF:
6695 case I40E_SWITCH_ELEMENT_TYPE_EMP:
6696 case I40E_SWITCH_ELEMENT_TYPE_BMC:
6697 case I40E_SWITCH_ELEMENT_TYPE_PE:
6698 case I40E_SWITCH_ELEMENT_TYPE_PA:
6699
6700 break;
6701 default:
6702 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
6703 element_type, seid);
6704 break;
6705 }
6706}
6707
6708
6709
6710
6711
6712
6713
6714
6715
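/**
 * i40e_fetch_switch_configuration - get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Reads the switch configuration from the device, caches a copy in
 * pf->sw_config, and records the MAC, VEB, and main VSI SEIDs.
 **/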
6716int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
6717{
6718 struct i40e_aqc_get_switch_config_resp *sw_config;
6719 u16 next_seid = 0;
6720 int ret = 0;
6721 u8 *aq_buf;
6722 int i;
6723
6724 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
6725 if (!aq_buf)
6726 return -ENOMEM;
6727
6728 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
6729 do {
6730 u16 num_reported, num_total;
6731
6732 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
6733 I40E_AQ_LARGE_BUF,
6734 &next_seid, NULL);
6735 if (ret) {
6736 dev_info(&pf->pdev->dev,
6737 "get switch config failed %d aq_err=%x\n",
6738 ret, pf->hw.aq.asq_last_status);
6739 kfree(aq_buf);
6740 return -ENOENT;
6741 }
6742
6743 num_reported = le16_to_cpu(sw_config->header.num_reported);
6744 num_total = le16_to_cpu(sw_config->header.num_total);
6745
6746 if (printconfig)
6747 dev_info(&pf->pdev->dev,
6748 "header: %d reported %d total\n",
6749 num_reported, num_total);
6750
6751 if (num_reported) {
6752 int sz = sizeof(*sw_config) * num_reported;
6753
6754 kfree(pf->sw_config);
6755 pf->sw_config = kzalloc(sz, GFP_KERNEL);
6756 if (pf->sw_config)
6757 memcpy(pf->sw_config, sw_config, sz);
6758 }
6759
6760 for (i = 0; i < num_reported; i++) {
6761 struct i40e_aqc_switch_config_element_resp *ele =
6762 &sw_config->element[i];
6763
6764 i40e_setup_pf_switch_element(pf, ele, num_reported,
6765 printconfig);
6766 }
6767 } while (next_seid != 0);
6768
6769 kfree(aq_buf);
6770 return ret;
6771}
6772
6773
6774
6775
6776
6777
6778
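/**
 * i40e_setup_pf_switch - set up the PF switch resources
 * @pf: board private structure
 *
 * Returns 0 on success, negative value on failure.
 **/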
6779static int i40e_setup_pf_switch(struct i40e_pf *pf)
6780{
6781 int ret;
6782
6783
6784 ret = i40e_fetch_switch_configuration(pf, false);
6785 if (ret) {
6786 dev_info(&pf->pdev->dev,
6787 "couldn't fetch switch config, err %d, aq_err %d\n",
6788 ret, pf->hw.aq.asq_last_status);
6789 return ret;
6790 }
6791 i40e_pf_reset_stats(pf);
6792
6793
6794
6795
6796 if (pf->num_lan_qps > 1)
6797 i40e_fdir_setup(pf);
6798
6799
6800 if (pf->lan_vsi == I40E_NO_VSI) {
6801 struct i40e_vsi *vsi = NULL;
6802 u16 uplink_seid;
6803
6804
6805
6806
6807 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
6808 uplink_seid = pf->veb[pf->lan_veb]->seid;
6809 else
6810 uplink_seid = pf->mac_seid;
6811
6812 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
6813 if (!vsi) {
6814 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
6815 i40e_fdir_teardown(pf);
6816 return -EAGAIN;
6817 }
6818
6819
6820
6821
6822 pf->num_rx_queues = vsi->alloc_queue_pairs;
6823 pf->num_tx_queues = vsi->alloc_queue_pairs;
6824 } else {
6825
6826 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
6827 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
6828 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
6829 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
6830 }
6831 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
6832
6833
6834 ret = i40e_setup_pf_filter_control(pf);
6835 if (ret) {
6836 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
6837 ret);
6838
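		/* a failure here is not fatal, carry on with the setup */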
6839 }
6840
6841
6842
6843
6844 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
6845 i40e_config_rss(pf);
6846
6847
6848 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
6849 i40e_link_event(pf);
6850
6851
6852 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
6853 I40E_AQ_AN_COMPLETED) ? true : false);
6854 pf->hw.fc.requested_mode = I40E_FC_DEFAULT;
6855 if (pf->hw.phy.link_info.an_info &
6856 (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX))
6857 pf->hw.fc.current_mode = I40E_FC_FULL;
6858 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
6859 pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
6860 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
6861 pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
6862 else
6863 pf->hw.fc.current_mode = I40E_FC_DEFAULT;
6864
6865 return ret;
6866}
6867
6868
6869
6870
6871
6872
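/**
 * i40e_set_rss_size - helper to size the RSS queue count
 * @pf: board private structure
 * @queues_left: how many queues are available
 *
 * Returns the RSS size bounded by the queues available, the hardware
 * maximum, and the CPUs on the local NUMA node, rounded down to a
 * power of two.
 **/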
6873static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
6874{
6875 int num_tc0;
6876
6877 num_tc0 = min_t(int, queues_left, pf->rss_size_max);
6878 num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id()));
6879 num_tc0 = rounddown_pow_of_two(num_tc0);
6880
6881 return num_tc0;
6882}
6883
6884
6885
6886
6887
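/**
 * i40e_determine_queue_usage - apportion queues among the enabled features
 * @pf: board private structure
 **/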
6888static void i40e_determine_queue_usage(struct i40e_pf *pf)
6889{
6890 int accum_tc_size;
6891 int queues_left;
6892
6893 pf->num_lan_qps = 0;
	/* rounddown_pow_of_two(0) is undefined, so skip it when DCB
	 * left num_tc_qps at zero
	 */
	if (pf->num_tc_qps)
		pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps);
6895 accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps;
6896
6897
6898
6899
6900
6901 queues_left = pf->hw.func_caps.num_tx_qp;
6902
6903 if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) &&
6904 (pf->flags & I40E_FLAG_MQ_ENABLED)) ||
6905 !(pf->flags & (I40E_FLAG_RSS_ENABLED |
6906 I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
6907 (queues_left == 1)) {
6908
6909
6910 queues_left = 0;
6911 pf->rss_size = pf->num_lan_qps = 1;
6912
6913
6914 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
6915 I40E_FLAG_MQ_ENABLED |
6916 I40E_FLAG_FDIR_ENABLED |
6917 I40E_FLAG_FDIR_ATR_ENABLED |
6918 I40E_FLAG_DCB_ENABLED |
6919 I40E_FLAG_SRIOV_ENABLED |
6920 I40E_FLAG_VMDQ_ENABLED);
6921
6922 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
6923 !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
6924 !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
6925
6926 pf->rss_size = i40e_set_rss_size(pf, queues_left);
6927
6928 queues_left -= pf->rss_size;
6929 pf->num_lan_qps = pf->rss_size;
6930
6931 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
6932 !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
6933 (pf->flags & I40E_FLAG_DCB_ENABLED)) {
6934
6935
6936
6937
6938 queues_left -= accum_tc_size;
6939
6940 pf->rss_size = i40e_set_rss_size(pf, queues_left);
6941
6942 queues_left -= pf->rss_size;
6943 if (queues_left < 0) {
6944 dev_info(&pf->pdev->dev, "not enough queues for DCB\n");
6945 return;
6946 }
6947
6948 pf->num_lan_qps = pf->rss_size + accum_tc_size;
6949
6950 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
6951 (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
6952 !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
6953
6954 queues_left -= 1;
6955
6956 pf->rss_size = i40e_set_rss_size(pf, queues_left);
6957
6958 queues_left -= pf->rss_size;
6959 if (queues_left < 0) {
6960 dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n");
6961 return;
6962 }
6963
6964 pf->num_lan_qps = pf->rss_size;
6965
6966 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
6967 (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
6968 (pf->flags & I40E_FLAG_DCB_ENABLED)) {
6969
6970
6971
6972
6973
6974 queues_left -= 1;
6975 queues_left -= accum_tc_size;
6976
6977 pf->rss_size = i40e_set_rss_size(pf, queues_left);
6978 queues_left -= pf->rss_size;
6979 if (queues_left < 0) {
6980 dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n");
6981 return;
6982 }
6983
6984 pf->num_lan_qps = pf->rss_size + accum_tc_size;
6985
6986 } else {
6987 dev_info(&pf->pdev->dev,
6988 "Invalid configuration, flags=0x%08llx\n", pf->flags);
6989 return;
6990 }
6991
6992 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
6993 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
6994 pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left /
6995 pf->num_vf_qps));
6996 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
6997 }
6998
6999 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
7000 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
7001 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
7002 (queues_left / pf->num_vmdq_qps));
7003 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
7004 }
7007}
7008
7009
7010
7011
7012
7013
7014
7015
7016
7017
7018
7019
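/**
 * i40e_setup_pf_filter_control - set up the initial filter control settings
 * @pf: PF being configured
 *
 * Enables ethtype and macvlan filters, and Flow Director when either
 * FDIR mode is active.  Returns 0 on success, -ENOENT on failure.
 **/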
7020static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
7021{
7022 struct i40e_filter_control_settings *settings = &pf->filter_settings;
7023
7024 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
7025
7026
7027 if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED))
7028 settings->enable_fdir = true;
7029
7030
7031 settings->enable_ethtype = true;
7032 settings->enable_macvlan = true;
7033
7034 if (i40e_set_filter_control(&pf->hw, settings))
7035 return -ENOENT;
7036
7037 return 0;
7038}
7039
7040
7041
7042
7043
7044
7045
7046
7047
7048
7049
7050
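/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a pf identified by a pci_dev structure.  The
 * OS initialization, configuring of the pf private structure, and a
 * hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/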
7051static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7052{
7053 struct i40e_driver_version dv;
7054 struct i40e_pf *pf;
7055 struct i40e_hw *hw;
7056 int err = 0;
7057 u32 len;
7058
7059 err = pci_enable_device_mem(pdev);
7060 if (err)
7061 return err;
7062
7063
7064 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
7065
7066
7067
7068 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
7069 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
7070 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
7071 } else {
		err = -EIO;
		dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
7074 goto err_dma;
7075 }
7076
7077
7078 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
7079 IORESOURCE_MEM), i40e_driver_name);
7080 if (err) {
7081 dev_info(&pdev->dev,
7082 "pci_request_selected_regions failed %d\n", err);
7083 goto err_pci_reg;
7084 }
7085
7086 pci_enable_pcie_error_reporting(pdev);
7087 pci_set_master(pdev);
7088
7089
7090
7091
7092
7093
7094 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
7095 if (!pf) {
7096 err = -ENOMEM;
7097 goto err_pf_alloc;
7098 }
7099 pf->next_vsi = 0;
7100 pf->pdev = pdev;
7101 set_bit(__I40E_DOWN, &pf->state);
7102
7103 hw = &pf->hw;
7104 hw->back = pf;
7105 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
7106 pci_resource_len(pdev, 0));
7107 if (!hw->hw_addr) {
7108 err = -EIO;
7109 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
7110 (unsigned int)pci_resource_start(pdev, 0),
7111 (unsigned int)pci_resource_len(pdev, 0), err);
7112 goto err_ioremap;
7113 }
7114 hw->vendor_id = pdev->vendor;
7115 hw->device_id = pdev->device;
7116 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
7117 hw->subsystem_vendor_id = pdev->subsystem_vendor;
7118 hw->subsystem_device_id = pdev->subsystem_device;
7119 hw->bus.device = PCI_SLOT(pdev->devfn);
7120 hw->bus.func = PCI_FUNC(pdev->devfn);
7121
7122
7123 err = i40e_pf_reset(hw);
7124 if (err) {
7125 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
7126 goto err_pf_reset;
7127 }
7128 pf->pfr_count++;
7129
7130 hw->aq.num_arq_entries = I40E_AQ_LEN;
7131 hw->aq.num_asq_entries = I40E_AQ_LEN;
7132 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
7133 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
7134 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
7135 snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
7136 "%s-pf%d:misc",
7137 dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);
7138
7139 err = i40e_init_shared_code(hw);
7140 if (err) {
7141 dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
7142 goto err_pf_reset;
7143 }
7144
7145 err = i40e_init_adminq(hw);
7146 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
7147 if (err) {
7148 dev_info(&pdev->dev,
7149 "init_adminq failed: %d expecting API %02x.%02x\n",
7150 err,
7151 I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
7152 goto err_pf_reset;
7153 }
7154
7155 err = i40e_get_capabilities(pf);
7156 if (err)
7157 goto err_adminq_setup;
7158
7159 err = i40e_sw_init(pf);
7160 if (err) {
7161 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
7162 goto err_sw_init;
7163 }
7164
7165 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
7166 hw->func_caps.num_rx_qp,
7167 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
7168 if (err) {
7169 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
7170 goto err_init_lan_hmc;
7171 }
7172
7173 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
7174 if (err) {
7175 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
7176 err = -ENOENT;
7177 goto err_configure_lan_hmc;
7178 }
7179
7180 i40e_get_mac_addr(hw, hw->mac.addr);
7181 if (i40e_validate_mac_addr(hw->mac.addr)) {
7182 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
7183 err = -EIO;
7184 goto err_mac_addr;
7185 }
7186 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
7187 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
7188
7189 pci_set_drvdata(pdev, pf);
7190 pci_save_state(pdev);
7191
7192
7193 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
7194 pf->service_timer_period = HZ;
7195
7196 INIT_WORK(&pf->service_task, i40e_service_task);
7197 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
7198 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
7199 pf->link_check_timeout = jiffies;
7200
7201
7202 i40e_determine_queue_usage(pf);
7203 i40e_init_interrupt_scheme(pf);
7204
7205
7206
7207
7208 len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
7209 pf->vsi = kzalloc(len, GFP_KERNEL);
7210 if (!pf->vsi) {
7211 err = -ENOMEM;
7212 goto err_switch_setup;
7213 }
7214
7215 err = i40e_setup_pf_switch(pf);
7216 if (err) {
7217 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
7218 goto err_vsis;
7219 }
7220
7221
7222
7223
7224
7225 clear_bit(__I40E_DOWN, &pf->state);
7226
7227
7228
7229
7230
7231
7232 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7233 err = i40e_setup_misc_vector(pf);
7234 if (err) {
7235 dev_info(&pdev->dev,
7236 "setup of misc vector failed: %d\n", err);
7237 goto err_vsis;
7238 }
7239 }
7240
7241
7242 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
7243 (pf->flags & I40E_FLAG_MSIX_ENABLED)) {
7244 u32 val;
7245
7246
7247 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
7248 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
7249 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
7250 i40e_flush(hw);
7251 }
7252
7253 i40e_dbg_pf_init(pf);
7254
7255
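	/* tell the firmware that we're starting */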
7256 dv.major_version = DRV_VERSION_MAJOR;
7257 dv.minor_version = DRV_VERSION_MINOR;
7258 dv.build_version = DRV_VERSION_BUILD;
7259 dv.subbuild_version = 0;
7260 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
7261
7262
7263 mod_timer(&pf->service_timer,
7264 round_jiffies(jiffies + pf->service_timer_period));
7265
7266 return 0;
7267
7268
7269err_vsis:
7270 set_bit(__I40E_DOWN, &pf->state);
7271err_switch_setup:
7272 i40e_clear_interrupt_scheme(pf);
7273 kfree(pf->vsi);
7274 del_timer_sync(&pf->service_timer);
7275err_mac_addr:
7276err_configure_lan_hmc:
7277 (void)i40e_shutdown_lan_hmc(hw);
7278err_init_lan_hmc:
7279 kfree(pf->qp_pile);
7280 kfree(pf->irq_pile);
7281err_sw_init:
7282err_adminq_setup:
7283 (void)i40e_shutdown_adminq(hw);
7284err_pf_reset:
7285 iounmap(hw->hw_addr);
7286err_ioremap:
7287 kfree(pf);
7288err_pf_alloc:
7289 pci_disable_pcie_error_reporting(pdev);
7290 pci_release_selected_regions(pdev,
7291 pci_select_bars(pdev, IORESOURCE_MEM));
7292err_pci_reg:
7293err_dma:
7294 pci_disable_device(pdev);
7295 return err;
7296}
7297
7298
7299
7300
7301
7302
7303
7304
7305
7306
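/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver that
 * it should release a PCI device.  This could be caused by a Hot-Plug
 * event, or because the driver is going to be removed from memory.
 **/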
7307static void i40e_remove(struct pci_dev *pdev)
7308{
7309 struct i40e_pf *pf = pci_get_drvdata(pdev);
7310 i40e_status ret_code;
7311 u32 reg;
7312 int i;
7313
7314 i40e_dbg_pf_exit(pf);
7315
7316 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
7317 i40e_free_vfs(pf);
7318 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
7319 }
7320
7321
	set_bit(__I40E_DOWN, &pf->state);
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	i40e_fdir_teardown(pf);

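	/* if there is a switch structure or any orphans, remove them;
	 * this will leave only the PF's VSI remaining
	 */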
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

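	/* shut down the PF's own VSI just before taking down the
	 * adminq and HMC
	 */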
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	i40e_stop_misc_vector(pf);
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		synchronize_irq(pf->msix_entries[0].vector);
		free_irq(pf->msix_entries[0].vector, pf);
	}

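	/* shutdown and destroy the HMC */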
	ret_code = i40e_shutdown_lan_hmc(&pf->hw);
	if (ret_code)
		dev_warn(&pdev->dev,
			 "Failed to destroy the HMC resources: %d\n", ret_code);

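	/* shutdown the adminq */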
	i40e_aq_queue_shutdown(&pf->hw, true);
	ret_code = i40e_shutdown_adminq(&pf->hw);
	if (ret_code)
		dev_warn(&pdev->dev,
			 "Failed to destroy the Admin Queue resources: %d\n",
			 ret_code);

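	/* clear all dynamic memory lists of rings, q_vectors, and VSIs */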
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->irq_pile);
	kfree(pf->sw_config);
	kfree(pf->vsi);

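	/* force a PF reset to clean up anything left behind */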
	reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
	wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
	i40e_flush(&pf->hw);

	iounmap(pf->hw.hw_addr);
	kfree(pf);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
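/**
 * i40e_pci_error_detected - notification that a PCI error has been detected
 * @pdev: PCI device information struct
 * @error: the type of PCI error
 *
 * Called to warn that something happened on the PCI bus and that error
 * handling is in progress.  Gives the driver a chance to quiesce traffic
 * before the reset is attempted.
 **/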
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

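	/* shutdown all operations */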
	i40e_pf_quiesce_all_vsi(pf);

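	/* request a slot reset */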
	return PCI_ERS_RESULT_NEED_RESET;
}

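/**
 * i40e_pci_error_slot_reset - called after the PCI slot has been reset
 * @pdev: PCI device information struct
 *
 * Called after the PCI bus has been reset.  Re-enables the device,
 * restores its PCI configuration, and checks whether the hardware looks
 * usable again; the full driver restart happens later in the resume
 * callback.
 **/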
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;
	u32 reg;

	dev_info(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

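		/* recovered only if no global device reset is pending */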
		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_cleanup_aer_uncorrect_error_status failed 0x%x\n",
			 err);
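		/* non-fatal, continue */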
	}

	return result;
}

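/**
 * i40e_pci_error_resume - restart normal operation after a PCI error
 * @pdev: PCI device information struct
 *
 * Called when the error recovery framework indicates that normal
 * operation can resume; kicks off the driver's reset/rebuild path.
 **/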
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s\n", __func__);
	i40e_handle_reset_warning(pf);
}

static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};

static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
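/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded.  All it does is set up the debugfs support and register
 * with the PCI subsystem.
 **/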
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);
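/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory; it unregisters the PCI driver and tears down the
 * debugfs support.
 **/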
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);