// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */

4#include <linux/etherdevice.h>
5#include <linux/of_net.h>
6#include <linux/pci.h>
7#include <linux/bpf.h>
8
9
10#include "i40e.h"
11#include "i40e_diag.h"
12#include "i40e_xsk.h"
13#include <net/udp_tunnel.h>
14#include <net/xdp_sock.h>
15
16
17
18
19#define CREATE_TRACE_POINTS
20#include "i40e_trace.h"
21
22const char i40e_driver_name[] = "i40e";
23static const char i40e_driver_string[] =
24 "Intel(R) Ethernet Connection XL710 Network Driver";
25
26#define DRV_KERN "-k"
27
28#define DRV_VERSION_MAJOR 2
29#define DRV_VERSION_MINOR 8
30#define DRV_VERSION_BUILD 20
31#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
32 __stringify(DRV_VERSION_MINOR) "." \
33 __stringify(DRV_VERSION_BUILD) DRV_KERN
34const char i40e_driver_version_str[] = DRV_VERSION;
35static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
36
37
38static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
39static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
40static int i40e_add_vsi(struct i40e_vsi *vsi);
41static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
42static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
43static int i40e_setup_misc_vector(struct i40e_pf *pf);
44static void i40e_determine_queue_usage(struct i40e_pf *pf);
45static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
46static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
47static int i40e_reset(struct i40e_pf *pf);
48static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
49static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
50static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
51static bool i40e_check_recovery_mode(struct i40e_pf *pf);
52static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
53static void i40e_fdir_sb_setup(struct i40e_pf *pf);
54static int i40e_veb_get_bw_info(struct i40e_veb *veb);
55static int i40e_get_capabilities(struct i40e_pf *pf,
56 enum i40e_admin_queue_opc list_type);
57
58
59
60
61
62
63
64
65
66static const struct pci_device_id i40e_pci_tbl[] = {
67 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
68 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
69 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
70 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
71 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
72 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
73 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
74 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
75 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
76 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
77 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
78 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
79 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
80 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
81 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
82 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
83 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
84 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
85 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
86 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
87 {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
88 {PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
89 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
90 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
91
92 {0, }
93};
94MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
95
96#define I40E_MAX_VF_COUNT 128
97static int debug = -1;
98module_param(debug, uint, 0);
99MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
100
101MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
102MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
103MODULE_LICENSE("GPL v2");
104MODULE_VERSION(DRV_VERSION);
105
106static struct workqueue_struct *i40e_wq;
107
108
109
110
111
112
113
114
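/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/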
115int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
116 u64 size, u32 alignment)
117{
118 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
119
120 mem->size = ALIGN(size, alignment);
121 mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
122 GFP_KERNEL);
123 if (!mem->va)
124 return -ENOMEM;
125
126 return 0;
127}
128
129
130
131
132
133
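/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/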
134int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
135{
136 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
137
138 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
139 mem->va = NULL;
140 mem->pa = 0;
141 mem->size = 0;
142
143 return 0;
144}
145
146
147
148
149
150
151
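/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/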
152int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
153 u32 size)
154{
155 mem->size = size;
156 mem->va = kzalloc(size, GFP_KERNEL);
157
158 if (!mem->va)
159 return -ENOMEM;
160
161 return 0;
162}
163
164
165
166
167
168
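/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/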
169int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
170{
	/* it's ok to kfree a NULL pointer */
172 kfree(mem->va);
173 mem->va = NULL;
174 mem->size = 0;
175
176 return 0;
177}
178
179
180
181
182
183
184
185
186
187
188
189
190
191
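/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/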
192static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
193 u16 needed, u16 id)
194{
195 int ret = -ENOMEM;
196 int i, j;
197
198 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
199 dev_info(&pf->pdev->dev,
200 "param err: pile=%s needed=%d id=0x%04x\n",
201 pile ? "<valid>" : "<null>", needed, id);
202 return -EINVAL;
203 }
204
205
206 i = pile->search_hint;
207 while (i < pile->num_entries) {
208
209 if (pile->list[i] & I40E_PILE_VALID_BIT) {
210 i++;
211 continue;
212 }
213
214
215 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
216 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
217 break;
218 }
219
220 if (j == needed) {
221
222 for (j = 0; j < needed; j++)
223 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
224 ret = i;
225 pile->search_hint = i + j;
226 break;
227 }
228
229
230 i += j;
231 }
232
233 return ret;
234}
235
236
237
238
239
240
241
242
243
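/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/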
244static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
245{
246 int valid_id = (id | I40E_PILE_VALID_BIT);
247 int count = 0;
248 int i;
249
250 if (!pile || index >= pile->num_entries)
251 return -EINVAL;
252
253 for (i = index;
254 i < pile->num_entries && pile->list[i] == valid_id;
255 i++) {
256 pile->list[i] = 0;
257 count++;
258 }
259
260 if (count && index < pile->search_hint)
261 pile->search_hint = index;
262
263 return count;
264}
265
266
267
268
269
270
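/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/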
271struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
272{
273 int i;
274
275 for (i = 0; i < pf->num_alloc_vsi; i++)
276 if (pf->vsi[i] && (pf->vsi[i]->id == id))
277 return pf->vsi[i];
278
279 return NULL;
280}
281
282
283
284
285
286
287
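/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/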
288void i40e_service_event_schedule(struct i40e_pf *pf)
289{
290 if ((!test_bit(__I40E_DOWN, pf->state) &&
291 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
292 test_bit(__I40E_RECOVERY_MODE, pf->state))
293 queue_work(i40e_wq, &pf->service_task);
294}
295
296
297
298
299
300
301
302
303
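/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/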
304static void i40e_tx_timeout(struct net_device *netdev)
305{
306 struct i40e_netdev_priv *np = netdev_priv(netdev);
307 struct i40e_vsi *vsi = np->vsi;
308 struct i40e_pf *pf = vsi->back;
309 struct i40e_ring *tx_ring = NULL;
310 unsigned int i, hung_queue = 0;
311 u32 head, val;
312
313 pf->tx_timeout_count++;
314
315
316 for (i = 0; i < netdev->num_tx_queues; i++) {
317 struct netdev_queue *q;
318 unsigned long trans_start;
319
320 q = netdev_get_tx_queue(netdev, i);
321 trans_start = q->trans_start;
322 if (netif_xmit_stopped(q) &&
323 time_after(jiffies,
324 (trans_start + netdev->watchdog_timeo))) {
325 hung_queue = i;
326 break;
327 }
328 }
329
330 if (i == netdev->num_tx_queues) {
331 netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
332 } else {
333
334 for (i = 0; i < vsi->num_queue_pairs; i++) {
335 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
336 if (hung_queue ==
337 vsi->tx_rings[i]->queue_index) {
338 tx_ring = vsi->tx_rings[i];
339 break;
340 }
341 }
342 }
343 }
344
345 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
346 pf->tx_timeout_recovery_level = 1;
347 else if (time_before(jiffies,
348 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
349 return;
350
351
352 if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
353 return;
354
355 if (tx_ring) {
356 head = i40e_get_head(tx_ring);
357
358 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
359 val = rd32(&pf->hw,
360 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
361 tx_ring->vsi->base_vector - 1));
362 else
363 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
364
365 netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
366 vsi->seid, hung_queue, tx_ring->next_to_clean,
367 head, tx_ring->next_to_use,
368 readl(tx_ring->tail), val);
369 }
370
371 pf->tx_timeout_last_recovery = jiffies;
372 netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
373 pf->tx_timeout_recovery_level, hung_queue);
374
375 switch (pf->tx_timeout_recovery_level) {
376 case 1:
377 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
378 break;
379 case 2:
380 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
381 break;
382 case 3:
383 set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
384 break;
385 default:
386 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
387 break;
388 }
389
390 i40e_service_event_schedule(pf);
391 pf->tx_timeout_recovery_level++;
392}
393
394
395
396
397
398
399
400
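/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/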
401struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
402{
403 return &vsi->net_stats;
404}
405
406
407
408
409
410
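/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to read stats from
 * @stats: rtnl_link_stats64 struct to populate
 *
 * Reads a consistent snapshot of the ring counters via the u64_stats
 * syncp and accumulates them into the netdev Tx stats.
 **/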
411static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
412 struct rtnl_link_stats64 *stats)
413{
414 u64 bytes, packets;
415 unsigned int start;
416
417 do {
418 start = u64_stats_fetch_begin_irq(&ring->syncp);
419 packets = ring->stats.packets;
420 bytes = ring->stats.bytes;
421 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
422
423 stats->tx_packets += packets;
424 stats->tx_bytes += bytes;
425}
426
427
428
429
430
431
432
433
434
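/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: data structure to store statistics
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/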
435static void i40e_get_netdev_stats_struct(struct net_device *netdev,
436 struct rtnl_link_stats64 *stats)
437{
438 struct i40e_netdev_priv *np = netdev_priv(netdev);
439 struct i40e_vsi *vsi = np->vsi;
440 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
441 struct i40e_ring *ring;
442 int i;
443
444 if (test_bit(__I40E_VSI_DOWN, vsi->state))
445 return;
446
447 if (!vsi->tx_rings)
448 return;
449
450 rcu_read_lock();
451 for (i = 0; i < vsi->num_queue_pairs; i++) {
452 u64 bytes, packets;
453 unsigned int start;
454
455 ring = READ_ONCE(vsi->tx_rings[i]);
456 if (!ring)
457 continue;
458 i40e_get_netdev_stats_struct_tx(ring, stats);
459
460 if (i40e_enabled_xdp_vsi(vsi)) {
461 ring++;
462 i40e_get_netdev_stats_struct_tx(ring, stats);
463 }
464
465 ring++;
466 do {
467 start = u64_stats_fetch_begin_irq(&ring->syncp);
468 packets = ring->stats.packets;
469 bytes = ring->stats.bytes;
470 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
471
472 stats->rx_packets += packets;
473 stats->rx_bytes += bytes;
474
475 }
476 rcu_read_unlock();
477
478
479 stats->multicast = vsi_stats->multicast;
480 stats->tx_errors = vsi_stats->tx_errors;
481 stats->tx_dropped = vsi_stats->tx_dropped;
482 stats->rx_errors = vsi_stats->rx_errors;
483 stats->rx_dropped = vsi_stats->rx_dropped;
484 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
485 stats->rx_length_errors = vsi_stats->rx_length_errors;
486}
487
488
489
490
491
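/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/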
492void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
493{
494 struct rtnl_link_stats64 *ns;
495 int i;
496
497 if (!vsi)
498 return;
499
500 ns = i40e_get_vsi_stats_struct(vsi);
501 memset(ns, 0, sizeof(*ns));
502 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
503 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
504 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
505 if (vsi->rx_rings && vsi->rx_rings[0]) {
506 for (i = 0; i < vsi->num_queue_pairs; i++) {
507 memset(&vsi->rx_rings[i]->stats, 0,
508 sizeof(vsi->rx_rings[i]->stats));
509 memset(&vsi->rx_rings[i]->rx_stats, 0,
510 sizeof(vsi->rx_rings[i]->rx_stats));
511 memset(&vsi->tx_rings[i]->stats, 0,
512 sizeof(vsi->tx_rings[i]->stats));
513 memset(&vsi->tx_rings[i]->tx_stats, 0,
514 sizeof(vsi->tx_rings[i]->tx_stats));
515 }
516 }
517 vsi->stat_offsets_loaded = false;
518}
519
520
521
522
523
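/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/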
524void i40e_pf_reset_stats(struct i40e_pf *pf)
525{
526 int i;
527
528 memset(&pf->stats, 0, sizeof(pf->stats));
529 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
530 pf->stat_offsets_loaded = false;
531
532 for (i = 0; i < I40E_MAX_VEB; i++) {
533 if (pf->veb[i]) {
534 memset(&pf->veb[i]->stats, 0,
535 sizeof(pf->veb[i]->stats));
536 memset(&pf->veb[i]->stats_offsets, 0,
537 sizeof(pf->veb[i]->stats_offsets));
538 memset(&pf->veb[i]->tc_stats, 0,
539 sizeof(pf->veb[i]->tc_stats));
540 memset(&pf->veb[i]->tc_stats_offsets, 0,
541 sizeof(pf->veb[i]->tc_stats_offsets));
542 pf->veb[i]->stat_offsets_loaded = false;
543 }
544 }
545 pf->hw_csum_rx_error = 0;
546}
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
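/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/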
563static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
564 bool offset_loaded, u64 *offset, u64 *stat)
565{
566 u64 new_data;
567
568 if (hw->device_id == I40E_DEV_ID_QEMU) {
569 new_data = rd32(hw, loreg);
570 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
571 } else {
572 new_data = rd64(hw, loreg);
573 }
574 if (!offset_loaded)
575 *offset = new_data;
576 if (likely(new_data >= *offset))
577 *stat = new_data - *offset;
578 else
579 *stat = (new_data + BIT_ULL(48)) - *offset;
580 *stat &= 0xFFFFFFFFFFFFULL;
581}
582
583
584
585
586
587
588
589
590
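/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/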
591static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
592 bool offset_loaded, u64 *offset, u64 *stat)
593{
594 u32 new_data;
595
596 new_data = rd32(hw, reg);
597 if (!offset_loaded)
598 *offset = new_data;
599 if (likely(new_data >= *offset))
600 *stat = (u32)(new_data - *offset);
601 else
602 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
603}
604
605
606
607
608
609
610
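/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/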
611static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
612{
613 u32 new_data = rd32(hw, reg);
614
615 wr32(hw, reg, 1);
616 *stat += new_data;
617}
618
619
620
621
622
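/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/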
623void i40e_update_eth_stats(struct i40e_vsi *vsi)
624{
625 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
626 struct i40e_pf *pf = vsi->back;
627 struct i40e_hw *hw = &pf->hw;
628 struct i40e_eth_stats *oes;
629 struct i40e_eth_stats *es;
630
631 es = &vsi->eth_stats;
632 oes = &vsi->eth_stats_offsets;
633
634
635 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
636 vsi->stat_offsets_loaded,
637 &oes->tx_errors, &es->tx_errors);
638 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
639 vsi->stat_offsets_loaded,
640 &oes->rx_discards, &es->rx_discards);
641 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
642 vsi->stat_offsets_loaded,
643 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
644
645 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
646 I40E_GLV_GORCL(stat_idx),
647 vsi->stat_offsets_loaded,
648 &oes->rx_bytes, &es->rx_bytes);
649 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
650 I40E_GLV_UPRCL(stat_idx),
651 vsi->stat_offsets_loaded,
652 &oes->rx_unicast, &es->rx_unicast);
653 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
654 I40E_GLV_MPRCL(stat_idx),
655 vsi->stat_offsets_loaded,
656 &oes->rx_multicast, &es->rx_multicast);
657 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
658 I40E_GLV_BPRCL(stat_idx),
659 vsi->stat_offsets_loaded,
660 &oes->rx_broadcast, &es->rx_broadcast);
661
662 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
663 I40E_GLV_GOTCL(stat_idx),
664 vsi->stat_offsets_loaded,
665 &oes->tx_bytes, &es->tx_bytes);
666 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
667 I40E_GLV_UPTCL(stat_idx),
668 vsi->stat_offsets_loaded,
669 &oes->tx_unicast, &es->tx_unicast);
670 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
671 I40E_GLV_MPTCL(stat_idx),
672 vsi->stat_offsets_loaded,
673 &oes->tx_multicast, &es->tx_multicast);
674 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
675 I40E_GLV_BPTCL(stat_idx),
676 vsi->stat_offsets_loaded,
677 &oes->tx_broadcast, &es->tx_broadcast);
678 vsi->stat_offsets_loaded = true;
679}
680
681
682
683
684
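/**
 * i40e_update_veb_stats - Update Switch component statistics (VEB)
 * @veb: the VEB being updated
 **/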
685void i40e_update_veb_stats(struct i40e_veb *veb)
686{
687 struct i40e_pf *pf = veb->pf;
688 struct i40e_hw *hw = &pf->hw;
689 struct i40e_eth_stats *oes;
690 struct i40e_eth_stats *es;
691 struct i40e_veb_tc_stats *veb_oes;
692 struct i40e_veb_tc_stats *veb_es;
693 int i, idx = 0;
694
695 idx = veb->stats_idx;
696 es = &veb->stats;
697 oes = &veb->stats_offsets;
698 veb_es = &veb->tc_stats;
699 veb_oes = &veb->tc_stats_offsets;
700
701
702 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
703 veb->stat_offsets_loaded,
704 &oes->tx_discards, &es->tx_discards);
705 if (hw->revision_id > 0)
706 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
707 veb->stat_offsets_loaded,
708 &oes->rx_unknown_protocol,
709 &es->rx_unknown_protocol);
710 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
711 veb->stat_offsets_loaded,
712 &oes->rx_bytes, &es->rx_bytes);
713 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
714 veb->stat_offsets_loaded,
715 &oes->rx_unicast, &es->rx_unicast);
716 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
717 veb->stat_offsets_loaded,
718 &oes->rx_multicast, &es->rx_multicast);
719 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
720 veb->stat_offsets_loaded,
721 &oes->rx_broadcast, &es->rx_broadcast);
722
723 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
724 veb->stat_offsets_loaded,
725 &oes->tx_bytes, &es->tx_bytes);
726 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
727 veb->stat_offsets_loaded,
728 &oes->tx_unicast, &es->tx_unicast);
729 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
730 veb->stat_offsets_loaded,
731 &oes->tx_multicast, &es->tx_multicast);
732 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
733 veb->stat_offsets_loaded,
734 &oes->tx_broadcast, &es->tx_broadcast);
735 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
736 i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
737 I40E_GLVEBTC_RPCL(i, idx),
738 veb->stat_offsets_loaded,
739 &veb_oes->tc_rx_packets[i],
740 &veb_es->tc_rx_packets[i]);
741 i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
742 I40E_GLVEBTC_RBCL(i, idx),
743 veb->stat_offsets_loaded,
744 &veb_oes->tc_rx_bytes[i],
745 &veb_es->tc_rx_bytes[i]);
746 i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
747 I40E_GLVEBTC_TPCL(i, idx),
748 veb->stat_offsets_loaded,
749 &veb_oes->tc_tx_packets[i],
750 &veb_es->tc_tx_packets[i]);
751 i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
752 I40E_GLVEBTC_TBCL(i, idx),
753 veb->stat_offsets_loaded,
754 &veb_oes->tc_tx_bytes[i],
755 &veb_es->tc_tx_bytes[i]);
756 }
757 veb->stat_offsets_loaded = true;
758}
759
760
761
762
763
764
765
766
767
768
769
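/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/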
770static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
771{
772 struct i40e_pf *pf = vsi->back;
773 struct rtnl_link_stats64 *ons;
774 struct rtnl_link_stats64 *ns;
775 struct i40e_eth_stats *oes;
776 struct i40e_eth_stats *es;
777 u32 tx_restart, tx_busy;
778 struct i40e_ring *p;
779 u32 rx_page, rx_buf;
780 u64 bytes, packets;
781 unsigned int start;
782 u64 tx_linearize;
783 u64 tx_force_wb;
784 u64 rx_p, rx_b;
785 u64 tx_p, tx_b;
786 u16 q;
787
788 if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
789 test_bit(__I40E_CONFIG_BUSY, pf->state))
790 return;
791
792 ns = i40e_get_vsi_stats_struct(vsi);
793 ons = &vsi->net_stats_offsets;
794 es = &vsi->eth_stats;
795 oes = &vsi->eth_stats_offsets;
796
797
798
799
800 rx_b = rx_p = 0;
801 tx_b = tx_p = 0;
802 tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
803 rx_page = 0;
804 rx_buf = 0;
805 rcu_read_lock();
806 for (q = 0; q < vsi->num_queue_pairs; q++) {
807
808 p = READ_ONCE(vsi->tx_rings[q]);
809
810 do {
811 start = u64_stats_fetch_begin_irq(&p->syncp);
812 packets = p->stats.packets;
813 bytes = p->stats.bytes;
814 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
815 tx_b += bytes;
816 tx_p += packets;
817 tx_restart += p->tx_stats.restart_queue;
818 tx_busy += p->tx_stats.tx_busy;
819 tx_linearize += p->tx_stats.tx_linearize;
820 tx_force_wb += p->tx_stats.tx_force_wb;
821
822
823 p = &p[1];
824 do {
825 start = u64_stats_fetch_begin_irq(&p->syncp);
826 packets = p->stats.packets;
827 bytes = p->stats.bytes;
828 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
829 rx_b += bytes;
830 rx_p += packets;
831 rx_buf += p->rx_stats.alloc_buff_failed;
832 rx_page += p->rx_stats.alloc_page_failed;
833 }
834 rcu_read_unlock();
835 vsi->tx_restart = tx_restart;
836 vsi->tx_busy = tx_busy;
837 vsi->tx_linearize = tx_linearize;
838 vsi->tx_force_wb = tx_force_wb;
839 vsi->rx_page_failed = rx_page;
840 vsi->rx_buf_failed = rx_buf;
841
842 ns->rx_packets = rx_p;
843 ns->rx_bytes = rx_b;
844 ns->tx_packets = tx_p;
845 ns->tx_bytes = tx_b;
846
847
848 i40e_update_eth_stats(vsi);
849 ons->tx_errors = oes->tx_errors;
850 ns->tx_errors = es->tx_errors;
851 ons->multicast = oes->rx_multicast;
852 ns->multicast = es->rx_multicast;
853 ons->rx_dropped = oes->rx_discards;
854 ns->rx_dropped = es->rx_discards;
855 ons->tx_dropped = oes->tx_discards;
856 ns->tx_dropped = es->tx_discards;
857
858
859 if (vsi == pf->vsi[pf->lan_vsi]) {
860 ns->rx_crc_errors = pf->stats.crc_errors;
861 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
862 ns->rx_length_errors = pf->stats.rx_length_errors;
863 }
864}
865
866
867
868
869
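/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/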
870static void i40e_update_pf_stats(struct i40e_pf *pf)
871{
872 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
873 struct i40e_hw_port_stats *nsd = &pf->stats;
874 struct i40e_hw *hw = &pf->hw;
875 u32 val;
876 int i;
877
878 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
879 I40E_GLPRT_GORCL(hw->port),
880 pf->stat_offsets_loaded,
881 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
882 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
883 I40E_GLPRT_GOTCL(hw->port),
884 pf->stat_offsets_loaded,
885 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
886 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
887 pf->stat_offsets_loaded,
888 &osd->eth.rx_discards,
889 &nsd->eth.rx_discards);
890 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
891 I40E_GLPRT_UPRCL(hw->port),
892 pf->stat_offsets_loaded,
893 &osd->eth.rx_unicast,
894 &nsd->eth.rx_unicast);
895 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
896 I40E_GLPRT_MPRCL(hw->port),
897 pf->stat_offsets_loaded,
898 &osd->eth.rx_multicast,
899 &nsd->eth.rx_multicast);
900 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
901 I40E_GLPRT_BPRCL(hw->port),
902 pf->stat_offsets_loaded,
903 &osd->eth.rx_broadcast,
904 &nsd->eth.rx_broadcast);
905 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
906 I40E_GLPRT_UPTCL(hw->port),
907 pf->stat_offsets_loaded,
908 &osd->eth.tx_unicast,
909 &nsd->eth.tx_unicast);
910 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
911 I40E_GLPRT_MPTCL(hw->port),
912 pf->stat_offsets_loaded,
913 &osd->eth.tx_multicast,
914 &nsd->eth.tx_multicast);
915 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
916 I40E_GLPRT_BPTCL(hw->port),
917 pf->stat_offsets_loaded,
918 &osd->eth.tx_broadcast,
919 &nsd->eth.tx_broadcast);
920
921 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
922 pf->stat_offsets_loaded,
923 &osd->tx_dropped_link_down,
924 &nsd->tx_dropped_link_down);
925
926 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
927 pf->stat_offsets_loaded,
928 &osd->crc_errors, &nsd->crc_errors);
929
930 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
931 pf->stat_offsets_loaded,
932 &osd->illegal_bytes, &nsd->illegal_bytes);
933
934 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
935 pf->stat_offsets_loaded,
936 &osd->mac_local_faults,
937 &nsd->mac_local_faults);
938 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
939 pf->stat_offsets_loaded,
940 &osd->mac_remote_faults,
941 &nsd->mac_remote_faults);
942
943 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
944 pf->stat_offsets_loaded,
945 &osd->rx_length_errors,
946 &nsd->rx_length_errors);
947
948 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
949 pf->stat_offsets_loaded,
950 &osd->link_xon_rx, &nsd->link_xon_rx);
951 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
952 pf->stat_offsets_loaded,
953 &osd->link_xon_tx, &nsd->link_xon_tx);
954 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
955 pf->stat_offsets_loaded,
956 &osd->link_xoff_rx, &nsd->link_xoff_rx);
957 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
958 pf->stat_offsets_loaded,
959 &osd->link_xoff_tx, &nsd->link_xoff_tx);
960
961 for (i = 0; i < 8; i++) {
962 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
963 pf->stat_offsets_loaded,
964 &osd->priority_xoff_rx[i],
965 &nsd->priority_xoff_rx[i]);
966 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
967 pf->stat_offsets_loaded,
968 &osd->priority_xon_rx[i],
969 &nsd->priority_xon_rx[i]);
970 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
971 pf->stat_offsets_loaded,
972 &osd->priority_xon_tx[i],
973 &nsd->priority_xon_tx[i]);
974 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
975 pf->stat_offsets_loaded,
976 &osd->priority_xoff_tx[i],
977 &nsd->priority_xoff_tx[i]);
978 i40e_stat_update32(hw,
979 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
980 pf->stat_offsets_loaded,
981 &osd->priority_xon_2_xoff[i],
982 &nsd->priority_xon_2_xoff[i]);
983 }
984
985 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
986 I40E_GLPRT_PRC64L(hw->port),
987 pf->stat_offsets_loaded,
988 &osd->rx_size_64, &nsd->rx_size_64);
989 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
990 I40E_GLPRT_PRC127L(hw->port),
991 pf->stat_offsets_loaded,
992 &osd->rx_size_127, &nsd->rx_size_127);
993 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
994 I40E_GLPRT_PRC255L(hw->port),
995 pf->stat_offsets_loaded,
996 &osd->rx_size_255, &nsd->rx_size_255);
997 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
998 I40E_GLPRT_PRC511L(hw->port),
999 pf->stat_offsets_loaded,
1000 &osd->rx_size_511, &nsd->rx_size_511);
1001 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1002 I40E_GLPRT_PRC1023L(hw->port),
1003 pf->stat_offsets_loaded,
1004 &osd->rx_size_1023, &nsd->rx_size_1023);
1005 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1006 I40E_GLPRT_PRC1522L(hw->port),
1007 pf->stat_offsets_loaded,
1008 &osd->rx_size_1522, &nsd->rx_size_1522);
1009 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1010 I40E_GLPRT_PRC9522L(hw->port),
1011 pf->stat_offsets_loaded,
1012 &osd->rx_size_big, &nsd->rx_size_big);
1013
1014 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1015 I40E_GLPRT_PTC64L(hw->port),
1016 pf->stat_offsets_loaded,
1017 &osd->tx_size_64, &nsd->tx_size_64);
1018 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1019 I40E_GLPRT_PTC127L(hw->port),
1020 pf->stat_offsets_loaded,
1021 &osd->tx_size_127, &nsd->tx_size_127);
1022 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1023 I40E_GLPRT_PTC255L(hw->port),
1024 pf->stat_offsets_loaded,
1025 &osd->tx_size_255, &nsd->tx_size_255);
1026 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1027 I40E_GLPRT_PTC511L(hw->port),
1028 pf->stat_offsets_loaded,
1029 &osd->tx_size_511, &nsd->tx_size_511);
1030 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1031 I40E_GLPRT_PTC1023L(hw->port),
1032 pf->stat_offsets_loaded,
1033 &osd->tx_size_1023, &nsd->tx_size_1023);
1034 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1035 I40E_GLPRT_PTC1522L(hw->port),
1036 pf->stat_offsets_loaded,
1037 &osd->tx_size_1522, &nsd->tx_size_1522);
1038 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1039 I40E_GLPRT_PTC9522L(hw->port),
1040 pf->stat_offsets_loaded,
1041 &osd->tx_size_big, &nsd->tx_size_big);
1042
1043 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1044 pf->stat_offsets_loaded,
1045 &osd->rx_undersize, &nsd->rx_undersize);
1046 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1047 pf->stat_offsets_loaded,
1048 &osd->rx_fragments, &nsd->rx_fragments);
1049 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1050 pf->stat_offsets_loaded,
1051 &osd->rx_oversize, &nsd->rx_oversize);
1052 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1053 pf->stat_offsets_loaded,
1054 &osd->rx_jabber, &nsd->rx_jabber);
1055
1056
1057 i40e_stat_update_and_clear32(hw,
1058 I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
1059 &nsd->fd_atr_match);
1060 i40e_stat_update_and_clear32(hw,
1061 I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
1062 &nsd->fd_sb_match);
1063 i40e_stat_update_and_clear32(hw,
1064 I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
1065 &nsd->fd_atr_tunnel_match);
1066
1067 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1068 nsd->tx_lpi_status =
1069 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1070 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1071 nsd->rx_lpi_status =
1072 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1073 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1074 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1075 pf->stat_offsets_loaded,
1076 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1077 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1078 pf->stat_offsets_loaded,
1079 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1080
1081 if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1082 !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
1083 nsd->fd_sb_status = true;
1084 else
1085 nsd->fd_sb_status = false;
1086
1087 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1088 !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
1089 nsd->fd_atr_status = true;
1090 else
1091 nsd->fd_atr_status = false;
1092
1093 pf->stat_offsets_loaded = true;
1094}
1095
1096
1097
1098
1099
1100
1101
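/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/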
1102void i40e_update_stats(struct i40e_vsi *vsi)
1103{
1104 struct i40e_pf *pf = vsi->back;
1105
1106 if (vsi == pf->vsi[pf->lan_vsi])
1107 i40e_update_pf_stats(pf);
1108
1109 i40e_update_vsi_stats(vsi);
1110}
1111
1112
1113
1114
1115
1116
1117
1118
1119
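/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/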
1120static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1121 const u8 *macaddr, s16 vlan)
1122{
1123 struct i40e_mac_filter *f;
1124 u64 key;
1125
1126 if (!vsi || !macaddr)
1127 return NULL;
1128
1129 key = i40e_addr_to_hkey(macaddr);
1130 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1131 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1132 (vlan == f->vlan))
1133 return f;
1134 }
1135 return NULL;
1136}
1137
1138
1139
1140
1141
1142
1143
1144
1145
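/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/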
1146struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
1147{
1148 struct i40e_mac_filter *f;
1149 u64 key;
1150
1151 if (!vsi || !macaddr)
1152 return NULL;
1153
1154 key = i40e_addr_to_hkey(macaddr);
1155 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1156 if ((ether_addr_equal(macaddr, f->macaddr)))
1157 return f;
1158 }
1159 return NULL;
1160}
1161
1162
1163
1164
1165
1166
1167
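/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/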
1168bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1169{
1170
1171 if (vsi->info.pvid)
1172 return true;
1173
	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ANY.  This is tracked by the
	 * has_vlan_filter flag, which is set when such filters are added
	 * and updated when filters are converted between VLAN and
	 * non-VLAN mode in i40e_correct_mac_vlan_filters().
	 */
1194 return vsi->has_vlan_filter;
1195}
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
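/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update all of the non-VLAN filters based on the presence of VLAN filters.
 * If a PVID is assigned, all filters must use that VLAN.  Otherwise, while
 * any VLAN filters are active, non-VLAN (I40E_VLAN_ANY) filters are moved
 * to VLAN 0, and once the last VLAN filter is removed they are converted
 * back to I40E_VLAN_ANY.
 *
 * Returns 0 on success, or -ENOMEM if a replacement filter cannot be
 * allocated.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/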
1226static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
1227 struct hlist_head *tmp_add_list,
1228 struct hlist_head *tmp_del_list,
1229 int vlan_filters)
1230{
1231 s16 pvid = le16_to_cpu(vsi->info.pvid);
1232 struct i40e_mac_filter *f, *add_head;
1233 struct i40e_new_mac_filter *new;
1234 struct hlist_node *h;
1235 int bkt, new_vlan;
1236
	/* First, fix up the filters that are about to be added so that they
	 * use the correct VLAN: force them onto the PVID if one is set,
	 * move I40E_VLAN_ANY filters to VLAN 0 while VLAN filters exist,
	 * and restore VLAN 0 filters to I40E_VLAN_ANY when none remain.
	 */
1252 hlist_for_each_entry(new, tmp_add_list, hlist) {
1253 if (pvid && new->f->vlan != pvid)
1254 new->f->vlan = pvid;
1255 else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
1256 new->f->vlan = 0;
1257 else if (!vlan_filters && new->f->vlan == 0)
1258 new->f->vlan = I40E_VLAN_ANY;
1259 }
1260
1261
1262 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1263
1264
1265
1266
1267
1268 if ((pvid && f->vlan != pvid) ||
1269 (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1270 (!vlan_filters && f->vlan == 0)) {
1271
1272 if (pvid)
1273 new_vlan = pvid;
1274 else if (vlan_filters)
1275 new_vlan = 0;
1276 else
1277 new_vlan = I40E_VLAN_ANY;
1278
1279
1280 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
1281 if (!add_head)
1282 return -ENOMEM;
1283
1284
1285 new = kzalloc(sizeof(*new), GFP_ATOMIC);
1286 if (!new)
1287 return -ENOMEM;
1288
1289 new->f = add_head;
1290 new->state = add_head->state;
1291
1292
1293 hlist_add_head(&new->hlist, tmp_add_list);
1294
1295
1296 f->state = I40E_FILTER_REMOVE;
1297 hash_del(&f->hlist);
1298 hlist_add_head(&f->hlist, tmp_del_list);
1299 }
1300 }
1301
1302 vsi->has_vlan_filter = !!vlan_filters;
1303
1304 return 0;
1305}
1306
1307
1308
1309
1310
1311
1312
1313
1314
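/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/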
1315static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1316{
1317 struct i40e_aqc_remove_macvlan_element_data element;
1318 struct i40e_pf *pf = vsi->back;
1319
1320
1321 if (vsi->type != I40E_VSI_MAIN)
1322 return;
1323
1324 memset(&element, 0, sizeof(element));
1325 ether_addr_copy(element.mac_addr, macaddr);
1326 element.vlan_tag = 0;
1327
1328 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1329 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1330
1331 memset(&element, 0, sizeof(element));
1332 ether_addr_copy(element.mac_addr, macaddr);
1333 element.vlan_tag = 0;
1334
1335 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1336 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1337 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1338}
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
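/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/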
1351struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1352 const u8 *macaddr, s16 vlan)
1353{
1354 struct i40e_mac_filter *f;
1355 u64 key;
1356
1357 if (!vsi || !macaddr)
1358 return NULL;
1359
1360 f = i40e_find_filter(vsi, macaddr, vlan);
1361 if (!f) {
1362 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1363 if (!f)
1364 return NULL;
1365
1366
1367
1368
1369 if (vlan >= 0)
1370 vsi->has_vlan_filter = true;
1371
1372 ether_addr_copy(f->macaddr, macaddr);
1373 f->vlan = vlan;
1374 f->state = I40E_FILTER_NEW;
1375 INIT_HLIST_NODE(&f->hlist);
1376
1377 key = i40e_addr_to_hkey(macaddr);
1378 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1379
1380 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1381 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1382 }
1383
	/* If we're asked to add a filter that has been marked for removal,
	 * it is safe to simply restore it to active state, since the
	 * filter was never actually removed from the hardware.
	 */
1392 if (f->state == I40E_FILTER_REMOVE)
1393 f->state = I40E_FILTER_ACTIVE;
1394
1395 return f;
1396}
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
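/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/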
1413void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
1414{
1415 if (!f)
1416 return;
1417
	/* If the filter was never added to firmware then we can just delete it
	 * directly and we don't want to set the status to remove or else an
	 * admin queue command will unnecessarily fire.
	 */
1422 if ((f->state == I40E_FILTER_FAILED) ||
1423 (f->state == I40E_FILTER_NEW)) {
1424 hash_del(&f->hlist);
1425 kfree(f);
1426 } else {
1427 f->state = I40E_FILTER_REMOVE;
1428 }
1429
1430 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1431 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1432}
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
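/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/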
1446void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
1447{
1448 struct i40e_mac_filter *f;
1449
1450 if (!vsi || !macaddr)
1451 return;
1452
1453 f = i40e_find_filter(vsi, macaddr, vlan);
1454 __i40e_del_filter(vsi, f);
1455}
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
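/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/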
1469struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
1470 const u8 *macaddr)
1471{
1472 struct i40e_mac_filter *f, *add = NULL;
1473 struct hlist_node *h;
1474 int bkt;
1475
1476 if (vsi->info.pvid)
1477 return i40e_add_filter(vsi, macaddr,
1478 le16_to_cpu(vsi->info.pvid));
1479
1480 if (!i40e_is_vsi_in_vlan(vsi))
1481 return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
1482
1483 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1484 if (f->state == I40E_FILTER_REMOVE)
1485 continue;
1486 add = i40e_add_filter(vsi, macaddr, f->vlan);
1487 if (!add)
1488 return NULL;
1489 }
1490
1491 return add;
1492}
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
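/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/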
1504int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
1505{
1506 struct i40e_mac_filter *f;
1507 struct hlist_node *h;
1508 bool found = false;
1509 int bkt;
1510
1511 lockdep_assert_held(&vsi->mac_filter_hash_lock);
1512 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1513 if (ether_addr_equal(macaddr, f->macaddr)) {
1514 __i40e_del_filter(vsi, f);
1515 found = true;
1516 }
1517 }
1518
1519 if (found)
1520 return 0;
1521 else
1522 return -ENOENT;
1523}
1524
1525
1526
1527
1528
1529
1530
1531
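/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/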
1532static int i40e_set_mac(struct net_device *netdev, void *p)
1533{
1534 struct i40e_netdev_priv *np = netdev_priv(netdev);
1535 struct i40e_vsi *vsi = np->vsi;
1536 struct i40e_pf *pf = vsi->back;
1537 struct i40e_hw *hw = &pf->hw;
1538 struct sockaddr *addr = p;
1539
1540 if (!is_valid_ether_addr(addr->sa_data))
1541 return -EADDRNOTAVAIL;
1542
1543 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1544 netdev_info(netdev, "already using mac address %pM\n",
1545 addr->sa_data);
1546 return 0;
1547 }
1548
1549 if (test_bit(__I40E_DOWN, pf->state) ||
1550 test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
1551 return -EADDRNOTAVAIL;
1552
1553 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1554 netdev_info(netdev, "returning to hw mac address %pM\n",
1555 hw->mac.addr);
1556 else
1557 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1558
1559
1560
1561
1562
1563
1564
1565 spin_lock_bh(&vsi->mac_filter_hash_lock);
1566 i40e_del_mac_filter(vsi, netdev->dev_addr);
1567 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1568 i40e_add_mac_filter(vsi, netdev->dev_addr);
1569 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1570
1571 if (vsi->type == I40E_VSI_MAIN) {
1572 i40e_status ret;
1573
1574 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
1575 addr->sa_data, NULL);
1576 if (ret)
1577 netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
1578 i40e_stat_str(hw, ret),
1579 i40e_aq_str(hw, hw->aq.asq_last_status));
1580 }
1581
1582
1583
1584
1585 i40e_service_event_schedule(pf);
1586 return 0;
1587}
1588
1589
1590
1591
1592
1593
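/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: pointer to lookup table of lut_size
 * @lut_size: size of the lookup table
 **/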
1594static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
1595 u8 *lut, u16 lut_size)
1596{
1597 struct i40e_pf *pf = vsi->back;
1598 struct i40e_hw *hw = &pf->hw;
1599 int ret = 0;
1600
1601 if (seed) {
1602 struct i40e_aqc_get_set_rss_key_data *seed_dw =
1603 (struct i40e_aqc_get_set_rss_key_data *)seed;
1604 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
1605 if (ret) {
1606 dev_info(&pf->pdev->dev,
1607 "Cannot set RSS key, err %s aq_err %s\n",
1608 i40e_stat_str(hw, ret),
1609 i40e_aq_str(hw, hw->aq.asq_last_status));
1610 return ret;
1611 }
1612 }
1613 if (lut) {
1614 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
1615
1616 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
1617 if (ret) {
1618 dev_info(&pf->pdev->dev,
1619 "Cannot set RSS lut, err %s aq_err %s\n",
1620 i40e_stat_str(hw, ret),
1621 i40e_aq_str(hw, hw->aq.asq_last_status));
1622 return ret;
1623 }
1624 }
1625 return ret;
1626}
1627
1628
1629
1630
1631
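/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/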
1632static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
1633{
1634 struct i40e_pf *pf = vsi->back;
1635 u8 seed[I40E_HKEY_ARRAY_SIZE];
1636 u8 *lut;
1637 int ret;
1638
1639 if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
1640 return 0;
1641 if (!vsi->rss_size)
1642 vsi->rss_size = min_t(int, pf->alloc_rss_size,
1643 vsi->num_queue_pairs);
1644 if (!vsi->rss_size)
1645 return -EINVAL;
1646 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1647 if (!lut)
1648 return -ENOMEM;
1649
1650
1651
1652
1653 if (vsi->rss_lut_user)
1654 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1655 else
1656 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
1657 if (vsi->rss_hkey_user)
1658 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
1659 else
1660 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
1661 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
1662 kfree(lut);
1663 return ret;
1664}
1665
1666
1667
1668
1669
1670
1671
1672
1673
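/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured,
 * @ctxt: VSI context structure
 * @enabled_tc: number of traffic classes to enable
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
 **/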
1674static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
1675 struct i40e_vsi_context *ctxt,
1676 u8 enabled_tc)
1677{
1678 u16 qcount = 0, max_qcount, qmap, sections = 0;
1679 int i, override_q, pow, num_qps, ret;
1680 u8 netdev_tc = 0, offset = 0;
1681
1682 if (vsi->type != I40E_VSI_MAIN)
1683 return -EINVAL;
1684 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1685 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1686 vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
1687 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1688 num_qps = vsi->mqprio_qopt.qopt.count[0];
1689
1690
1691 pow = ilog2(num_qps);
1692 if (!is_power_of_2(num_qps))
1693 pow++;
1694 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1695 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1696
1697
1698 max_qcount = vsi->mqprio_qopt.qopt.count[0];
1699 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1700
1701 if (vsi->tc_config.enabled_tc & BIT(i)) {
1702 offset = vsi->mqprio_qopt.qopt.offset[i];
1703 qcount = vsi->mqprio_qopt.qopt.count[i];
1704 if (qcount > max_qcount)
1705 max_qcount = qcount;
1706 vsi->tc_config.tc_info[i].qoffset = offset;
1707 vsi->tc_config.tc_info[i].qcount = qcount;
1708 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1709 } else {
1710
1711
1712
1713
1714 vsi->tc_config.tc_info[i].qoffset = 0;
1715 vsi->tc_config.tc_info[i].qcount = 1;
1716 vsi->tc_config.tc_info[i].netdev_tc = 0;
1717 }
1718 }
1719
1720
1721 vsi->num_queue_pairs = offset + qcount;
1722
1723
1724 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1725 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1726 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1727 ctxt->info.valid_sections |= cpu_to_le16(sections);
1728
1729
1730 vsi->rss_size = max_qcount;
1731 ret = i40e_vsi_config_rss(vsi);
1732 if (ret) {
1733 dev_info(&vsi->back->pdev->dev,
1734 "Failed to reconfig rss for num_queues (%u)\n",
1735 max_qcount);
1736 return ret;
1737 }
1738 vsi->reconfig_rss = true;
1739 dev_dbg(&vsi->back->pdev->dev,
1740 "Reconfigured rss with num_queues (%u)\n", max_qcount);
1741
1742
1743
1744
1745 override_q = vsi->mqprio_qopt.qopt.count[0];
1746 if (override_q && override_q < vsi->num_queue_pairs) {
1747 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
1748 vsi->next_base_queue = override_q;
1749 }
1750 return 0;
1751}
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
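/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/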
1762static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1763 struct i40e_vsi_context *ctxt,
1764 u8 enabled_tc,
1765 bool is_add)
1766{
1767 struct i40e_pf *pf = vsi->back;
1768 u16 sections = 0;
1769 u8 netdev_tc = 0;
1770 u16 numtc = 1;
1771 u16 qcount;
1772 u8 offset;
1773 u16 qmap;
1774 int i;
1775 u16 num_tc_qps = 0;
1776
1777 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1778 offset = 0;
1779
1780
1781 num_tc_qps = vsi->alloc_queue_pairs;
1782 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1783
1784 for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1785 if (enabled_tc & BIT(i))
1786 numtc++;
1787 }
1788 if (!numtc) {
1789 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1790 numtc = 1;
1791 }
1792 num_tc_qps = num_tc_qps / numtc;
1793 num_tc_qps = min_t(int, num_tc_qps,
1794 i40e_pf_get_max_q_per_tc(pf));
1795 }
1796
1797 vsi->tc_config.numtc = numtc;
1798 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1799
1800
1801 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1802 num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
1803
1804
1805 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1806
1807 if (vsi->tc_config.enabled_tc & BIT(i)) {
1808
1809 int pow, num_qps;
1810
1811 switch (vsi->type) {
1812 case I40E_VSI_MAIN:
1813 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
1814 I40E_FLAG_FD_ATR_ENABLED)) ||
1815 vsi->tc_config.enabled_tc != 1) {
1816 qcount = min_t(int, pf->alloc_rss_size,
1817 num_tc_qps);
1818 break;
1819 }
				/* fall through */
1821 case I40E_VSI_FDIR:
1822 case I40E_VSI_SRIOV:
1823 case I40E_VSI_VMDQ2:
1824 default:
1825 qcount = num_tc_qps;
1826 WARN_ON(i != 0);
1827 break;
1828 }
1829 vsi->tc_config.tc_info[i].qoffset = offset;
1830 vsi->tc_config.tc_info[i].qcount = qcount;
1831
1832
1833 num_qps = qcount;
1834 pow = 0;
1835 while (num_qps && (BIT_ULL(pow) < qcount)) {
1836 pow++;
1837 num_qps >>= 1;
1838 }
1839
1840 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1841 qmap =
1842 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1843 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1844
1845 offset += qcount;
1846 } else {
1847
1848
1849
1850
1851 vsi->tc_config.tc_info[i].qoffset = 0;
1852 vsi->tc_config.tc_info[i].qcount = 1;
1853 vsi->tc_config.tc_info[i].netdev_tc = 0;
1854
1855 qmap = 0;
1856 }
1857 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1858 }
1859
1860
1861 vsi->num_queue_pairs = offset;
1862 if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
1863 if (vsi->req_queue_pairs > 0)
1864 vsi->num_queue_pairs = vsi->req_queue_pairs;
1865 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1866 vsi->num_queue_pairs = pf->num_lan_msix;
1867 }
1868
1869
1870 if (is_add) {
1871 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1872
1873 ctxt->info.up_enable_bits = enabled_tc;
1874 }
1875 if (vsi->type == I40E_VSI_SRIOV) {
1876 ctxt->info.mapping_flags |=
1877 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1878 for (i = 0; i < vsi->num_queue_pairs; i++)
1879 ctxt->info.queue_mapping[i] =
1880 cpu_to_le16(vsi->base_queue + i);
1881 } else {
1882 ctxt->info.mapping_flags |=
1883 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1884 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1885 }
1886 ctxt->info.valid_sections |= cpu_to_le16(sections);
1887}
1888
1889
1890
1891
1892
1893
1894
1895
1896
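/**
 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/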
1897static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1898{
1899 struct i40e_netdev_priv *np = netdev_priv(netdev);
1900 struct i40e_vsi *vsi = np->vsi;
1901
1902 if (i40e_add_mac_filter(vsi, addr))
1903 return 0;
1904 else
1905 return -ENOMEM;
1906}
1907
1908
1909
1910
1911
1912
1913
1914
1915
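/**
 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/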
1916static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1917{
1918 struct i40e_netdev_priv *np = netdev_priv(netdev);
1919 struct i40e_vsi *vsi = np->vsi;
1920
1921
1922
1923
1924
1925
1926 if (ether_addr_equal(addr, netdev->dev_addr))
1927 return 0;
1928
1929 i40e_del_mac_filter(vsi, addr);
1930
1931 return 0;
1932}
1933
1934
1935
1936
1937
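/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/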
1938static void i40e_set_rx_mode(struct net_device *netdev)
1939{
1940 struct i40e_netdev_priv *np = netdev_priv(netdev);
1941 struct i40e_vsi *vsi = np->vsi;
1942
1943 spin_lock_bh(&vsi->mac_filter_hash_lock);
1944
1945 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1946 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1947
1948 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1949
1950
1951 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1952 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1953 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1954 }
1955}
1956
1957
1958
1959
1960
1961
1962
1963
1964
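/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to VSI struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries needs to be undone.
 *
 * MAC filter entries from this list were slated for deletion.
 **/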
1965static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1966 struct hlist_head *from)
1967{
1968 struct i40e_mac_filter *f;
1969 struct hlist_node *h;
1970
1971 hlist_for_each_entry_safe(f, h, from, hlist) {
1972 u64 key = i40e_addr_to_hkey(f->macaddr);
1973
1974
1975 hlist_del(&f->hlist);
1976 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1977 }
1978}
1979
1980
1981
1982
1983
1984
1985
1986
1987
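/**
 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to vsi struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries needs to be undone.
 *
 * MAC filter entries from this list were slated for addition.
 **/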
1988static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
1989 struct hlist_head *from)
1990{
1991 struct i40e_new_mac_filter *new;
1992 struct hlist_node *h;
1993
1994 hlist_for_each_entry_safe(new, h, from, hlist) {
1995
1996 hlist_del(&new->hlist);
1997 kfree(new);
1998 }
1999}
2000
2001
2002
2003
2004
2005
2006
2007
2008
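/**
 * i40e_next_filter - Get the next non-broadcast filter from a list
 * @next: pointer to filter in list
 *
 * Returns the next non-broadcast filter in the list. Required so that we
 * ignore broadcast filters within the list, since these are not handled via
 * the normal firmware update path.
 **/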
2009static
2010struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2011{
2012 hlist_for_each_entry_continue(next, hlist) {
2013 if (!is_broadcast_ether_addr(next->f->macaddr))
2014 return next;
2015 }
2016
2017 return NULL;
2018}
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
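/**
 * i40e_update_filter_state - Update filter state based on return data
 * from firmware
 * @count: Number of filters added
 * @add_list: return data from fw
 * @add_head: pointer to first filter in current batch
 *
 * MAC filter entries from this list were slated for addition to the HW
 * filter list.  Returns the number of filters the firmware accepted.
 **/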
2030static int
2031i40e_update_filter_state(int count,
2032 struct i40e_aqc_add_macvlan_element_data *add_list,
2033 struct i40e_new_mac_filter *add_head)
2034{
2035 int retval = 0;
2036 int i;
2037
2038 for (i = 0; i < count; i++) {
		/* We pre-set match_method to I40E_AQC_MM_ERR_NO_RES before
		 * sending the list; firmware overwrites it for filters it
		 * successfully adds, so any element still carrying that
		 * value failed to be added.
		 */
2045 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2046 add_head->state = I40E_FILTER_FAILED;
2047 } else {
2048 add_head->state = I40E_FILTER_ACTIVE;
2049 retval++;
2050 }
2051
2052 add_head = i40e_next_filter(add_head);
2053 if (!add_head)
2054 break;
2055 }
2056
2057 return retval;
2058}
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
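/**
 * i40e_aqc_del_filters - Request firmware to delete a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @num_del: the number of filters to delete
 * @retval: Set to -EIO on failure to delete
 *
 * Send a request to firmware via AdminQ to delete a set of filters. Uses
 * *retval instead of a return value so that callers can ignore the error
 * on deletion of a non-existent filter.
 **/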
2073static
2074void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2075 struct i40e_aqc_remove_macvlan_element_data *list,
2076 int num_del, int *retval)
2077{
2078 struct i40e_hw *hw = &vsi->back->hw;
2079 i40e_status aq_ret;
2080 int aq_err;
2081
2082 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
2083 aq_err = hw->aq.asq_last_status;
2084
2085
2086 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
2087 *retval = -EIO;
2088 dev_info(&vsi->back->pdev->dev,
2089 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2090 vsi_name, i40e_stat_str(hw, aq_ret),
2091 i40e_aq_str(hw, aq_err));
2092 }
2093}
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
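/**
 * i40e_aqc_add_filters - Request firmware to add a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @add_head: Position in the add hlist
 * @num_add: the number of filters to add
 *
 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out
 * of space for more filters.
 **/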
2107static
2108void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2109 struct i40e_aqc_add_macvlan_element_data *list,
2110 struct i40e_new_mac_filter *add_head,
2111 int num_add)
2112{
2113 struct i40e_hw *hw = &vsi->back->hw;
2114 int aq_err, fcnt;
2115
2116 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
2117 aq_err = hw->aq.asq_last_status;
2118 fcnt = i40e_update_filter_state(num_add, list, add_head);
2119
2120 if (fcnt != num_add) {
2121 if (vsi->type == I40E_VSI_MAIN) {
2122 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2123 dev_warn(&vsi->back->pdev->dev,
2124 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2125 i40e_aq_str(hw, aq_err), vsi_name);
2126 } else if (vsi->type == I40E_VSI_SRIOV ||
2127 vsi->type == I40E_VSI_VMDQ1 ||
2128 vsi->type == I40E_VSI_VMDQ2) {
2129 dev_warn(&vsi->back->pdev->dev,
2130 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
2131 i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
2132 } else {
2133 dev_warn(&vsi->back->pdev->dev,
2134 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
2135 i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
2136 }
2137 }
2138}
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
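/**
 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
 * @vsi: pointer to the VSI
 * @vsi_name: the VSI name
 * @f: filter data
 *
 * This function sets or clears the promiscuous broadcast flags for VLAN
 * filters in order to properly receive broadcast frames. Assumes that only
 * broadcast filters are passed.
 *
 * Returns status indicating success or failure;
 **/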
2152static i40e_status
2153i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2154 struct i40e_mac_filter *f)
2155{
2156 bool enable = f->state == I40E_FILTER_NEW;
2157 struct i40e_hw *hw = &vsi->back->hw;
2158 i40e_status aq_ret;
2159
2160 if (f->vlan == I40E_VLAN_ANY) {
2161 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2162 vsi->seid,
2163 enable,
2164 NULL);
2165 } else {
2166 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2167 vsi->seid,
2168 enable,
2169 f->vlan,
2170 NULL);
2171 }
2172
2173 if (aq_ret) {
2174 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2175 dev_warn(&vsi->back->pdev->dev,
2176 "Error %s, forcing overflow promiscuous on %s\n",
2177 i40e_aq_str(hw, hw->aq.asq_last_status),
2178 vsi_name);
2179 }
2180
2181 return aq_ret;
2182}
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
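/**
 * i40e_set_promiscuous - set promiscuous mode
 * @pf: board private structure
 * @promisc: promisc on or off
 *
 * There are different ways of setting promiscuous mode on a PF depending on
 * what state/environment we're in.  This identifies and sets it appropriately.
 * Returns 0 on success.
 **/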
2193static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2194{
2195 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2196 struct i40e_hw *hw = &pf->hw;
2197 i40e_status aq_ret;
2198
2199 if (vsi->type == I40E_VSI_MAIN &&
2200 pf->lan_veb != I40E_NO_VEB &&
2201 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2202
2203
2204
2205
2206
2207 if (promisc)
2208 aq_ret = i40e_aq_set_default_vsi(hw,
2209 vsi->seid,
2210 NULL);
2211 else
2212 aq_ret = i40e_aq_clear_default_vsi(hw,
2213 vsi->seid,
2214 NULL);
2215 if (aq_ret) {
2216 dev_info(&pf->pdev->dev,
2217 "Set default VSI failed, err %s, aq_err %s\n",
2218 i40e_stat_str(hw, aq_ret),
2219 i40e_aq_str(hw, hw->aq.asq_last_status));
2220 }
2221 } else {
2222 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2223 hw,
2224 vsi->seid,
2225 promisc, NULL,
2226 true);
2227 if (aq_ret) {
2228 dev_info(&pf->pdev->dev,
2229 "set unicast promisc failed, err %s, aq_err %s\n",
2230 i40e_stat_str(hw, aq_ret),
2231 i40e_aq_str(hw, hw->aq.asq_last_status));
2232 }
2233 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2234 hw,
2235 vsi->seid,
2236 promisc, NULL);
2237 if (aq_ret) {
2238 dev_info(&pf->pdev->dev,
2239 "set multicast promisc failed, err %s, aq_err %s\n",
2240 i40e_stat_str(hw, aq_ret),
2241 i40e_aq_str(hw, hw->aq.asq_last_status));
2242 }
2243 }
2244
2245 if (!aq_ret)
2246 pf->cur_promisc = promisc;
2247
2248 return aq_ret;
2249}
2250
2251
2252
2253
2254
2255
2256
2257
2258
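/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/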
2259int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2260{
2261 struct hlist_head tmp_add_list, tmp_del_list;
2262 struct i40e_mac_filter *f;
2263 struct i40e_new_mac_filter *new, *add_head = NULL;
2264 struct i40e_hw *hw = &vsi->back->hw;
2265 bool old_overflow, new_overflow;
2266 unsigned int failed_filters = 0;
2267 unsigned int vlan_filters = 0;
2268 char vsi_name[16] = "PF";
2269 int filter_list_len = 0;
2270 i40e_status aq_ret = 0;
2271 u32 changed_flags = 0;
2272 struct hlist_node *h;
2273 struct i40e_pf *pf;
2274 int num_add = 0;
2275 int num_del = 0;
2276 int retval = 0;
2277 u16 cmd_flags;
2278 int list_size;
2279 int bkt;
2280
2281
2282 struct i40e_aqc_add_macvlan_element_data *add_list;
2283 struct i40e_aqc_remove_macvlan_element_data *del_list;
2284
2285 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2286 usleep_range(1000, 2000);
2287 pf = vsi->back;
2288
2289 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2290
2291 if (vsi->netdev) {
2292 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2293 vsi->current_netdev_flags = vsi->netdev->flags;
2294 }
2295
2296 INIT_HLIST_HEAD(&tmp_add_list);
2297 INIT_HLIST_HEAD(&tmp_del_list);
2298
2299 if (vsi->type == I40E_VSI_SRIOV)
2300 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2301 else if (vsi->type != I40E_VSI_MAIN)
2302 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2303
2304 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2305 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2306
2307 spin_lock_bh(&vsi->mac_filter_hash_lock);
2308
2309 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2310 if (f->state == I40E_FILTER_REMOVE) {
2311
2312 hash_del(&f->hlist);
2313 hlist_add_head(&f->hlist, &tmp_del_list);
2314
2315
2316 continue;
2317 }
2318 if (f->state == I40E_FILTER_NEW) {
2319
2320 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2321 if (!new)
2322 goto err_no_memory_locked;
2323
2324
2325 new->f = f;
2326 new->state = f->state;
2327
2328
2329 hlist_add_head(&new->hlist, &tmp_add_list);
2330 }
2331
2332
2333
2334
2335
2336 if (f->vlan > 0)
2337 vlan_filters++;
2338 }
2339
2340 retval = i40e_correct_mac_vlan_filters(vsi,
2341 &tmp_add_list,
2342 &tmp_del_list,
2343 vlan_filters);
2344 if (retval)
2345 goto err_no_memory_locked;
2346
2347 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2348 }
2349
2350
2351 if (!hlist_empty(&tmp_del_list)) {
2352 filter_list_len = hw->aq.asq_buf_size /
2353 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2354 list_size = filter_list_len *
2355 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2356 del_list = kzalloc(list_size, GFP_ATOMIC);
2357 if (!del_list)
2358 goto err_no_memory;
2359
2360 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2361 cmd_flags = 0;
2362
2363
2364
2365
2366 if (is_broadcast_ether_addr(f->macaddr)) {
2367 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2368
2369 hlist_del(&f->hlist);
2370 kfree(f);
2371 continue;
2372 }
2373
2374
2375 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2376 if (f->vlan == I40E_VLAN_ANY) {
2377 del_list[num_del].vlan_tag = 0;
2378 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2379 } else {
2380 del_list[num_del].vlan_tag =
2381 cpu_to_le16((u16)(f->vlan));
2382 }
2383
2384 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2385 del_list[num_del].flags = cmd_flags;
2386 num_del++;
2387
2388
2389 if (num_del == filter_list_len) {
2390 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2391 num_del, &retval);
2392 memset(del_list, 0, list_size);
2393 num_del = 0;
2394 }
2395
2396
2397
2398 hlist_del(&f->hlist);
2399 kfree(f);
2400 }
2401
2402 if (num_del) {
2403 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2404 num_del, &retval);
2405 }
2406
2407 kfree(del_list);
2408 del_list = NULL;
2409 }
2410
2411 if (!hlist_empty(&tmp_add_list)) {
2412
2413 filter_list_len = hw->aq.asq_buf_size /
2414 sizeof(struct i40e_aqc_add_macvlan_element_data);
2415 list_size = filter_list_len *
2416 sizeof(struct i40e_aqc_add_macvlan_element_data);
2417 add_list = kzalloc(list_size, GFP_ATOMIC);
2418 if (!add_list)
2419 goto err_no_memory;
2420
2421 num_add = 0;
2422 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2423
2424
2425
2426 if (is_broadcast_ether_addr(new->f->macaddr)) {
2427 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2428 new->f))
2429 new->state = I40E_FILTER_FAILED;
2430 else
2431 new->state = I40E_FILTER_ACTIVE;
2432 continue;
2433 }
2434
2435
2436 if (num_add == 0)
2437 add_head = new;
2438 cmd_flags = 0;
2439 ether_addr_copy(add_list[num_add].mac_addr,
2440 new->f->macaddr);
2441 if (new->f->vlan == I40E_VLAN_ANY) {
2442 add_list[num_add].vlan_tag = 0;
2443 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2444 } else {
2445 add_list[num_add].vlan_tag =
2446 cpu_to_le16((u16)(new->f->vlan));
2447 }
2448 add_list[num_add].queue_number = 0;
2449
2450 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2451 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2452 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2453 num_add++;
2454
2455
2456 if (num_add == filter_list_len) {
2457 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2458 add_head, num_add);
2459 memset(add_list, 0, list_size);
2460 num_add = 0;
2461 }
2462 }
2463 if (num_add) {
2464 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2465 num_add);
2466 }
2467
2468
2469
2470 spin_lock_bh(&vsi->mac_filter_hash_lock);
2471 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2472
2473 if (new->f->state == I40E_FILTER_NEW)
2474 new->f->state = new->state;
2475 hlist_del(&new->hlist);
2476 kfree(new);
2477 }
2478 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2479 kfree(add_list);
2480 add_list = NULL;
2481 }
2482
2483
2484 spin_lock_bh(&vsi->mac_filter_hash_lock);
2485 vsi->active_filters = 0;
2486 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2487 if (f->state == I40E_FILTER_ACTIVE)
2488 vsi->active_filters++;
2489 else if (f->state == I40E_FILTER_FAILED)
2490 failed_filters++;
2491 }
2492 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2493
2494
2495
2496
2497
2498 if (old_overflow && !failed_filters &&
2499 vsi->active_filters < vsi->promisc_threshold) {
2500 dev_info(&pf->pdev->dev,
2501 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2502 vsi_name);
2503 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2504 vsi->promisc_threshold = 0;
2505 }
2506
2507
2508 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2509 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2510 goto out;
2511 }
2512
2513 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2514
2515
2516
2517
2518 if (!old_overflow && new_overflow)
2519 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
2520
2521
2522 if (changed_flags & IFF_ALLMULTI) {
2523 bool cur_multipromisc;
2524
2525 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2526 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2527 vsi->seid,
2528 cur_multipromisc,
2529 NULL);
2530 if (aq_ret) {
2531 retval = i40e_aq_rc_to_posix(aq_ret,
2532 hw->aq.asq_last_status);
2533 dev_info(&pf->pdev->dev,
2534 "set multi promisc failed on %s, err %s aq_err %s\n",
2535 vsi_name,
2536 i40e_stat_str(hw, aq_ret),
2537 i40e_aq_str(hw, hw->aq.asq_last_status));
2538 } else {
2539 dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
2540 vsi->netdev->name,
2541 cur_multipromisc ? "entering" : "leaving");
2542 }
2543 }
2544
2545 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2546 bool cur_promisc;
2547
2548 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2549 new_overflow);
2550 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2551 if (aq_ret) {
2552 retval = i40e_aq_rc_to_posix(aq_ret,
2553 hw->aq.asq_last_status);
2554 dev_info(&pf->pdev->dev,
2555 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2556 cur_promisc ? "on" : "off",
2557 vsi_name,
2558 i40e_stat_str(hw, aq_ret),
2559 i40e_aq_str(hw, hw->aq.asq_last_status));
2560 }
2561 }
2562out:
2563
2564 if (retval)
2565 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2566
2567 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2568 return retval;
2569
2570err_no_memory:
2571
2572 spin_lock_bh(&vsi->mac_filter_hash_lock);
2573err_no_memory_locked:
2574 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2575 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2576 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2577
2578 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2579 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2580 return -ENOMEM;
2581}
2582
2583
2584
2585
2586
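/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 *
 * Walk all VSIs with the FILTER_CHANGED flag set and sync their
 * MAC/VLAN filter lists with the hardware.
 **/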
2587static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2588{
2589 int v;
2590
2591 if (!pf)
2592 return;
2593 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2594 return;
2595 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
2596 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2597 return;
2598 }
2599
2600 for (v = 0; v < pf->num_alloc_vsi; v++) {
2601 if (pf->vsi[v] &&
2602 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2603 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2604
2605 if (ret) {
2606
2607 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2608 pf->state);
2609 break;
2610 }
2611 }
2612 }
2613 clear_bit(__I40E_VF_DISABLE, pf->state);
2614}
2615
2616
2617
2618
2619
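/**
 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
 * @vsi: the VSI being queried
 **/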
2620static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2621{
2622 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2623 return I40E_RXBUFFER_2048;
2624 else
2625 return I40E_RXBUFFER_3072;
2626}
2627
2628
2629
2630
2631
2632
2633
2634
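/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/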
2635static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2636{
2637 struct i40e_netdev_priv *np = netdev_priv(netdev);
2638 struct i40e_vsi *vsi = np->vsi;
2639 struct i40e_pf *pf = vsi->back;
2640
2641 if (i40e_enabled_xdp_vsi(vsi)) {
2642 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2643
2644 if (frame_size > i40e_max_xdp_frame_size(vsi))
2645 return -EINVAL;
2646 }
2647
2648 netdev_info(netdev, "changing MTU from %d to %d\n",
2649 netdev->mtu, new_mtu);
2650 netdev->mtu = new_mtu;
2651 if (netif_running(netdev))
2652 i40e_vsi_reinit_locked(vsi);
2653 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2654 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2655 return 0;
2656}
2657
2658
2659
2660
2661
2662
2663
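/**
 * i40e_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command (SIOCGHWTSTAMP or SIOCSHWTSTAMP)
 **/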
2664int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2665{
2666 struct i40e_netdev_priv *np = netdev_priv(netdev);
2667 struct i40e_pf *pf = np->vsi->back;
2668
2669 switch (cmd) {
2670 case SIOCGHWTSTAMP:
2671 return i40e_ptp_get_ts_config(pf, ifr);
2672 case SIOCSHWTSTAMP:
2673 return i40e_ptp_set_ts_config(pf, ifr);
2674 default:
2675 return -EOPNOTSUPP;
2676 }
2677}
2678
2679
2680
2681
2682
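/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/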
2683void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2684{
2685 struct i40e_vsi_context ctxt;
2686 i40e_status ret;
2687
2688
2689 if (vsi->info.pvid)
2690 return;
2691
2692 if ((vsi->info.valid_sections &
2693 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2694 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2695 return;
2696
2697 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2698 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2699 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2700
2701 ctxt.seid = vsi->seid;
2702 ctxt.info = vsi->info;
2703 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2704 if (ret) {
2705 dev_info(&vsi->back->pdev->dev,
2706 "update vlan stripping failed, err %s aq_err %s\n",
2707 i40e_stat_str(&vsi->back->hw, ret),
2708 i40e_aq_str(&vsi->back->hw,
2709 vsi->back->hw.aq.asq_last_status));
2710 }
2711}
2712
2713
2714
2715
2716
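/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/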
2717void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2718{
2719 struct i40e_vsi_context ctxt;
2720 i40e_status ret;
2721
2722
2723 if (vsi->info.pvid)
2724 return;
2725
2726 if ((vsi->info.valid_sections &
2727 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2728 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2729 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2730 return;
2731
2732 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2733 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2734 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2735
2736 ctxt.seid = vsi->seid;
2737 ctxt.info = vsi->info;
2738 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2739 if (ret) {
2740 dev_info(&vsi->back->pdev->dev,
2741 "update vlan stripping failed, err %s aq_err %s\n",
2742 i40e_stat_str(&vsi->back->hw, ret),
2743 i40e_aq_str(&vsi->back->hw,
2744 vsi->back->hw.aq.asq_last_status));
2745 }
2746}
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
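/**
 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
 * @vsi: the vsi being configured
 * @vid: vlan id to be added
 *
 * Adds a new MAC/VLAN filter with the specified VLAN for each MAC address
 * already in the hash table, skipping entries marked for removal.
 *
 * NOTE: expects to be called while holding mac_filter_hash_lock.
 **/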
2761int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2762{
2763 struct i40e_mac_filter *f, *add_f;
2764 struct hlist_node *h;
2765 int bkt;
2766
2767 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2768 if (f->state == I40E_FILTER_REMOVE)
2769 continue;
2770 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2771 if (!add_f) {
2772 dev_info(&vsi->back->pdev->dev,
2773 "Could not add vlan filter %d for %pM\n",
2774 vid, f->macaddr);
2775 return -ENOMEM;
2776 }
2777 }
2778
2779 return 0;
2780}
2781
2782
2783
2784
2785
2786
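/**
 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be added
 **/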
2787int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2788{
2789 int err;
2790
2791 if (vsi->info.pvid)
2792 return -EINVAL;
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802 if (!vid)
2803 return 0;
2804
2805
2806 spin_lock_bh(&vsi->mac_filter_hash_lock);
2807 err = i40e_add_vlan_all_mac(vsi, vid);
2808 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2809 if (err)
2810 return err;
2811
2812
2813
2814
2815 i40e_service_event_schedule(vsi->back);
2816 return 0;
2817}
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
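/**
 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed
 *
 * NOTE: expects to be called while holding mac_filter_hash_lock.
 **/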
2832void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2833{
2834 struct i40e_mac_filter *f;
2835 struct hlist_node *h;
2836 int bkt;
2837
2838 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2839 if (f->vlan == vid)
2840 __i40e_del_filter(vsi, f);
2841 }
2842}
2843
2844
2845
2846
2847
2848
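/**
 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be removed
 **/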
2849void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2850{
2851 if (!vid || vsi->info.pvid)
2852 return;
2853
2854 spin_lock_bh(&vsi->mac_filter_hash_lock);
2855 i40e_rm_vlan_all_mac(vsi, vid);
2856 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2857
2858
2859
2860
2861 i40e_service_event_schedule(vsi->back);
2862}
2863
2864
2865
2866
2867
2868
2869
2870
2871
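/**
 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol value
 * @vid: vlan id to be added
 *
 * net_device_ops implementation for adding vlan ids
 **/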
2872static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2873 __always_unused __be16 proto, u16 vid)
2874{
2875 struct i40e_netdev_priv *np = netdev_priv(netdev);
2876 struct i40e_vsi *vsi = np->vsi;
2877 int ret = 0;
2878
2879 if (vid >= VLAN_N_VID)
2880 return -EINVAL;
2881
2882 ret = i40e_vsi_add_vlan(vsi, vid);
2883 if (!ret)
2884 set_bit(vid, vsi->active_vlans);
2885
2886 return ret;
2887}
2888
2889
2890
2891
2892
2893
2894
2895static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
2896 __always_unused __be16 proto, u16 vid)
2897{
2898 struct i40e_netdev_priv *np = netdev_priv(netdev);
2899 struct i40e_vsi *vsi = np->vsi;
2900
2901 if (vid >= VLAN_N_VID)
2902 return;
2903 set_bit(vid, vsi->active_vlans);
2904}
2905
2906
2907
2908
2909
2910
2911
2912
2913
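/**
 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol value
 * @vid: vlan id to be removed
 *
 * net_device_ops implementation for removing vlan ids
 **/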
2914static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2915 __always_unused __be16 proto, u16 vid)
2916{
2917 struct i40e_netdev_priv *np = netdev_priv(netdev);
2918 struct i40e_vsi *vsi = np->vsi;
2919
2920
2921
2922
2923
2924 i40e_vsi_kill_vlan(vsi, vid);
2925
2926 clear_bit(vid, vsi->active_vlans);
2927
2928 return 0;
2929}
2930
2931
2932
2933
2934
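/**
 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
 * @vsi: the vsi being brought back up
 **/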
2935static void i40e_restore_vlan(struct i40e_vsi *vsi)
2936{
2937 u16 vid;
2938
2939 if (!vsi->netdev)
2940 return;
2941
2942 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2943 i40e_vlan_stripping_enable(vsi);
2944 else
2945 i40e_vlan_stripping_disable(vsi);
2946
2947 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2948 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
2949 vid);
2950}
2951
2952
2953
2954
2955
2956
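/**
 * i40e_vsi_add_pvid - Add pvid for the VSI
 * @vsi: the vsi being adjusted
 * @vid: the vlan id to set as a PVID
 *
 * Returns 0 on success, -ENOENT if the VSI context update fails.
 **/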
2957int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2958{
2959 struct i40e_vsi_context ctxt;
2960 i40e_status ret;
2961
2962 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2963 vsi->info.pvid = cpu_to_le16(vid);
2964 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2965 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2966 I40E_AQ_VSI_PVLAN_EMOD_STR;
2967
2968 ctxt.seid = vsi->seid;
2969 ctxt.info = vsi->info;
2970 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2971 if (ret) {
2972 dev_info(&vsi->back->pdev->dev,
2973 "add pvid failed, err %s aq_err %s\n",
2974 i40e_stat_str(&vsi->back->hw, ret),
2975 i40e_aq_str(&vsi->back->hw,
2976 vsi->back->hw.aq.asq_last_status));
2977 return -ENOENT;
2978 }
2979
2980 return 0;
2981}
2982
2983
2984
2985
2986
2987
2988
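/**
 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
 * @vsi: the vsi being adjusted
 *
 * Clears the PVID and disables VLAN tag stripping on the VSI.
 **/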
2989void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2990{
2991 vsi->info.pvid = 0;
2992
2993 i40e_vlan_stripping_disable(vsi);
2994}
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
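/**
 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
 * @vsi: ptr to the VSI
 *
 * Allocates descriptors for the LAN Tx rings and, when XDP is enabled,
 * for the XDP Tx rings as well.  If this function returns with an error,
 * one or more of the rings may be populated; the caller must clean up
 * those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/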
3006static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
3007{
3008 int i, err = 0;
3009
3010 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3011 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3012
3013 if (!i40e_enabled_xdp_vsi(vsi))
3014 return err;
3015
3016 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3017 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3018
3019 return err;
3020}
3021
3022
3023
3024
3025
3026
3027
3028static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3029{
3030 int i;
3031
3032 if (vsi->tx_rings) {
3033 for (i = 0; i < vsi->num_queue_pairs; i++)
3034 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3035 i40e_free_tx_resources(vsi->tx_rings[i]);
3036 }
3037
3038 if (vsi->xdp_rings) {
3039 for (i = 0; i < vsi->num_queue_pairs; i++)
3040 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3041 i40e_free_tx_resources(vsi->xdp_rings[i]);
3042 }
3043}
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
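/**
 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
 * @vsi: ptr to the VSI
 *
 * Return 0 on success, negative on failure
 **/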
3055static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3056{
3057 int i, err = 0;
3058
3059 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3060 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3061 return err;
3062}
3063
3064
3065
3066
3067
3068
3069
3070static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3071{
3072 int i;
3073
3074 if (!vsi->rx_rings)
3075 return;
3076
3077 for (i = 0; i < vsi->num_queue_pairs; i++)
3078 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3079 i40e_free_rx_resources(vsi->rx_rings[i]);
3080}
3081
3082
3083
3084
3085
3086
3087
3088
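/**
 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * Sets the XPS queue mapping for the ring based on the CPU that services
 * its q_vector.  Done only once per ring; channel rings are skipped.
 **/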
3089static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3090{
3091 int cpu;
3092
3093 if (!ring->q_vector || !ring->netdev || ring->ch)
3094 return;
3095
3096
3097 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3098 return;
3099
3100 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3101 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3102 ring->queue_index);
3103}
3104
3105
3106
3107
3108
3109
3110
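/**
 * i40e_xsk_umem - Retrieve the AF_XDP ZC UMEM for a ring, if any
 * @ring: The Tx or Rx ring
 *
 * Returns the UMEM bound to the ring's queue id when XDP is enabled and
 * zero-copy is active on that queue, or NULL otherwise.
 **/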
3111static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
3112{
3113 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3114 int qid = ring->queue_index;
3115
3116 if (ring_is_xdp(ring))
3117 qid -= ring->vsi->alloc_queue_pairs;
3118
3119 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3120 return NULL;
3121
3122 return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
3123}
3124
3125
3126
3127
3128
3129
3130
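/**
 * i40e_configure_tx_ring - Configure a transmit ring context and tail
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
 *
 * Returns 0 on success, negative on failure.
 **/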
3131static int i40e_configure_tx_ring(struct i40e_ring *ring)
3132{
3133 struct i40e_vsi *vsi = ring->vsi;
3134 u16 pf_q = vsi->base_queue + ring->queue_index;
3135 struct i40e_hw *hw = &vsi->back->hw;
3136 struct i40e_hmc_obj_txq tx_ctx;
3137 i40e_status err = 0;
3138 u32 qtx_ctl = 0;
3139
3140 if (ring_is_xdp(ring))
3141 ring->xsk_umem = i40e_xsk_umem(ring);
3142
3143
3144 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3145 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3146 ring->atr_count = 0;
3147 } else {
3148 ring->atr_sample_rate = 0;
3149 }
3150
3151
3152 i40e_config_xps_tx_ring(ring);
3153
3154
3155 memset(&tx_ctx, 0, sizeof(tx_ctx));
3156
3157 tx_ctx.new_context = 1;
3158 tx_ctx.base = (ring->dma / 128);
3159 tx_ctx.qlen = ring->count;
3160 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3161 I40E_FLAG_FD_ATR_ENABLED));
3162 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3163
3164 if (vsi->type != I40E_VSI_FDIR)
3165 tx_ctx.head_wb_ena = 1;
3166 tx_ctx.head_wb_addr = ring->dma +
3167 (ring->count * sizeof(struct i40e_tx_desc));
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180 if (ring->ch)
3181 tx_ctx.rdylist =
3182 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3183
3184 else
3185 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3186
3187 tx_ctx.rdylist_act = 0;
3188
3189
3190 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3191 if (err) {
3192 dev_info(&vsi->back->pdev->dev,
3193 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3194 ring->queue_index, pf_q, err);
3195 return -ENOMEM;
3196 }
3197
3198
3199 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3200 if (err) {
3201 dev_info(&vsi->back->pdev->dev,
3202 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3203 ring->queue_index, pf_q, err);
3204 return -ENOMEM;
3205 }
3206
3207
3208 if (ring->ch) {
3209 if (ring->ch->type == I40E_VSI_VMDQ2)
3210 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3211 else
3212 return -EINVAL;
3213
3214 qtx_ctl |= (ring->ch->vsi_number <<
3215 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3216 I40E_QTX_CTL_VFVM_INDX_MASK;
3217 } else {
3218 if (vsi->type == I40E_VSI_VMDQ2) {
3219 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3220 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3221 I40E_QTX_CTL_VFVM_INDX_MASK;
3222 } else {
3223 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3224 }
3225 }
3226
3227 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3228 I40E_QTX_CTL_PF_INDX_MASK);
3229 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3230 i40e_flush(hw);
3231
3232
3233 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3234
3235 return 0;
3236}
3237
3238
3239
3240
3241
3242
3243
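/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
 *
 * Returns 0 on success, negative on failure.
 **/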
3244static int i40e_configure_rx_ring(struct i40e_ring *ring)
3245{
3246 struct i40e_vsi *vsi = ring->vsi;
3247 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3248 u16 pf_q = vsi->base_queue + ring->queue_index;
3249 struct i40e_hw *hw = &vsi->back->hw;
3250 struct i40e_hmc_obj_rxq rx_ctx;
3251 i40e_status err = 0;
3252 bool ok;
3253 int ret;
3254
3255 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3256
3257
3258 memset(&rx_ctx, 0, sizeof(rx_ctx));
3259
3260 if (ring->vsi->type == I40E_VSI_MAIN)
3261 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3262
3263 ring->xsk_umem = i40e_xsk_umem(ring);
3264 if (ring->xsk_umem) {
3265 ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
3266 XDP_PACKET_HEADROOM;
3267
3268
3269
3270
3271 chain_len = 1;
3272 ring->zca.free = i40e_zca_free;
3273 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3274 MEM_TYPE_ZERO_COPY,
3275 &ring->zca);
3276 if (ret)
3277 return ret;
3278 dev_info(&vsi->back->pdev->dev,
3279 "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
3280 ring->queue_index);
3281
3282 } else {
3283 ring->rx_buf_len = vsi->rx_buf_len;
3284 if (ring->vsi->type == I40E_VSI_MAIN) {
3285 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3286 MEM_TYPE_PAGE_SHARED,
3287 NULL);
3288 if (ret)
3289 return ret;
3290 }
3291 }
3292
3293 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3294 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3295
3296 rx_ctx.base = (ring->dma / 128);
3297 rx_ctx.qlen = ring->count;
3298
3299
3300 rx_ctx.dsize = 1;
3301
3302
3303
3304
3305 rx_ctx.hsplit_0 = 0;
3306
3307 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3308 if (hw->revision_id == 0)
3309 rx_ctx.lrxqthresh = 0;
3310 else
3311 rx_ctx.lrxqthresh = 1;
3312 rx_ctx.crcstrip = 1;
3313 rx_ctx.l2tsel = 1;
3314
3315 rx_ctx.showiv = 0;
3316
3317 rx_ctx.prefena = 1;
3318
3319
3320 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3321 if (err) {
3322 dev_info(&vsi->back->pdev->dev,
3323 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3324 ring->queue_index, pf_q, err);
3325 return -ENOMEM;
3326 }
3327
3328
3329 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3330 if (err) {
3331 dev_info(&vsi->back->pdev->dev,
3332 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3333 ring->queue_index, pf_q, err);
3334 return -ENOMEM;
3335 }
3336
3337
3338 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3339 clear_ring_build_skb_enabled(ring);
3340 else
3341 set_ring_build_skb_enabled(ring);
3342
3343
3344 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3345 writel(0, ring->tail);
3346
3347 ok = ring->xsk_umem ?
3348 i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
3349 !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3350 if (!ok) {
3351
3352
3353
3354 dev_info(&vsi->back->pdev->dev,
3355 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3356 ring->xsk_umem ? "UMEM enabled " : "",
3357 ring->queue_index, pf_q);
3358 }
3359
3360 return 0;
3361}
3362
3363
3364
3365
3366
3367
3368
3369static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3370{
3371 int err = 0;
3372 u16 i;
3373
3374 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3375 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3376
3377 if (err || !i40e_enabled_xdp_vsi(vsi))
3378 return err;
3379
3380 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3381 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3382
3383 return err;
3384}
3385
3386
3387
3388
3389
3390
3391
3392static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3393{
3394 int err = 0;
3395 u16 i;
3396
3397 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3398 vsi->max_frame = I40E_MAX_RXBUFFER;
3399 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3400#if (PAGE_SIZE < 8192)
3401 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3402 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3403 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3404 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3405#endif
3406 } else {
3407 vsi->max_frame = I40E_MAX_RXBUFFER;
3408 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3409 I40E_RXBUFFER_2048;
3410 }
3411
3412
3413 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3414 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3415
3416 return err;
3417}
3418
3419
3420
3421
3422
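/**
 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
 * @vsi: ptr to the VSI
 **/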
3423static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3424{
3425 struct i40e_ring *tx_ring, *rx_ring;
3426 u16 qoffset, qcount;
3427 int i, n;
3428
3429 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3430
3431 for (i = 0; i < vsi->num_queue_pairs; i++) {
3432 rx_ring = vsi->rx_rings[i];
3433 tx_ring = vsi->tx_rings[i];
3434 rx_ring->dcb_tc = 0;
3435 tx_ring->dcb_tc = 0;
3436 }
3437 return;
3438 }
3439
3440 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3441 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3442 continue;
3443
3444 qoffset = vsi->tc_config.tc_info[n].qoffset;
3445 qcount = vsi->tc_config.tc_info[n].qcount;
3446 for (i = qoffset; i < (qoffset + qcount); i++) {
3447 rx_ring = vsi->rx_rings[i];
3448 tx_ring = vsi->tx_rings[i];
3449 rx_ring->dcb_tc = n;
3450 tx_ring->dcb_tc = n;
3451 }
3452 }
3453}
3454
3455
3456
3457
3458
3459static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3460{
3461 if (vsi->netdev)
3462 i40e_set_rx_mode(vsi->netdev);
3463}
3464
3465
3466
3467
3468
3469
3470
3471
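/**
 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
 * @vsi: Pointer to the targeted VSI
 *
 * Replays the hlist of saved Sideband Flow Director filters to the hw.
 **/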
3472static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3473{
3474 struct i40e_fdir_filter *filter;
3475 struct i40e_pf *pf = vsi->back;
3476 struct hlist_node *node;
3477
3478 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3479 return;
3480
3481
3482 pf->fd_tcp4_filter_cnt = 0;
3483 pf->fd_udp4_filter_cnt = 0;
3484 pf->fd_sctp4_filter_cnt = 0;
3485 pf->fd_ip4_filter_cnt = 0;
3486
3487 hlist_for_each_entry_safe(filter, node,
3488 &pf->fdir_filter_list, fdir_node) {
3489 i40e_add_del_fdir(vsi, filter, true);
3490 }
3491}
3492
3493
3494
3495
3496
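/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 **/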
3497static int i40e_vsi_configure(struct i40e_vsi *vsi)
3498{
3499 int err;
3500
3501 i40e_set_vsi_rx_mode(vsi);
3502 i40e_restore_vlan(vsi);
3503 i40e_vsi_config_dcb_rings(vsi);
3504 err = i40e_vsi_configure_tx(vsi);
3505 if (!err)
3506 err = i40e_vsi_configure_rx(vsi);
3507
3508 return err;
3509}
3510
3511
3512
3513
3514
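/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 **/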
3515static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3516{
3517 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3518 struct i40e_pf *pf = vsi->back;
3519 struct i40e_hw *hw = &pf->hw;
3520 u16 vector;
3521 int i, q;
3522 u32 qp;
3523
3524
3525
3526
3527
3528 qp = vsi->base_queue;
3529 vector = vsi->base_vector;
3530 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3531 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3532
3533 q_vector->rx.next_update = jiffies + 1;
3534 q_vector->rx.target_itr =
3535 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3536 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3537 q_vector->rx.target_itr);
3538 q_vector->rx.current_itr = q_vector->rx.target_itr;
3539
3540 q_vector->tx.next_update = jiffies + 1;
3541 q_vector->tx.target_itr =
3542 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3543 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3544 q_vector->tx.target_itr);
3545 q_vector->tx.current_itr = q_vector->tx.target_itr;
3546
3547 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3548 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3549
3550
3551 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3552 for (q = 0; q < q_vector->num_ringpairs; q++) {
3553 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
3554 u32 val;
3555
3556 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3557 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3558 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3559 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3560 (I40E_QUEUE_TYPE_TX <<
3561 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3562
3563 wr32(hw, I40E_QINT_RQCTL(qp), val);
3564
3565 if (has_xdp) {
3566 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3567 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3568 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3569 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3570 (I40E_QUEUE_TYPE_TX <<
3571 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3572
3573 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3574 }
3575
3576 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3577 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3578 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3579 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3580 (I40E_QUEUE_TYPE_RX <<
3581 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3582
3583
3584 if (q == (q_vector->num_ringpairs - 1))
3585 val |= (I40E_QUEUE_END_OF_LIST <<
3586 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3587
3588 wr32(hw, I40E_QINT_TQCTL(qp), val);
3589 qp++;
3590 }
3591 }
3592
3593 i40e_flush(hw);
3594}
3595
3596
3597
3598
3599
3600static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3601{
3602 struct i40e_hw *hw = &pf->hw;
3603 u32 val;
3604
3605
3606 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3607 rd32(hw, I40E_PFINT_ICR0);
3608
3609 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3610 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3611 I40E_PFINT_ICR0_ENA_GRST_MASK |
3612 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3613 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3614 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3615 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3616 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3617
3618 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3619 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3620
3621 if (pf->flags & I40E_FLAG_PTP)
3622 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3623
3624 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3625
3626
3627 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3628 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3629
3630
3631 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3632}
3633
3634
3635
3636
3637
3638static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3639{
3640 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3641 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3642 struct i40e_pf *pf = vsi->back;
3643 struct i40e_hw *hw = &pf->hw;
3644 u32 val;
3645
3646
3647 q_vector->rx.next_update = jiffies + 1;
3648 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3649 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr);
3650 q_vector->rx.current_itr = q_vector->rx.target_itr;
3651 q_vector->tx.next_update = jiffies + 1;
3652 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3653 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr);
3654 q_vector->tx.current_itr = q_vector->tx.target_itr;
3655
3656 i40e_enable_misc_int_causes(pf);
3657
3658
3659 wr32(hw, I40E_PFINT_LNKLST0, 0);
3660
3661
3662 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3663 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3664 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3665 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3666
3667 wr32(hw, I40E_QINT_RQCTL(0), val);
3668
3669 if (i40e_enabled_xdp_vsi(vsi)) {
3670 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3671 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3672 (I40E_QUEUE_TYPE_TX
3673 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3674
3675 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3676 }
3677
3678 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3679 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3680 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3681
3682 wr32(hw, I40E_QINT_TQCTL(0), val);
3683 i40e_flush(hw);
3684}
3685
3686
3687
3688
3689
3690void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3691{
3692 struct i40e_hw *hw = &pf->hw;
3693
3694 wr32(hw, I40E_PFINT_DYN_CTL0,
3695 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3696 i40e_flush(hw);
3697}
3698
3699
3700
3701
3702
3703void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3704{
3705 struct i40e_hw *hw = &pf->hw;
3706 u32 val;
3707
3708 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3709 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3710 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3711
3712 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3713 i40e_flush(hw);
3714}
3715
3716
3717
3718
3719
3720
3721static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3722{
3723 struct i40e_q_vector *q_vector = data;
3724
3725 if (!q_vector->tx.ring && !q_vector->rx.ring)
3726 return IRQ_HANDLED;
3727
3728 napi_schedule_irqoff(&q_vector->napi);
3729
3730 return IRQ_HANDLED;
3731}
3732
3733
3734
3735
3736
3737
3738
3739
3740
3741static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3742 const cpumask_t *mask)
3743{
3744 struct i40e_q_vector *q_vector =
3745 container_of(notify, struct i40e_q_vector, affinity_notify);
3746
3747 cpumask_copy(&q_vector->affinity_mask, mask);
3748}
3749
3750
3751
3752
3753
3754
3755
3756
3757
3758static void i40e_irq_affinity_release(struct kref *ref) {}
3759
3760
3761
3762
3763
3764
3765
3766
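/**
 * i40e_vsi_request_irq_msix - Initialize MSI-X queue interrupts
 * @vsi: the VSI being configured
 * @basename: name for the vector
 *
 * Requests the queue IRQs from the kernel, names each vector, and sets up
 * affinity notifications and hints for them.
 **/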
3767static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3768{
3769 int q_vectors = vsi->num_q_vectors;
3770 struct i40e_pf *pf = vsi->back;
3771 int base = vsi->base_vector;
3772 int rx_int_idx = 0;
3773 int tx_int_idx = 0;
3774 int vector, err;
3775 int irq_num;
3776 int cpu;
3777
3778 for (vector = 0; vector < q_vectors; vector++) {
3779 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3780
3781 irq_num = pf->msix_entries[base + vector].vector;
3782
3783 if (q_vector->tx.ring && q_vector->rx.ring) {
3784 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3785 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3786 tx_int_idx++;
3787 } else if (q_vector->rx.ring) {
3788 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3789 "%s-%s-%d", basename, "rx", rx_int_idx++);
3790 } else if (q_vector->tx.ring) {
3791 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3792 "%s-%s-%d", basename, "tx", tx_int_idx++);
3793 } else {
3794
3795 continue;
3796 }
3797 err = request_irq(irq_num,
3798 vsi->irq_handler,
3799 0,
3800 q_vector->name,
3801 q_vector);
3802 if (err) {
3803 dev_info(&pf->pdev->dev,
3804 "MSIX request_irq failed, error: %d\n", err);
3805 goto free_queue_irqs;
3806 }
3807
3808
3809 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3810 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3811 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3812
3813
3814
3815
3816
3817
3818 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3819 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
3820 }
3821
3822 vsi->irqs_ready = true;
3823 return 0;
3824
3825free_queue_irqs:
3826 while (vector) {
3827 vector--;
3828 irq_num = pf->msix_entries[base + vector].vector;
3829 irq_set_affinity_notifier(irq_num, NULL);
3830 irq_set_affinity_hint(irq_num, NULL);
3831 free_irq(irq_num, &vsi->q_vectors[vector]);
3832 }
3833 return err;
3834}
3835
3836
3837
3838
3839
3840static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3841{
3842 struct i40e_pf *pf = vsi->back;
3843 struct i40e_hw *hw = &pf->hw;
3844 int base = vsi->base_vector;
3845 int i;
3846
3847
3848 for (i = 0; i < vsi->num_queue_pairs; i++) {
3849 u32 val;
3850
3851 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3852 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3853 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3854
3855 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3856 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3857 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3858
3859 if (!i40e_enabled_xdp_vsi(vsi))
3860 continue;
3861 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3862 }
3863
3864
3865 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3866 for (i = vsi->base_vector;
3867 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3868 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3869
3870 i40e_flush(hw);
3871 for (i = 0; i < vsi->num_q_vectors; i++)
3872 synchronize_irq(pf->msix_entries[i + base].vector);
3873 } else {
3874
3875 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3876 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3877 i40e_flush(hw);
3878 synchronize_irq(pf->pdev->irq);
3879 }
3880}
3881
3882
3883
3884
3885
3886static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3887{
3888 struct i40e_pf *pf = vsi->back;
3889 int i;
3890
3891 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3892 for (i = 0; i < vsi->num_q_vectors; i++)
3893 i40e_irq_dynamic_enable(vsi, i);
3894 } else {
3895 i40e_irq_dynamic_enable_icr0(pf);
3896 }
3897
3898 i40e_flush(&pf->hw);
3899 return 0;
3900}
3901
3902
3903
3904
3905
3906static void i40e_free_misc_vector(struct i40e_pf *pf)
3907{
3908
3909 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3910 i40e_flush(&pf->hw);
3911
3912 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
3913 synchronize_irq(pf->msix_entries[0].vector);
3914 free_irq(pf->msix_entries[0].vector, pf);
3915 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
3916 }
3917}
3918
3919
3920
3921
3922
3923
3924
3925
3926
3927
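/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts.  This is also used in
 * MSIX mode to handle the non-queue interrupts.
 **/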
3928static irqreturn_t i40e_intr(int irq, void *data)
3929{
3930 struct i40e_pf *pf = (struct i40e_pf *)data;
3931 struct i40e_hw *hw = &pf->hw;
3932 irqreturn_t ret = IRQ_NONE;
3933 u32 icr0, icr0_remaining;
3934 u32 val, ena_mask;
3935
3936 icr0 = rd32(hw, I40E_PFINT_ICR0);
3937 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3938
3939
3940 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3941 goto enable_intr;
3942
3943
3944 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3945 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3946 pf->sw_int_count++;
3947
3948 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3949 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3950 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3951 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
3952 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
3953 }
3954
3955
3956 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3957 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3958 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3959
3960
3961
3962
3963
3964
3965
3966 if (!test_bit(__I40E_DOWN, pf->state))
3967 napi_schedule_irqoff(&q_vector->napi);
3968 }
3969
3970 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3971 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3972 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
3973 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3974 }
3975
3976 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3977 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3978 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
3979 }
3980
3981 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3982 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3983 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3984 }
3985
3986 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3987 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
3988 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
3989 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3990 val = rd32(hw, I40E_GLGEN_RSTAT);
3991 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3992 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3993 if (val == I40E_RESET_CORER) {
3994 pf->corer_count++;
3995 } else if (val == I40E_RESET_GLOBR) {
3996 pf->globr_count++;
3997 } else if (val == I40E_RESET_EMPR) {
3998 pf->empr_count++;
3999 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
4000 }
4001 }
4002
4003 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
4004 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
4005 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
4006 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
4007 rd32(hw, I40E_PFHMC_ERRORINFO),
4008 rd32(hw, I40E_PFHMC_ERRORDATA));
4009 }
4010
4011 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
4012 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
4013
4014 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
4015 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
4016 i40e_ptp_tx_hwtstamp(pf);
4017 }
4018 }
4019
4020
4021
4022
4023
4024 icr0_remaining = icr0 & ena_mask;
4025 if (icr0_remaining) {
4026 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4027 icr0_remaining);
4028 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4029 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4030 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4031 dev_info(&pf->pdev->dev, "device will be reset\n");
4032 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4033 i40e_service_event_schedule(pf);
4034 }
4035 ena_mask &= ~icr0_remaining;
4036 }
4037 ret = IRQ_HANDLED;
4038
4039enable_intr:
4040
4041 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4042 if (!test_bit(__I40E_DOWN, pf->state) ||
4043 test_bit(__I40E_RECOVERY_MODE, pf->state)) {
4044 i40e_service_event_schedule(pf);
4045 i40e_irq_dynamic_enable_icr0(pf);
4046 }
4047
4048 return ret;
4049}
4050
4051
4052
4053
4054
4055
4056
4057
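/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @budget: how many cleanup operations to perform
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/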
4058static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4059{
4060 struct i40e_vsi *vsi = tx_ring->vsi;
4061 u16 i = tx_ring->next_to_clean;
4062 struct i40e_tx_buffer *tx_buf;
4063 struct i40e_tx_desc *tx_desc;
4064
4065 tx_buf = &tx_ring->tx_bi[i];
4066 tx_desc = I40E_TX_DESC(tx_ring, i);
4067 i -= tx_ring->count;
4068
4069 do {
4070 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4071
4072
4073 if (!eop_desc)
4074 break;
4075
4076
4077 smp_rmb();
4078
4079
4080 if (!(eop_desc->cmd_type_offset_bsz &
4081 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4082 break;
4083
4084
4085 tx_buf->next_to_watch = NULL;
4086
4087 tx_desc->buffer_addr = 0;
4088 tx_desc->cmd_type_offset_bsz = 0;
4089
4090 tx_buf++;
4091 tx_desc++;
4092 i++;
4093 if (unlikely(!i)) {
4094 i -= tx_ring->count;
4095 tx_buf = tx_ring->tx_bi;
4096 tx_desc = I40E_TX_DESC(tx_ring, 0);
4097 }
4098
4099 dma_unmap_single(tx_ring->dev,
4100 dma_unmap_addr(tx_buf, dma),
4101 dma_unmap_len(tx_buf, len),
4102 DMA_TO_DEVICE);
4103 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4104 kfree(tx_buf->raw_buf);
4105
4106 tx_buf->raw_buf = NULL;
4107 tx_buf->tx_flags = 0;
4108 tx_buf->next_to_watch = NULL;
4109 dma_unmap_len_set(tx_buf, len, 0);
4110 tx_desc->buffer_addr = 0;
4111 tx_desc->cmd_type_offset_bsz = 0;
4112
4113
4114 tx_buf++;
4115 tx_desc++;
4116 i++;
4117 if (unlikely(!i)) {
4118 i -= tx_ring->count;
4119 tx_buf = tx_ring->tx_bi;
4120 tx_desc = I40E_TX_DESC(tx_ring, 0);
4121 }
4122
4123
4124 budget--;
4125 } while (likely(budget));
4126
4127 i += tx_ring->count;
4128 tx_ring->next_to_clean = i;
4129
4130 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4131 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4132
4133 return budget > 0;
4134}
4135
4136
4137
4138
4139
4140
4141static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4142{
4143 struct i40e_q_vector *q_vector = data;
4144 struct i40e_vsi *vsi;
4145
4146 if (!q_vector->tx.ring)
4147 return IRQ_HANDLED;
4148
4149 vsi = q_vector->tx.ring->vsi;
4150 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4151
4152 return IRQ_HANDLED;
4153}
4154
4155
4156
4157
4158
4159
4160
4161static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4162{
4163 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4164 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4165 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4166
4167 tx_ring->q_vector = q_vector;
4168 tx_ring->next = q_vector->tx.ring;
4169 q_vector->tx.ring = tx_ring;
4170 q_vector->tx.count++;
4171
4172
4173 if (i40e_enabled_xdp_vsi(vsi)) {
4174 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4175
4176 xdp_ring->q_vector = q_vector;
4177 xdp_ring->next = q_vector->tx.ring;
4178 q_vector->tx.ring = xdp_ring;
4179 q_vector->tx.count++;
4180 }
4181
4182 rx_ring->q_vector = q_vector;
4183 rx_ring->next = q_vector->rx.ring;
4184 q_vector->rx.ring = rx_ring;
4185 q_vector->rx.count++;
4186}
4187
4188
4189
4190
4191
4192
4193
4194
4195
4196
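/**
 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
 * @vsi: the VSI being configured
 *
 * Maps descriptor rings to the queue-specific vectors allotted through
 * the MSI-X enabling code.  Ideally there would be one vector per queue
 * pair, but on a constrained vector budget the queue pairs are grouped
 * as evenly as possible across the available vectors.
 **/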
4197static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4198{
4199 int qp_remaining = vsi->num_queue_pairs;
4200 int q_vectors = vsi->num_q_vectors;
4201 int num_ringpairs;
4202 int v_start = 0;
4203 int qp_idx = 0;
4204
4205
4206
4207
4208
4209
4210
4211
4212 for (; v_start < q_vectors; v_start++) {
4213 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4214
4215 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4216
4217 q_vector->num_ringpairs = num_ringpairs;
4218 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4219
4220 q_vector->rx.count = 0;
4221 q_vector->tx.count = 0;
4222 q_vector->rx.ring = NULL;
4223 q_vector->tx.ring = NULL;
4224
4225 while (num_ringpairs--) {
4226 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4227 qp_idx++;
4228 qp_remaining--;
4229 }
4230 }
4231}
4232
4233
4234
4235
4236
4237
4238static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4239{
4240 struct i40e_pf *pf = vsi->back;
4241 int err;
4242
4243 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4244 err = i40e_vsi_request_irq_msix(vsi, basename);
4245 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4246 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4247 pf->int_name, pf);
4248 else
4249 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4250 pf->int_name, pf);
4251
4252 if (err)
4253 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4254
4255 return err;
4256}
4257
4258#ifdef CONFIG_NET_POLL_CONTROLLER
4259
4260
4261
4262
4263
4264
4265
4266static void i40e_netpoll(struct net_device *netdev)
4267{
4268 struct i40e_netdev_priv *np = netdev_priv(netdev);
4269 struct i40e_vsi *vsi = np->vsi;
4270 struct i40e_pf *pf = vsi->back;
4271 int i;
4272
4273
4274 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4275 return;
4276
4277 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4278 for (i = 0; i < vsi->num_q_vectors; i++)
4279 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4280 } else {
4281 i40e_intr(pf->pdev->irq, netdev);
4282 }
4283}
4284#endif
4285
4286#define I40E_QTX_ENA_WAIT_COUNT 50
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
4299static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4300{
4301 int i;
4302 u32 tx_reg;
4303
4304 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4305 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4306 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4307 break;
4308
4309 usleep_range(10, 20);
4310 }
4311 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4312 return -ETIMEDOUT;
4313
4314 return 0;
4315}
4316
4317
4318
4319
4320
4321
4322
4323
4324
4325
4326
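/**
 * i40e_control_tx_q - Start or stop a particular Tx queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @enable: start or stop the queue
 *
 * Enables or disables a single queue.  Any delay required after the
 * operation is expected to be handled by the caller.
 **/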
4327static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4328{
4329 struct i40e_hw *hw = &pf->hw;
4330 u32 tx_reg;
4331 int i;
4332
4333
4334 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4335 if (!enable)
4336 usleep_range(10, 20);
4337
4338 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4339 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4340 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4341 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4342 break;
4343 usleep_range(1000, 2000);
4344 }
4345
4346
4347 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4348 return;
4349
4350
4351 if (enable) {
4352 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4353 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4354 } else {
4355 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4356 }
4357
4358 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4359}
4360
4361
4362
4363
4364
4365
4366
4367
4368
4369int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4370 bool is_xdp, bool enable)
4371{
4372 int ret;
4373
4374 i40e_control_tx_q(pf, pf_q, enable);
4375
4376
4377 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4378 if (ret) {
4379 dev_info(&pf->pdev->dev,
4380 "VSI seid %d %sTx ring %d %sable timeout\n",
4381 seid, (is_xdp ? "XDP " : ""), pf_q,
4382 (enable ? "en" : "dis"));
4383 }
4384
4385 return ret;
4386}
4387
4388
4389
4390
4391
4392
4393static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
4394{
4395 struct i40e_pf *pf = vsi->back;
4396 int i, pf_q, ret = 0;
4397
4398 pf_q = vsi->base_queue;
4399 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4400 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4401 pf_q,
4402 false , enable);
4403 if (ret)
4404 break;
4405
4406 if (!i40e_enabled_xdp_vsi(vsi))
4407 continue;
4408
4409 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4410 pf_q + vsi->alloc_queue_pairs,
4411 true , enable);
4412 if (ret)
4413 break;
4414 }
4415 return ret;
4416}
4417
4418
4419
4420
4421
4422
4423
4424
4425
4426
4427
4428
4429static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4430{
4431 int i;
4432 u32 rx_reg;
4433
4434 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4435 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4436 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4437 break;
4438
4439 usleep_range(10, 20);
4440 }
4441 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4442 return -ETIMEDOUT;
4443
4444 return 0;
4445}
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4458{
4459 struct i40e_hw *hw = &pf->hw;
4460 u32 rx_reg;
4461 int i;
4462
4463 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4464 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4465 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4466 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4467 break;
4468 usleep_range(1000, 2000);
4469 }
4470
4471
4472 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4473 return;
4474
4475
4476 if (enable)
4477 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4478 else
4479 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4480
4481 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4482}
4483
4484
4485
4486
4487
4488
4489
4490
4491
4492
4493
4494int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4495{
4496 int ret = 0;
4497
4498 i40e_control_rx_q(pf, pf_q, enable);
4499
4500
4501 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4502 if (ret)
4503 return ret;
4504
4505 return ret;
4506}
4507
4508
4509
4510
4511
4512
4513static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4514{
4515 struct i40e_pf *pf = vsi->back;
4516 int i, pf_q, ret = 0;
4517
4518 pf_q = vsi->base_queue;
4519 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4520 ret = i40e_control_wait_rx_q(pf, pf_q, enable);
4521 if (ret) {
4522 dev_info(&pf->pdev->dev,
4523 "VSI seid %d Rx ring %d %sable timeout\n",
4524 vsi->seid, pf_q, (enable ? "en" : "dis"));
4525 break;
4526 }
4527 }
4528
4529
4530
4531
4532 if (!enable)
4533 mdelay(50);
4534
4535 return ret;
4536}
4537
4538
4539
4540
4541
4542int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4543{
4544 int ret = 0;
4545
4546
4547 ret = i40e_vsi_control_rx(vsi, true);
4548 if (ret)
4549 return ret;
4550 ret = i40e_vsi_control_tx(vsi, true);
4551
4552 return ret;
4553}
4554
4555
4556
4557
4558
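/**
 * i40e_vsi_stop_rings - Stop a VSI's rings
 * @vsi: the VSI being configured
 **/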
4559void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4560{
4561
4562 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4563 return i40e_vsi_stop_rings_no_wait(vsi);
4564
4565
4566
4567
4568 i40e_vsi_control_tx(vsi, false);
4569 i40e_vsi_control_rx(vsi, false);
4570}
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582
4583void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4584{
4585 struct i40e_pf *pf = vsi->back;
4586 int i, pf_q;
4587
4588 pf_q = vsi->base_queue;
4589 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4590 i40e_control_tx_q(pf, pf_q, false);
4591 i40e_control_rx_q(pf, pf_q, false);
4592 }
4593}
4594
4595
4596
4597
4598
4599static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4600{
4601 struct i40e_pf *pf = vsi->back;
4602 struct i40e_hw *hw = &pf->hw;
4603 int base = vsi->base_vector;
4604 u32 val, qp;
4605 int i;
4606
4607 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4608 if (!vsi->q_vectors)
4609 return;
4610
4611 if (!vsi->irqs_ready)
4612 return;
4613
4614 vsi->irqs_ready = false;
4615 for (i = 0; i < vsi->num_q_vectors; i++) {
4616 int irq_num;
4617 u16 vector;
4618
4619 vector = i + base;
4620 irq_num = pf->msix_entries[vector].vector;
4621
4622
4623 if (!vsi->q_vectors[i] ||
4624 !vsi->q_vectors[i]->num_ringpairs)
4625 continue;
4626
4627
4628 irq_set_affinity_notifier(irq_num, NULL);
4629
4630 irq_set_affinity_hint(irq_num, NULL);
4631 synchronize_irq(irq_num);
4632 free_irq(irq_num, vsi->q_vectors[i]);
4633
4634
4635
4636
4637
4638
4639
4640
4641 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4642 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4643 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4644 val |= I40E_QUEUE_END_OF_LIST
4645 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4646 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4647
4648 while (qp != I40E_QUEUE_END_OF_LIST) {
4649 u32 next;
4650
4651 val = rd32(hw, I40E_QINT_RQCTL(qp));
4652
4653 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4654 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4655 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4656 I40E_QINT_RQCTL_INTEVENT_MASK);
4657
4658 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4659 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4660
4661 wr32(hw, I40E_QINT_RQCTL(qp), val);
4662
4663 val = rd32(hw, I40E_QINT_TQCTL(qp));
4664
4665 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4666 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4667
4668 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4669 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4670 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4671 I40E_QINT_TQCTL_INTEVENT_MASK);
4672
4673 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4674 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4675
4676 wr32(hw, I40E_QINT_TQCTL(qp), val);
4677 qp = next;
4678 }
4679 }
4680 } else {
4681 free_irq(pf->pdev->irq, pf);
4682
4683 val = rd32(hw, I40E_PFINT_LNKLST0);
4684 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4685 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4686 val |= I40E_QUEUE_END_OF_LIST
4687 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4688 wr32(hw, I40E_PFINT_LNKLST0, val);
4689
4690 val = rd32(hw, I40E_QINT_RQCTL(qp));
4691 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4692 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4693 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4694 I40E_QINT_RQCTL_INTEVENT_MASK);
4695
4696 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4697 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4698
4699 wr32(hw, I40E_QINT_RQCTL(qp), val);
4700
4701 val = rd32(hw, I40E_QINT_TQCTL(qp));
4702
4703 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4704 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4705 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4706 I40E_QINT_TQCTL_INTEVENT_MASK);
4707
4708 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4709 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4710
4711 wr32(hw, I40E_QINT_TQCTL(qp), val);
4712 }
4713}
4714
4715
4716
4717
4718
4719
4720
4721
4722
4723
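/**
 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
 * @vsi: VSI structure containing the q_vector to free
 * @v_idx: Index of vector to be freed
 *
 * Frees the memory allocated to the q_vector.  If NAPI is enabled it
 * also deletes any references to the NAPI struct before freeing the
 * q_vector memory.
 **/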
4724static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4725{
4726 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4727 struct i40e_ring *ring;
4728
4729 if (!q_vector)
4730 return;
4731
4732
4733 i40e_for_each_ring(ring, q_vector->tx)
4734 ring->q_vector = NULL;
4735
4736 i40e_for_each_ring(ring, q_vector->rx)
4737 ring->q_vector = NULL;
4738
4739
4740 if (vsi->netdev)
4741 netif_napi_del(&q_vector->napi);
4742
4743 vsi->q_vectors[v_idx] = NULL;
4744
4745 kfree_rcu(q_vector, rcu);
4746}
4747
4748
4749
4750
4751
4752
4753
4754
4755static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4756{
4757 int v_idx;
4758
4759 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4760 i40e_free_q_vector(vsi, v_idx);
4761}
4762
4763
4764
4765
4766
4767static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4768{
4769
4770 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4771 pci_disable_msix(pf->pdev);
4772 kfree(pf->msix_entries);
4773 pf->msix_entries = NULL;
4774