1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44#include <linux/bitfield.h>
45#include <linux/bpf.h>
46#include <linux/bpf_trace.h>
47#include <linux/module.h>
48#include <linux/kernel.h>
49#include <linux/init.h>
50#include <linux/fs.h>
51#include <linux/netdevice.h>
52#include <linux/etherdevice.h>
53#include <linux/interrupt.h>
54#include <linux/ip.h>
55#include <linux/ipv6.h>
56#include <linux/page_ref.h>
57#include <linux/pci.h>
58#include <linux/pci_regs.h>
59#include <linux/msi.h>
60#include <linux/ethtool.h>
61#include <linux/log2.h>
62#include <linux/if_vlan.h>
63#include <linux/random.h>
64#include <linux/vmalloc.h>
65#include <linux/ktime.h>
66
67#include <net/switchdev.h>
68#include <net/vxlan.h>
69
70#include "nfpcore/nfp_nsp.h"
71#include "nfp_app.h"
72#include "nfp_net_ctrl.h"
73#include "nfp_net.h"
74#include "nfp_net_sriov.h"
75#include "nfp_port.h"
76
77
78
79
80
81
82void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
83 void __iomem *ctrl_bar)
84{
85 u32 reg;
86
87 reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
88 put_unaligned_le32(reg, fw_ver);
89}
90
91static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
92{
93 return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
94 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
95 dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
96}
97
98static void
99nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
100{
101 dma_sync_single_for_device(dp->dev, dma_addr,
102 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
103 dp->rx_dma_dir);
104}
105
106static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
107{
108 dma_unmap_single_attrs(dp->dev, dma_addr,
109 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
110 dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
111}
112
113static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr,
114 unsigned int len)
115{
116 dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
117 len, dp->rx_dma_dir);
118}
119
120
121
122
123
124
125
126static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
127{
128 nn_writel(nn, NFP_NET_CFG_UPDATE, update);
129
130 nn_pci_flush(nn);
131 nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
132}
133
134
135static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
136{
137 update |= nn->reconfig_posted;
138 nn->reconfig_posted = 0;
139
140 nfp_net_reconfig_start(nn, update);
141
142 nn->reconfig_timer_active = true;
143 mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
144}
145
146static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
147{
148 u32 reg;
149
150 reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
151 if (reg == 0)
152 return true;
153 if (reg & NFP_NET_CFG_UPDATE_ERR) {
154 nn_err(nn, "Reconfig error: 0x%08x\n", reg);
155 return true;
156 } else if (last_check) {
157 nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
158 return true;
159 }
160
161 return false;
162}
163
164static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
165{
166 bool timed_out = false;
167
168
169 while (!nfp_net_reconfig_check_done(nn, timed_out)) {
170 msleep(1);
171 timed_out = time_is_before_eq_jiffies(deadline);
172 }
173
174 if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
175 return -EIO;
176
177 return timed_out ? -EIO : 0;
178}
179
180static void nfp_net_reconfig_timer(struct timer_list *t)
181{
182 struct nfp_net *nn = from_timer(nn, t, reconfig_timer);
183
184 spin_lock_bh(&nn->reconfig_lock);
185
186 nn->reconfig_timer_active = false;
187
188
189 if (nn->reconfig_sync_present)
190 goto done;
191
192
193 nfp_net_reconfig_check_done(nn, true);
194
195 if (nn->reconfig_posted)
196 nfp_net_reconfig_start_async(nn, 0);
197done:
198 spin_unlock_bh(&nn->reconfig_lock);
199}
200
201
202
203
204
205
206
207
208
209
210static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
211{
212 spin_lock_bh(&nn->reconfig_lock);
213
214
215 if (nn->reconfig_sync_present) {
216 nn->reconfig_posted |= update;
217 goto done;
218 }
219
220
221 if (!nn->reconfig_timer_active ||
222 nfp_net_reconfig_check_done(nn, false))
223 nfp_net_reconfig_start_async(nn, update);
224 else
225 nn->reconfig_posted |= update;
226done:
227 spin_unlock_bh(&nn->reconfig_lock);
228}
229
230
231
232
233
234
235
236
237
238
239
240
241int nfp_net_reconfig(struct nfp_net *nn, u32 update)
242{
243 bool cancelled_timer = false;
244 u32 pre_posted_requests;
245 int ret;
246
247 spin_lock_bh(&nn->reconfig_lock);
248
249 nn->reconfig_sync_present = true;
250
251 if (nn->reconfig_timer_active) {
252 del_timer(&nn->reconfig_timer);
253 nn->reconfig_timer_active = false;
254 cancelled_timer = true;
255 }
256 pre_posted_requests = nn->reconfig_posted;
257 nn->reconfig_posted = 0;
258
259 spin_unlock_bh(&nn->reconfig_lock);
260
261 if (cancelled_timer)
262 nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
263
264
265 if (pre_posted_requests) {
266 nfp_net_reconfig_start(nn, pre_posted_requests);
267 nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
268 }
269
270 nfp_net_reconfig_start(nn, update);
271 ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
272
273 spin_lock_bh(&nn->reconfig_lock);
274
275 if (nn->reconfig_posted)
276 nfp_net_reconfig_start_async(nn, 0);
277
278 nn->reconfig_sync_present = false;
279
280 spin_unlock_bh(&nn->reconfig_lock);
281
282 return ret;
283}
284
285
286
287
288
289
290
291
292
293
294static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
295{
296 u32 mbox = nn->tlv_caps.mbox_off;
297 int ret;
298
299 if (!nfp_net_has_mbox(&nn->tlv_caps)) {
300 nn_err(nn, "no mailbox present, command: %u\n", mbox_cmd);
301 return -EIO;
302 }
303
304 nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
305
306 ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
307 if (ret) {
308 nn_err(nn, "Mailbox update error\n");
309 return ret;
310 }
311
312 return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
313}
314
315
316
317
318
319
320
321
322
323
324
325static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
326{
327 nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
328 nn_pci_flush(nn);
329}
330
331
332
333
334
335
336
337
338
339
340unsigned int
341nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
342 unsigned int min_irqs, unsigned int wanted_irqs)
343{
344 unsigned int i;
345 int got_irqs;
346
347 for (i = 0; i < wanted_irqs; i++)
348 irq_entries[i].entry = i;
349
350 got_irqs = pci_enable_msix_range(pdev, irq_entries,
351 min_irqs, wanted_irqs);
352 if (got_irqs < 0) {
353 dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n",
354 min_irqs, wanted_irqs, got_irqs);
355 return 0;
356 }
357
358 if (got_irqs < wanted_irqs)
359 dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
360 wanted_irqs, got_irqs);
361
362 return got_irqs;
363}
364
365
366
367
368
369
370
371
372
373
374void
375nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
376 unsigned int n)
377{
378 struct nfp_net_dp *dp = &nn->dp;
379
380 nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
381 dp->num_r_vecs = nn->max_r_vecs;
382
383 memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);
384
385 if (dp->num_rx_rings > dp->num_r_vecs ||
386 dp->num_tx_rings > dp->num_r_vecs)
387 dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n",
388 dp->num_rx_rings, dp->num_tx_rings,
389 dp->num_r_vecs);
390
391 dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
392 dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
393 dp->num_stack_tx_rings = dp->num_tx_rings;
394}
395
396
397
398
399
400
401
402void nfp_net_irqs_disable(struct pci_dev *pdev)
403{
404 pci_disable_msix(pdev);
405}
406
407
408
409
410
411
412
413
414static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
415{
416 struct nfp_net_r_vector *r_vec = data;
417
418 napi_schedule_irqoff(&r_vec->napi);
419
420
421
422
423
424 return IRQ_HANDLED;
425}
426
427static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data)
428{
429 struct nfp_net_r_vector *r_vec = data;
430
431 tasklet_schedule(&r_vec->tasklet);
432
433 return IRQ_HANDLED;
434}
435
436
437
438
439
440static void nfp_net_read_link_status(struct nfp_net *nn)
441{
442 unsigned long flags;
443 bool link_up;
444 u32 sts;
445
446 spin_lock_irqsave(&nn->link_status_lock, flags);
447
448 sts = nn_readl(nn, NFP_NET_CFG_STS);
449 link_up = !!(sts & NFP_NET_CFG_STS_LINK);
450
451 if (nn->link_up == link_up)
452 goto out;
453
454 nn->link_up = link_up;
455 if (nn->port)
456 set_bit(NFP_PORT_CHANGED, &nn->port->flags);
457
458 if (nn->link_up) {
459 netif_carrier_on(nn->dp.netdev);
460 netdev_info(nn->dp.netdev, "NIC Link is Up\n");
461 } else {
462 netif_carrier_off(nn->dp.netdev);
463 netdev_info(nn->dp.netdev, "NIC Link is Down\n");
464 }
465out:
466 spin_unlock_irqrestore(&nn->link_status_lock, flags);
467}
468
469
470
471
472
473
474
475
476static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
477{
478 struct nfp_net *nn = data;
479 struct msix_entry *entry;
480
481 entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];
482
483 nfp_net_read_link_status(nn);
484
485 nfp_net_irq_unmask(nn, entry->entry);
486
487 return IRQ_HANDLED;
488}
489
490
491
492
493
494
495
496
497static irqreturn_t nfp_net_irq_exn(int irq, void *data)
498{
499 struct nfp_net *nn = data;
500
501 nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
502
503 return IRQ_HANDLED;
504}
505
506
507
508
509
510
511
512
513static void
514nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
515 struct nfp_net_r_vector *r_vec, unsigned int idx,
516 bool is_xdp)
517{
518 struct nfp_net *nn = r_vec->nfp_net;
519
520 tx_ring->idx = idx;
521 tx_ring->r_vec = r_vec;
522 tx_ring->is_xdp = is_xdp;
523 u64_stats_init(&tx_ring->r_vec->tx_sync);
524
525 tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
526 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
527}
528
529
530
531
532
533
534
535static void
536nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
537 struct nfp_net_r_vector *r_vec, unsigned int idx)
538{
539 struct nfp_net *nn = r_vec->nfp_net;
540
541 rx_ring->idx = idx;
542 rx_ring->r_vec = r_vec;
543 u64_stats_init(&rx_ring->r_vec->rx_sync);
544
545 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
546 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
547}
548
549
550
551
552
553
554
555
556
557
558
559static int
560nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
561 const char *format, char *name, size_t name_sz,
562 unsigned int vector_idx, irq_handler_t handler)
563{
564 struct msix_entry *entry;
565 int err;
566
567 entry = &nn->irq_entries[vector_idx];
568
569 snprintf(name, name_sz, format, nfp_net_name(nn));
570 err = request_irq(entry->vector, handler, 0, name, nn);
571 if (err) {
572 nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
573 entry->vector, err);
574 return err;
575 }
576 nn_writeb(nn, ctrl_offset, entry->entry);
577 nfp_net_irq_unmask(nn, entry->entry);
578
579 return 0;
580}
581
582
583
584
585
586
587
588static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
589 unsigned int vector_idx)
590{
591 nn_writeb(nn, ctrl_offset, 0xff);
592 nn_pci_flush(nn);
593 free_irq(nn->irq_entries[vector_idx].vector, nn);
594}
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620static int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
621{
622 return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
623}
624
625
626static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
627{
628 return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
629}
630
631static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
632{
633 return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
634}
635
636
637
638
639
640
641
642
643
644
645static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
646 struct nfp_net_tx_ring *tx_ring)
647{
648 netif_tx_stop_queue(nd_q);
649
650
651 smp_mb();
652 if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
653 netif_tx_start_queue(nd_q);
654}
655
656
657
658
659
660
661
662
663
664
665
666static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
667 struct nfp_net_tx_buf *txbuf,
668 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
669{
670 u32 hdrlen;
671 u16 mss;
672
673 if (!skb_is_gso(skb))
674 return;
675
676 if (!skb->encapsulation) {
677 txd->l3_offset = skb_network_offset(skb);
678 txd->l4_offset = skb_transport_offset(skb);
679 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
680 } else {
681 txd->l3_offset = skb_inner_network_offset(skb);
682 txd->l4_offset = skb_inner_transport_offset(skb);
683 hdrlen = skb_inner_transport_header(skb) - skb->data +
684 inner_tcp_hdrlen(skb);
685 }
686
687 txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
688 txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
689
690 mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
691 txd->lso_hdrlen = hdrlen;
692 txd->mss = cpu_to_le16(mss);
693 txd->flags |= PCIE_DESC_TX_LSO;
694
695 u64_stats_update_begin(&r_vec->tx_sync);
696 r_vec->tx_lso++;
697 u64_stats_update_end(&r_vec->tx_sync);
698}
699
700
701
702
703
704
705
706
707
708
709
710
711static void nfp_net_tx_csum(struct nfp_net_dp *dp,
712 struct nfp_net_r_vector *r_vec,
713 struct nfp_net_tx_buf *txbuf,
714 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
715{
716 struct ipv6hdr *ipv6h;
717 struct iphdr *iph;
718 u8 l4_hdr;
719
720 if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
721 return;
722
723 if (skb->ip_summed != CHECKSUM_PARTIAL)
724 return;
725
726 txd->flags |= PCIE_DESC_TX_CSUM;
727 if (skb->encapsulation)
728 txd->flags |= PCIE_DESC_TX_ENCAP;
729
730 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
731 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
732
733 if (iph->version == 4) {
734 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
735 l4_hdr = iph->protocol;
736 } else if (ipv6h->version == 6) {
737 l4_hdr = ipv6h->nexthdr;
738 } else {
739 nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
740 return;
741 }
742
743 switch (l4_hdr) {
744 case IPPROTO_TCP:
745 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
746 break;
747 case IPPROTO_UDP:
748 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
749 break;
750 default:
751 nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
752 return;
753 }
754
755 u64_stats_update_begin(&r_vec->tx_sync);
756 if (skb->encapsulation)
757 r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
758 else
759 r_vec->hw_csum_tx += txbuf->pkt_cnt;
760 u64_stats_update_end(&r_vec->tx_sync);
761}
762
763static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
764{
765 wmb();
766 nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
767 tx_ring->wr_ptr_add = 0;
768}
769
770static int nfp_net_prep_port_id(struct sk_buff *skb)
771{
772 struct metadata_dst *md_dst = skb_metadata_dst(skb);
773 unsigned char *data;
774
775 if (likely(!md_dst))
776 return 0;
777 if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
778 return 0;
779
780 if (unlikely(skb_cow_head(skb, 8)))
781 return -ENOMEM;
782
783 data = skb_push(skb, 8);
784 put_unaligned_be32(NFP_NET_META_PORTID, data);
785 put_unaligned_be32(md_dst->u.port_info.port_id, data + 4);
786
787 return 8;
788}
789
790
791
792
793
794
795
796
797static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
798{
799 struct nfp_net *nn = netdev_priv(netdev);
800 const struct skb_frag_struct *frag;
801 struct nfp_net_tx_desc *txd, txdg;
802 int f, nr_frags, wr_idx, md_bytes;
803 struct nfp_net_tx_ring *tx_ring;
804 struct nfp_net_r_vector *r_vec;
805 struct nfp_net_tx_buf *txbuf;
806 struct netdev_queue *nd_q;
807 struct nfp_net_dp *dp;
808 dma_addr_t dma_addr;
809 unsigned int fsize;
810 u16 qidx;
811
812 dp = &nn->dp;
813 qidx = skb_get_queue_mapping(skb);
814 tx_ring = &dp->tx_rings[qidx];
815 r_vec = tx_ring->r_vec;
816 nd_q = netdev_get_tx_queue(dp->netdev, qidx);
817
818 nr_frags = skb_shinfo(skb)->nr_frags;
819
820 if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
821 nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
822 qidx, tx_ring->wr_p, tx_ring->rd_p);
823 netif_tx_stop_queue(nd_q);
824 nfp_net_tx_xmit_more_flush(tx_ring);
825 u64_stats_update_begin(&r_vec->tx_sync);
826 r_vec->tx_busy++;
827 u64_stats_update_end(&r_vec->tx_sync);
828 return NETDEV_TX_BUSY;
829 }
830
831 md_bytes = nfp_net_prep_port_id(skb);
832 if (unlikely(md_bytes < 0)) {
833 nfp_net_tx_xmit_more_flush(tx_ring);
834 dev_kfree_skb_any(skb);
835 return NETDEV_TX_OK;
836 }
837
838
839 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
840 DMA_TO_DEVICE);
841 if (dma_mapping_error(dp->dev, dma_addr))
842 goto err_free;
843
844 wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
845
846
847 txbuf = &tx_ring->txbufs[wr_idx];
848 txbuf->skb = skb;
849 txbuf->dma_addr = dma_addr;
850 txbuf->fidx = -1;
851 txbuf->pkt_cnt = 1;
852 txbuf->real_len = skb->len;
853
854
855 txd = &tx_ring->txds[wr_idx];
856 txd->offset_eop = (nr_frags ? 0 : PCIE_DESC_TX_EOP) | md_bytes;
857 txd->dma_len = cpu_to_le16(skb_headlen(skb));
858 nfp_desc_set_dma_addr(txd, dma_addr);
859 txd->data_len = cpu_to_le16(skb->len);
860
861 txd->flags = 0;
862 txd->mss = 0;
863 txd->lso_hdrlen = 0;
864
865
866 nfp_net_tx_tso(r_vec, txbuf, txd, skb);
867 nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
868 if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
869 txd->flags |= PCIE_DESC_TX_VLAN;
870 txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
871 }
872
873
874 if (nr_frags > 0) {
875
876 txdg = *txd;
877
878 for (f = 0; f < nr_frags; f++) {
879 frag = &skb_shinfo(skb)->frags[f];
880 fsize = skb_frag_size(frag);
881
882 dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
883 fsize, DMA_TO_DEVICE);
884 if (dma_mapping_error(dp->dev, dma_addr))
885 goto err_unmap;
886
887 wr_idx = D_IDX(tx_ring, wr_idx + 1);
888 tx_ring->txbufs[wr_idx].skb = skb;
889 tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
890 tx_ring->txbufs[wr_idx].fidx = f;
891
892 txd = &tx_ring->txds[wr_idx];
893 *txd = txdg;
894 txd->dma_len = cpu_to_le16(fsize);
895 nfp_desc_set_dma_addr(txd, dma_addr);
896 txd->offset_eop |=
897 (f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
898 }
899
900 u64_stats_update_begin(&r_vec->tx_sync);
901 r_vec->tx_gather++;
902 u64_stats_update_end(&r_vec->tx_sync);
903 }
904
905 netdev_tx_sent_queue(nd_q, txbuf->real_len);
906
907 skb_tx_timestamp(skb);
908
909 tx_ring->wr_p += nr_frags + 1;
910 if (nfp_net_tx_ring_should_stop(tx_ring))
911 nfp_net_tx_ring_stop(nd_q, tx_ring);
912
913 tx_ring->wr_ptr_add += nr_frags + 1;
914 if (!skb->xmit_more || netif_xmit_stopped(nd_q))
915 nfp_net_tx_xmit_more_flush(tx_ring);
916
917 return NETDEV_TX_OK;
918
919err_unmap:
920 while (--f >= 0) {
921 frag = &skb_shinfo(skb)->frags[f];
922 dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
923 skb_frag_size(frag), DMA_TO_DEVICE);
924 tx_ring->txbufs[wr_idx].skb = NULL;
925 tx_ring->txbufs[wr_idx].dma_addr = 0;
926 tx_ring->txbufs[wr_idx].fidx = -2;
927 wr_idx = wr_idx - 1;
928 if (wr_idx < 0)
929 wr_idx += tx_ring->cnt;
930 }
931 dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
932 skb_headlen(skb), DMA_TO_DEVICE);
933 tx_ring->txbufs[wr_idx].skb = NULL;
934 tx_ring->txbufs[wr_idx].dma_addr = 0;
935 tx_ring->txbufs[wr_idx].fidx = -2;
936err_free:
937 nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
938 nfp_net_tx_xmit_more_flush(tx_ring);
939 u64_stats_update_begin(&r_vec->tx_sync);
940 r_vec->tx_errors++;
941 u64_stats_update_end(&r_vec->tx_sync);
942 dev_kfree_skb_any(skb);
943 return NETDEV_TX_OK;
944}
945
946
947
948
949
950
951
952static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
953{
954 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
955 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
956 const struct skb_frag_struct *frag;
957 struct netdev_queue *nd_q;
958 u32 done_pkts = 0, done_bytes = 0;
959 struct sk_buff *skb;
960 int todo, nr_frags;
961 u32 qcp_rd_p;
962 int fidx;
963 int idx;
964
965 if (tx_ring->wr_p == tx_ring->rd_p)
966 return;
967
968
969 qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
970
971 if (qcp_rd_p == tx_ring->qcp_rd_p)
972 return;
973
974 todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
975
976 while (todo--) {
977 idx = D_IDX(tx_ring, tx_ring->rd_p++);
978
979 skb = tx_ring->txbufs[idx].skb;
980 if (!skb)
981 continue;
982
983 nr_frags = skb_shinfo(skb)->nr_frags;
984 fidx = tx_ring->txbufs[idx].fidx;
985
986 if (fidx == -1) {
987
988 dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr,
989 skb_headlen(skb), DMA_TO_DEVICE);
990
991 done_pkts += tx_ring->txbufs[idx].pkt_cnt;
992 done_bytes += tx_ring->txbufs[idx].real_len;
993 } else {
994
995 frag = &skb_shinfo(skb)->frags[fidx];
996 dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr,
997 skb_frag_size(frag), DMA_TO_DEVICE);
998 }
999
1000
1001 if (fidx == nr_frags - 1)
1002 dev_consume_skb_any(skb);
1003
1004 tx_ring->txbufs[idx].dma_addr = 0;
1005 tx_ring->txbufs[idx].skb = NULL;
1006 tx_ring->txbufs[idx].fidx = -2;
1007 }
1008
1009 tx_ring->qcp_rd_p = qcp_rd_p;
1010
1011 u64_stats_update_begin(&r_vec->tx_sync);
1012 r_vec->tx_bytes += done_bytes;
1013 r_vec->tx_pkts += done_pkts;
1014 u64_stats_update_end(&r_vec->tx_sync);
1015
1016 if (!dp->netdev)
1017 return;
1018
1019 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
1020 netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
1021 if (nfp_net_tx_ring_should_wake(tx_ring)) {
1022
1023 smp_mb();
1024
1025 if (unlikely(netif_tx_queue_stopped(nd_q)))
1026 netif_tx_wake_queue(nd_q);
1027 }
1028
1029 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
1030 "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
1031 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
1032}
1033
1034static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
1035{
1036 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1037 u32 done_pkts = 0, done_bytes = 0;
1038 bool done_all;
1039 int idx, todo;
1040 u32 qcp_rd_p;
1041
1042
1043 qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
1044
1045 if (qcp_rd_p == tx_ring->qcp_rd_p)
1046 return true;
1047
1048 todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
1049
1050 done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
1051 todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
1052
1053 tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
1054
1055 done_pkts = todo;
1056 while (todo--) {
1057 idx = D_IDX(tx_ring, tx_ring->rd_p);
1058 tx_ring->rd_p++;
1059
1060 done_bytes += tx_ring->txbufs[idx].real_len;
1061 }
1062
1063 u64_stats_update_begin(&r_vec->tx_sync);
1064 r_vec->tx_bytes += done_bytes;
1065 r_vec->tx_pkts += done_pkts;
1066 u64_stats_update_end(&r_vec->tx_sync);
1067
1068 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
1069 "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
1070 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
1071
1072 return done_all;
1073}
1074
1075
1076
1077
1078
1079
1080
1081
1082static void
1083nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
1084{
1085 const struct skb_frag_struct *frag;
1086 struct netdev_queue *nd_q;
1087
1088 while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
1089 struct nfp_net_tx_buf *tx_buf;
1090 struct sk_buff *skb;
1091 int idx, nr_frags;
1092
1093 idx = D_IDX(tx_ring, tx_ring->rd_p);
1094 tx_buf = &tx_ring->txbufs[idx];
1095
1096 skb = tx_ring->txbufs[idx].skb;
1097 nr_frags = skb_shinfo(skb)->nr_frags;
1098
1099 if (tx_buf->fidx == -1) {
1100
1101 dma_unmap_single(dp->dev, tx_buf->dma_addr,
1102 skb_headlen(skb), DMA_TO_DEVICE);
1103 } else {
1104
1105 frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
1106 dma_unmap_page(dp->dev, tx_buf->dma_addr,
1107 skb_frag_size(frag), DMA_TO_DEVICE);
1108 }
1109
1110
1111 if (tx_buf->fidx == nr_frags - 1)
1112 dev_kfree_skb_any(skb);
1113
1114 tx_buf->dma_addr = 0;
1115 tx_buf->skb = NULL;
1116 tx_buf->fidx = -2;
1117
1118 tx_ring->qcp_rd_p++;
1119 tx_ring->rd_p++;
1120 }
1121
1122 memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
1123 tx_ring->wr_p = 0;
1124 tx_ring->rd_p = 0;
1125 tx_ring->qcp_rd_p = 0;
1126 tx_ring->wr_ptr_add = 0;
1127
1128 if (tx_ring->is_xdp || !dp->netdev)
1129 return;
1130
1131 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
1132 netdev_tx_reset_queue(nd_q);
1133}
1134
1135static void nfp_net_tx_timeout(struct net_device *netdev)
1136{
1137 struct nfp_net *nn = netdev_priv(netdev);
1138 int i;
1139
1140 for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
1141 if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
1142 continue;
1143 nn_warn(nn, "TX timeout on ring: %d\n", i);
1144 }
1145 nn_warn(nn, "TX watchdog timeout\n");
1146}
1147
1148
1149
1150static unsigned int
1151nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
1152{
1153 unsigned int fl_bufsz;
1154
1155 fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
1156 fl_bufsz += dp->rx_dma_off;
1157 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1158 fl_bufsz += NFP_NET_MAX_PREPEND;
1159 else
1160 fl_bufsz += dp->rx_offset;
1161 fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;
1162
1163 fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
1164 fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1165
1166 return fl_bufsz;
1167}
1168
1169static void
1170nfp_net_free_frag(void *frag, bool xdp)
1171{
1172 if (!xdp)
1173 skb_free_frag(frag);
1174 else
1175 __free_page(virt_to_page(frag));
1176}
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1188{
1189 void *frag;
1190
1191 if (!dp->xdp_prog) {
1192 frag = netdev_alloc_frag(dp->fl_bufsz);
1193 } else {
1194 struct page *page;
1195
1196 page = alloc_page(GFP_KERNEL);
1197 frag = page ? page_address(page) : NULL;
1198 }
1199 if (!frag) {
1200 nn_dp_warn(dp, "Failed to alloc receive page frag\n");
1201 return NULL;
1202 }
1203
1204 *dma_addr = nfp_net_dma_map_rx(dp, frag);
1205 if (dma_mapping_error(dp->dev, *dma_addr)) {
1206 nfp_net_free_frag(frag, dp->xdp_prog);
1207 nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
1208 return NULL;
1209 }
1210
1211 return frag;
1212}
1213
1214static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1215{
1216 void *frag;
1217
1218 if (!dp->xdp_prog) {
1219 frag = napi_alloc_frag(dp->fl_bufsz);
1220 if (unlikely(!frag))
1221 return NULL;
1222 } else {
1223 struct page *page;
1224
1225 page = dev_alloc_page();
1226 if (unlikely(!page))
1227 return NULL;
1228 frag = page_address(page);
1229 }
1230
1231 *dma_addr = nfp_net_dma_map_rx(dp, frag);
1232 if (dma_mapping_error(dp->dev, *dma_addr)) {
1233 nfp_net_free_frag(frag, dp->xdp_prog);
1234 nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
1235 return NULL;
1236 }
1237
1238 return frag;
1239}
1240
1241
1242
1243
1244
1245
1246
1247
1248static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
1249 struct nfp_net_rx_ring *rx_ring,
1250 void *frag, dma_addr_t dma_addr)
1251{
1252 unsigned int wr_idx;
1253
1254 wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
1255
1256 nfp_net_dma_sync_dev_rx(dp, dma_addr);
1257
1258
1259 rx_ring->rxbufs[wr_idx].frag = frag;
1260 rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
1261
1262
1263 rx_ring->rxds[wr_idx].fld.reserved = 0;
1264 rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
1265 nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
1266 dma_addr + dp->rx_dma_off);
1267
1268 rx_ring->wr_p++;
1269 if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
1270
1271
1272
1273 wmb();
1274 nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
1275 }
1276}
1277
1278
1279
1280
1281
1282
1283
1284
1285static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
1286{
1287 unsigned int wr_idx, last_idx;
1288
1289
1290 wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
1291 last_idx = rx_ring->cnt - 1;
1292 rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
1293 rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
1294 rx_ring->rxbufs[last_idx].dma_addr = 0;
1295 rx_ring->rxbufs[last_idx].frag = NULL;
1296
1297 memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
1298 rx_ring->wr_p = 0;
1299 rx_ring->rd_p = 0;
1300}
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311static void
1312nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
1313 struct nfp_net_rx_ring *rx_ring)
1314{
1315 unsigned int i;
1316
1317 for (i = 0; i < rx_ring->cnt - 1; i++) {
1318
1319
1320
1321
1322 if (!rx_ring->rxbufs[i].frag)
1323 continue;
1324
1325 nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
1326 nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
1327 rx_ring->rxbufs[i].dma_addr = 0;
1328 rx_ring->rxbufs[i].frag = NULL;
1329 }
1330}
1331
1332
1333
1334
1335
1336
1337static int
1338nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
1339 struct nfp_net_rx_ring *rx_ring)
1340{
1341 struct nfp_net_rx_buf *rxbufs;
1342 unsigned int i;
1343
1344 rxbufs = rx_ring->rxbufs;
1345
1346 for (i = 0; i < rx_ring->cnt - 1; i++) {
1347 rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
1348 if (!rxbufs[i].frag) {
1349 nfp_net_rx_ring_bufs_free(dp, rx_ring);
1350 return -ENOMEM;
1351 }
1352 }
1353
1354 return 0;
1355}
1356
1357
1358
1359
1360
1361
1362static void
1363nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
1364 struct nfp_net_rx_ring *rx_ring)
1365{
1366 unsigned int i;
1367
1368 for (i = 0; i < rx_ring->cnt - 1; i++)
1369 nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
1370 rx_ring->rxbufs[i].dma_addr);
1371}
1372
1373
1374
1375
1376
1377static int nfp_net_rx_csum_has_errors(u16 flags)
1378{
1379 u16 csum_all_checked, csum_all_ok;
1380
1381 csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
1382 csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
1383
1384 return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
1385}
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395static void nfp_net_rx_csum(struct nfp_net_dp *dp,
1396 struct nfp_net_r_vector *r_vec,
1397 struct nfp_net_rx_desc *rxd,
1398 struct nfp_meta_parsed *meta, struct sk_buff *skb)
1399{
1400 skb_checksum_none_assert(skb);
1401
1402 if (!(dp->netdev->features & NETIF_F_RXCSUM))
1403 return;
1404
1405 if (meta->csum_type) {
1406 skb->ip_summed = meta->csum_type;
1407 skb->csum = meta->csum;
1408 u64_stats_update_begin(&r_vec->rx_sync);
1409 r_vec->hw_csum_rx_complete++;
1410 u64_stats_update_end(&r_vec->rx_sync);
1411 return;
1412 }
1413
1414 if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
1415 u64_stats_update_begin(&r_vec->rx_sync);
1416 r_vec->hw_csum_rx_error++;
1417 u64_stats_update_end(&r_vec->rx_sync);
1418 return;
1419 }
1420
1421
1422
1423
1424
1425 if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
1426 rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
1427 __skb_incr_checksum_unnecessary(skb);
1428 u64_stats_update_begin(&r_vec->rx_sync);
1429 r_vec->hw_csum_rx_ok++;
1430 u64_stats_update_end(&r_vec->rx_sync);
1431 }
1432
1433 if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
1434 rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
1435 __skb_incr_checksum_unnecessary(skb);
1436 u64_stats_update_begin(&r_vec->rx_sync);
1437 r_vec->hw_csum_rx_inner_ok++;
1438 u64_stats_update_end(&r_vec->rx_sync);
1439 }
1440}
1441
1442static void
1443nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
1444 unsigned int type, __be32 *hash)
1445{
1446 if (!(netdev->features & NETIF_F_RXHASH))
1447 return;
1448
1449 switch (type) {
1450 case NFP_NET_RSS_IPV4:
1451 case NFP_NET_RSS_IPV6:
1452 case NFP_NET_RSS_IPV6_EX:
1453 meta->hash_type = PKT_HASH_TYPE_L3;
1454 break;
1455 default:
1456 meta->hash_type = PKT_HASH_TYPE_L4;
1457 break;
1458 }
1459
1460 meta->hash = get_unaligned_be32(hash);
1461}
1462
1463static void
1464nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
1465 void *data, struct nfp_net_rx_desc *rxd)
1466{
1467 struct nfp_net_rx_hash *rx_hash = data;
1468
1469 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
1470 return;
1471
1472 nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
1473 &rx_hash->hash);
1474}
1475
1476static void *
1477nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
1478 void *data, int meta_len)
1479{
1480 u32 meta_info;
1481
1482 meta_info = get_unaligned_be32(data);
1483 data += 4;
1484
1485 while (meta_info) {
1486 switch (meta_info & NFP_NET_META_FIELD_MASK) {
1487 case NFP_NET_META_HASH:
1488 meta_info >>= NFP_NET_META_FIELD_SIZE;
1489 nfp_net_set_hash(netdev, meta,
1490 meta_info & NFP_NET_META_FIELD_MASK,
1491 (__be32 *)data);
1492 data += 4;
1493 break;
1494 case NFP_NET_META_MARK:
1495 meta->mark = get_unaligned_be32(data);
1496 data += 4;
1497 break;
1498 case NFP_NET_META_PORTID:
1499 meta->portid = get_unaligned_be32(data);
1500 data += 4;
1501 break;
1502 case NFP_NET_META_CSUM:
1503 meta->csum_type = CHECKSUM_COMPLETE;
1504 meta->csum =
1505 (__force __wsum)__get_unaligned_cpu32(data);
1506 data += 4;
1507 break;
1508 default:
1509 return NULL;
1510 }
1511
1512 meta_info >>= NFP_NET_META_FIELD_SIZE;
1513 }
1514
1515 return data;
1516}
1517
1518static void
1519nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
1520 struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
1521 struct sk_buff *skb)
1522{
1523 u64_stats_update_begin(&r_vec->rx_sync);
1524 r_vec->rx_drops++;
1525
1526
1527
1528 if (skb && rxbuf)
1529 r_vec->rx_replace_buf_alloc_fail++;
1530 u64_stats_update_end(&r_vec->rx_sync);
1531
1532
1533
1534
1535 if (skb && rxbuf && skb->head == rxbuf->frag)
1536 page_ref_inc(virt_to_head_page(rxbuf->frag));
1537 if (rxbuf)
1538 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
1539 if (skb)
1540 dev_kfree_skb_any(skb);
1541}
1542
1543static bool
1544nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
1545 struct nfp_net_tx_ring *tx_ring,
1546 struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
1547 unsigned int pkt_len, bool *completed)
1548{
1549 struct nfp_net_tx_buf *txbuf;
1550 struct nfp_net_tx_desc *txd;
1551 int wr_idx;
1552
1553 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
1554 if (!*completed) {
1555 nfp_net_xdp_complete(tx_ring);
1556 *completed = true;
1557 }
1558
1559 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
1560 nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
1561 NULL);
1562 return false;
1563 }
1564 }
1565
1566 wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
1567
1568
1569 txbuf = &tx_ring->txbufs[wr_idx];
1570
1571 nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);
1572
1573 txbuf->frag = rxbuf->frag;
1574 txbuf->dma_addr = rxbuf->dma_addr;
1575 txbuf->fidx = -1;
1576 txbuf->pkt_cnt = 1;
1577 txbuf->real_len = pkt_len;
1578
1579 dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
1580 pkt_len, DMA_BIDIRECTIONAL);
1581
1582
1583 txd = &tx_ring->txds[wr_idx];
1584 txd->offset_eop = PCIE_DESC_TX_EOP;
1585 txd->dma_len = cpu_to_le16(pkt_len);
1586 nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
1587 txd->data_len = cpu_to_le16(pkt_len);
1588
1589 txd->flags = 0;
1590 txd->mss = 0;
1591 txd->lso_hdrlen = 0;
1592
1593 tx_ring->wr_p++;
1594 tx_ring->wr_ptr_add++;
1595 return true;
1596}
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1610{
1611 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1612 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
1613 struct nfp_net_tx_ring *tx_ring;
1614 struct bpf_prog *xdp_prog;
1615 bool xdp_tx_cmpl = false;
1616 unsigned int true_bufsz;
1617 struct sk_buff *skb;
1618 int pkts_polled = 0;
1619 struct xdp_buff xdp;
1620 int idx;
1621
1622 rcu_read_lock();
1623 xdp_prog = READ_ONCE(dp->xdp_prog);
1624 true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
1625 xdp.rxq = &rx_ring->xdp_rxq;
1626 tx_ring = r_vec->xdp_ring;
1627
1628 while (pkts_polled < budget) {
1629 unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
1630 struct nfp_net_rx_buf *rxbuf;
1631 struct nfp_net_rx_desc *rxd;
1632 struct nfp_meta_parsed meta;
1633 struct net_device *netdev;
1634 dma_addr_t new_dma_addr;
1635 u32 meta_len_xdp = 0;
1636 void *new_frag;
1637
1638 idx = D_IDX(rx_ring, rx_ring->rd_p);
1639
1640 rxd = &rx_ring->rxds[idx];
1641 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
1642 break;
1643
1644
1645
1646
1647 dma_rmb();
1648
1649 memset(&meta, 0, sizeof(meta));
1650
1651 rx_ring->rd_p++;
1652 pkts_polled++;
1653
1654 rxbuf = &rx_ring->rxbufs[idx];
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
1668 data_len = le16_to_cpu(rxd->rxd.data_len);
1669 pkt_len = data_len - meta_len;
1670
1671 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
1672 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1673 pkt_off += meta_len;
1674 else
1675 pkt_off += dp->rx_offset;
1676 meta_off = pkt_off - meta_len;
1677
1678
1679 u64_stats_update_begin(&r_vec->rx_sync);
1680 r_vec->rx_pkts++;
1681 r_vec->rx_bytes += pkt_len;
1682 u64_stats_update_end(&r_vec->rx_sync);
1683
1684 if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
1685 (dp->rx_offset && meta_len > dp->rx_offset))) {
1686 nn_dp_warn(dp, "oversized RX packet metadata %u\n",
1687 meta_len);
1688 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
1689 continue;
1690 }
1691
1692 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
1693 data_len);
1694
1695 if (!dp->chained_metadata_format) {
1696 nfp_net_set_hash_desc(dp->netdev, &meta,
1697 rxbuf->frag + meta_off, rxd);
1698 } else if (meta_len) {
1699 void *end;
1700
1701 end = nfp_net_parse_meta(dp->netdev, &meta,
1702 rxbuf->frag + meta_off,
1703 meta_len);
1704 if (unlikely(end != rxbuf->frag + pkt_off)) {
1705 nn_dp_warn(dp, "invalid RX packet metadata\n");
1706 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
1707 NULL);
1708 continue;
1709 }
1710 }
1711
1712 if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
1713 dp->bpf_offload_xdp) && !meta.portid) {
1714 void *orig_data = rxbuf->frag + pkt_off;
1715 unsigned int dma_off;
1716 int act;
1717
1718 xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
1719 xdp.data = orig_data;
1720 xdp.data_meta = orig_data;
1721 xdp.data_end = orig_data + pkt_len;
1722
1723 act = bpf_prog_run_xdp(xdp_prog, &xdp);
1724
1725 pkt_len -= xdp.data - orig_data;
1726 pkt_off += xdp.data - orig_data;
1727
1728 switch (act) {
1729 case XDP_PASS:
1730 meta_len_xdp = xdp.data - xdp.data_meta;
1731 break;
1732 case XDP_TX:
1733 dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
1734 if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
1735 tx_ring, rxbuf,
1736 dma_off,
1737 pkt_len,
1738 &xdp_tx_cmpl)))
1739 trace_xdp_exception(dp->netdev,
1740 xdp_prog, act);
1741 continue;
1742 default:
1743 bpf_warn_invalid_xdp_action(act);
1744
1745 case XDP_ABORTED:
1746 trace_xdp_exception(dp->netdev, xdp_prog, act);
1747
1748 case XDP_DROP:
1749 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
1750 rxbuf->dma_addr);
1751 continue;
1752 }
1753 }
1754
1755 skb = build_skb(rxbuf->frag, true_bufsz);
1756 if (unlikely(!skb)) {
1757 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
1758 continue;
1759 }
1760 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
1761 if (unlikely(!new_frag)) {
1762 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
1763 continue;
1764 }
1765
1766 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
1767
1768 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
1769
1770 if (likely(!meta.portid)) {
1771 netdev = dp->netdev;
1772 } else {
1773 struct nfp_net *nn;
1774
1775 nn = netdev_priv(dp->netdev);
1776 netdev = nfp_app_repr_get(nn->app, meta.portid);
1777 if (unlikely(!netdev)) {
1778 nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
1779 continue;
1780 }
1781 nfp_repr_inc_rx_stats(netdev, pkt_len);
1782 }
1783
1784 skb_reserve(skb, pkt_off);
1785 skb_put(skb, pkt_len);
1786
1787 skb->mark = meta.mark;
1788 skb_set_hash(skb, meta.hash, meta.hash_type);
1789
1790 skb_record_rx_queue(skb, rx_ring->idx);
1791 skb->protocol = eth_type_trans(skb, netdev);
1792
1793 nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
1794
1795 if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
1796 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1797 le16_to_cpu(rxd->rxd.vlan));
1798 if (meta_len_xdp)
1799 skb_metadata_set(skb, meta_len_xdp);
1800
1801 napi_gro_receive(&rx_ring->r_vec->napi, skb);
1802 }
1803
1804 if (xdp_prog) {
1805 if (tx_ring->wr_ptr_add)
1806 nfp_net_tx_xmit_more_flush(tx_ring);
1807 else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
1808 !xdp_tx_cmpl)
1809 if (!nfp_net_xdp_complete(tx_ring))
1810 pkts_polled = budget;
1811 }
1812 rcu_read_unlock();
1813
1814 return pkts_polled;
1815}
1816
1817
1818
1819
1820
1821
1822
1823
1824static int nfp_net_poll(struct napi_struct *napi, int budget)
1825{
1826 struct nfp_net_r_vector *r_vec =
1827 container_of(napi, struct nfp_net_r_vector, napi);
1828 unsigned int pkts_polled = 0;
1829
1830 if (r_vec->tx_ring)
1831 nfp_net_tx_complete(r_vec->tx_ring);
1832 if (r_vec->rx_ring)
1833 pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
1834
1835 if (pkts_polled < budget)
1836 if (napi_complete_done(napi, pkts_polled))
1837 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
1838
1839 return pkts_polled;
1840}
1841
1842
1843
1844
1845static bool
1846nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1847 struct sk_buff *skb, bool old)
1848{
1849 unsigned int real_len = skb->len, meta_len = 0;
1850 struct nfp_net_tx_ring *tx_ring;
1851 struct nfp_net_tx_buf *txbuf;
1852 struct nfp_net_tx_desc *txd;
1853 struct nfp_net_dp *dp;
1854 dma_addr_t dma_addr;
1855 int wr_idx;
1856
1857 dp = &r_vec->nfp_net->dp;
1858 tx_ring = r_vec->tx_ring;
1859
1860 if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
1861 nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
1862 goto err_free;
1863 }
1864
1865 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
1866 u64_stats_update_begin(&r_vec->tx_sync);
1867 r_vec->tx_busy++;
1868 u64_stats_update_end(&r_vec->tx_sync);
1869 if (!old)
1870 __skb_queue_tail(&r_vec->queue, skb);
1871 else
1872 __skb_queue_head(&r_vec->queue, skb);
1873 return true;
1874 }
1875
1876 if (nfp_app_ctrl_has_meta(nn->app)) {
1877 if (unlikely(skb_headroom(skb) < 8)) {
1878 nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
1879 goto err_free;
1880 }
1881 meta_len = 8;
1882 put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
1883 put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
1884 }
1885
1886
1887 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
1888 DMA_TO_DEVICE);
1889 if (dma_mapping_error(dp->dev, dma_addr))
1890 goto err_dma_warn;
1891
1892 wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
1893
1894
1895 txbuf = &tx_ring->txbufs[wr_idx];
1896 txbuf->skb = skb;
1897 txbuf->dma_addr = dma_addr;
1898 txbuf->fidx = -1;
1899 txbuf->pkt_cnt = 1;
1900 txbuf->real_len = real_len;
1901
1902
1903 txd = &tx_ring->txds[wr_idx];
1904 txd->offset_eop = meta_len | PCIE_DESC_TX_EOP;
1905 txd->dma_len = cpu_to_le16(skb_headlen(skb));
1906 nfp_desc_set_dma_addr(txd, dma_addr);
1907 txd->data_len = cpu_to_le16(skb->len);
1908
1909 txd->flags = 0;
1910 txd->mss = 0;
1911 txd->lso_hdrlen = 0;
1912
1913 tx_ring->wr_p++;
1914 tx_ring->wr_ptr_add++;
1915 nfp_net_tx_xmit_more_flush(tx_ring);
1916
1917 return false;
1918
1919err_dma_warn:
1920 nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
1921err_free:
1922 u64_stats_update_begin(&r_vec->tx_sync);
1923 r_vec->tx_errors++;
1924 u64_stats_update_end(&r_vec->tx_sync);
1925 dev_kfree_skb_any(skb);
1926 return false;
1927}
1928
1929bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
1930{
1931 struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
1932
1933 return nfp_ctrl_tx_one(nn, r_vec, skb, false);
1934}
1935
1936bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
1937{
1938 struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
1939 bool ret;
1940
1941 spin_lock_bh(&r_vec->lock);
1942 ret = nfp_ctrl_tx_one(nn, r_vec, skb, false);
1943 spin_unlock_bh(&r_vec->lock);
1944
1945 return ret;
1946}
1947
1948static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
1949{
1950 struct sk_buff *skb;
1951
1952 while ((skb = __skb_dequeue(&r_vec->queue)))
1953 if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
1954 return;
1955}
1956
1957static bool
1958nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
1959{
1960 u32 meta_type, meta_tag;
1961
1962 if (!nfp_app_ctrl_has_meta(nn->app))
1963 return !meta_len;
1964
1965 if (meta_len != 8)
1966 return false;
1967
1968 meta_type = get_unaligned_be32(data);
1969 meta_tag = get_unaligned_be32(data + 4);
1970
1971 return (meta_type == NFP_NET_META_PORTID &&
1972 meta_tag == NFP_META_PORT_ID_CTRL);
1973}
1974
1975static bool
1976nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
1977 struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
1978{
1979 unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
1980 struct nfp_net_rx_buf *rxbuf;
1981 struct nfp_net_rx_desc *rxd;
1982 dma_addr_t new_dma_addr;
1983 struct sk_buff *skb;
1984 void *new_frag;
1985 int idx;
1986
1987 idx = D_IDX(rx_ring, rx_ring->rd_p);
1988
1989 rxd = &rx_ring->rxds[idx];
1990 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
1991 return false;
1992
1993
1994
1995
1996 dma_rmb();
1997
1998 rx_ring->rd_p++;
1999
2000 rxbuf = &rx_ring->rxbufs[idx];
2001 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
2002 data_len = le16_to_cpu(rxd->rxd.data_len);
2003 pkt_len = data_len - meta_len;
2004
2005 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
2006 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
2007 pkt_off += meta_len;
2008 else
2009 pkt_off += dp->rx_offset;
2010 meta_off = pkt_off - meta_len;
2011
2012
2013 u64_stats_update_begin(&r_vec->rx_sync);
2014 r_vec->rx_pkts++;
2015 r_vec->rx_bytes += pkt_len;
2016 u64_stats_update_end(&r_vec->rx_sync);
2017
2018 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
2019
2020 if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
2021 nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
2022 meta_len);
2023 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
2024 return true;
2025 }
2026
2027 skb = build_skb(rxbuf->frag, dp->fl_bufsz);
2028 if (unlikely(!skb)) {
2029 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
2030 return true;
2031 }
2032 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
2033 if (unlikely(!new_frag)) {
2034 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
2035 return true;
2036 }
2037
2038 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
2039
2040 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
2041
2042 skb_reserve(skb, pkt_off);
2043 skb_put(skb, pkt_len);
2044
2045 nfp_app_ctrl_rx(nn->app, skb);
2046
2047 return true;
2048}
2049
2050static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
2051{
2052 struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
2053 struct nfp_net *nn = r_vec->nfp_net;
2054 struct nfp_net_dp *dp = &nn->dp;
2055
2056 while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
2057 continue;
2058}
2059
2060static void nfp_ctrl_poll(unsigned long arg)
2061{
2062 struct nfp_net_r_vector *r_vec = (void *)arg;
2063
2064 spin_lock_bh(&r_vec->lock);
2065 nfp_net_tx_complete(r_vec->tx_ring);
2066 __nfp_ctrl_tx_queued(r_vec);
2067 spin_unlock_bh(&r_vec->lock);
2068
2069 nfp_ctrl_rx(r_vec);
2070
2071 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
2072}
2073
2074
2075
2076
2077
2078
2079
2080
2081static void nfp_net_vecs_init(struct nfp_net *nn)
2082{
2083 struct nfp_net_r_vector *r_vec;
2084 int r;
2085
2086 nn->lsc_handler = nfp_net_irq_lsc;
2087 nn->exn_handler = nfp_net_irq_exn;
2088
2089 for (r = 0; r < nn->max_r_vecs; r++) {
2090 struct msix_entry *entry;
2091
2092 entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
2093
2094 r_vec = &nn->r_vecs[r];
2095 r_vec->nfp_net = nn;
2096 r_vec->irq_entry = entry->entry;
2097 r_vec->irq_vector = entry->vector;
2098
2099 if (nn->dp.netdev) {
2100 r_vec->handler = nfp_net_irq_rxtx;
2101 } else {
2102 r_vec->handler = nfp_ctrl_irq_rxtx;
2103
2104 __skb_queue_head_init(&r_vec->queue);
2105 spin_lock_init(&r_vec->lock);
2106 tasklet_init(&r_vec->tasklet, nfp_ctrl_poll,
2107 (unsigned long)r_vec);
2108 tasklet_disable(&r_vec->tasklet);
2109 }
2110
2111 cpumask_set_cpu(r, &r_vec->affinity_mask);
2112 }
2113}
2114
2115
2116
2117
2118
2119static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
2120{
2121 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
2122 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
2123
2124 kfree(tx_ring->txbufs);
2125
2126 if (tx_ring->txds)
2127 dma_free_coherent(dp->dev, tx_ring->size,
2128 tx_ring->txds, tx_ring->dma);
2129
2130 tx_ring->cnt = 0;
2131 tx_ring->txbufs = NULL;
2132 tx_ring->txds = NULL;
2133 tx_ring->dma = 0;
2134 tx_ring->size = 0;
2135}
2136
2137
2138
2139
2140
2141
2142
2143
2144static int
2145nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
2146{
2147 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
2148 int sz;
2149
2150 tx_ring->cnt = dp->txd_cnt;
2151
2152 tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
2153 tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
2154 &tx_ring->dma, GFP_KERNEL);
2155 if (!tx_ring->txds)
2156 goto err_alloc;
2157
2158 sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt;
2159 tx_ring->txbufs = kzalloc(sz, GFP_KERNEL);
2160 if (!tx_ring->txbufs)
2161 goto err_alloc;
2162
2163 if (!tx_ring->is_xdp && dp->netdev)
2164 netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
2165 tx_ring->idx);
2166
2167 return 0;
2168
2169err_alloc:
2170 nfp_net_tx_ring_free(tx_ring);
2171 return -ENOMEM;
2172}
2173
2174static void
2175nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
2176 struct nfp_net_tx_ring *tx_ring)
2177{
2178 unsigned int i;
2179
2180 if (!tx_ring->is_xdp)
2181 return;
2182
2183 for (i = 0; i < tx_ring->cnt; i++) {
2184 if (!tx_ring->txbufs[i].frag)
2185 return;
2186
2187 nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
2188 __free_page(virt_to_page(tx_ring->txbufs[i].frag));
2189 }
2190}
2191
2192static int
2193nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
2194 struct nfp_net_tx_ring *tx_ring)
2195{
2196 struct nfp_net_tx_buf *txbufs = tx_ring->txbufs;
2197 unsigned int i;
2198
2199 if (!tx_ring->is_xdp)
2200 return 0;
2201
2202 for (i = 0; i < tx_ring->cnt; i++) {
2203 txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
2204 if (!txbufs[i].frag) {
2205 nfp_net_tx_ring_bufs_free(dp, tx_ring);
2206 return -ENOMEM;
2207 }
2208 }
2209
2210 return 0;
2211}
2212
2213static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
2214{
2215 unsigned int r;
2216
2217 dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
2218 GFP_KERNEL);
2219 if (!dp->tx_rings)
2220 return -ENOMEM;
2221
2222 for (r = 0; r < dp->num_tx_rings; r++) {
2223 int bias = 0;
2224
2225 if (r >= dp->num_stack_tx_rings)
2226 bias = dp->num_stack_tx_rings;
2227
2228 nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias],
2229 r, bias);
2230
2231 if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
2232 goto err_free_prev;
2233
2234 if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
2235 goto err_free_ring;
2236 }
2237
2238 return 0;
2239
2240err_free_prev:
2241 while (r--) {
2242 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
2243err_free_ring:
2244 nfp_net_tx_ring_free(&dp->tx_rings[r]);
2245 }
2246 kfree(dp->tx_rings);
2247 return -ENOMEM;
2248}
2249
2250static void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
2251{
2252 unsigned int r;
2253
2254 for (r = 0; r < dp->num_tx_rings; r++) {
2255 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
2256 nfp_net_tx_ring_free(&dp->tx_rings[r]);
2257 }
2258
2259 kfree(dp->tx_rings);
2260}
2261
2262
2263
2264
2265
2266static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
2267{
2268 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
2269 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
2270
2271 if (dp->netdev)
2272 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
2273 kfree(rx_ring->rxbufs);
2274
2275 if (rx_ring->rxds)
2276 dma_free_coherent(dp->dev, rx_ring->size,
2277 rx_ring->rxds, rx_ring->dma);
2278
2279 rx_ring->cnt = 0;
2280 rx_ring->rxbufs = NULL;
2281 rx_ring->rxds = NULL;
2282 rx_ring->dma = 0;
2283 rx_ring->size = 0;
2284}
2285
2286
2287
2288
2289
2290
2291
2292
2293static int
2294nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
2295{
2296 int sz, err;
2297
2298 if (dp->netdev) {
2299 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
2300 rx_ring->idx);
2301 if (err < 0)
2302 return err;
2303 }
2304
2305 rx_ring->cnt = dp->rxd_cnt;
2306 rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
2307 rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
2308 &rx_ring->dma, GFP_KERNEL);
2309 if (!rx_ring->rxds)
2310 goto err_alloc;
2311
2312 sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt;
2313 rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL);
2314 if (!rx_ring->rxbufs)
2315 goto err_alloc;
2316
2317 return 0;
2318
2319err_alloc:
2320 nfp_net_rx_ring_free(rx_ring);
2321 return -ENOMEM;
2322}
2323
2324static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
2325{
2326 unsigned int r;
2327
2328 dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
2329 GFP_KERNEL);
2330 if (!dp->rx_rings)
2331 return -ENOMEM;
2332
2333 for (r = 0; r < dp->num_rx_rings; r++) {
2334 nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);
2335
2336 if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
2337 goto err_free_prev;
2338
2339 if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
2340 goto err_free_ring;
2341 }
2342
2343 return 0;
2344
2345err_free_prev:
2346 while (r--) {
2347 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
2348err_free_ring:
2349 nfp_net_rx_ring_free(&dp->rx_rings[r]);
2350 }
2351 kfree(dp->rx_rings);
2352 return -ENOMEM;
2353}
2354
2355static void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
2356{
2357 unsigned int r;
2358
2359 for (r = 0; r < dp->num_rx_rings; r++) {
2360 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
2361 nfp_net_rx_ring_free(&dp->rx_rings[r]);
2362 }
2363
2364 kfree(dp->rx_rings);
2365}
2366
2367static void
2368nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
2369 struct nfp_net_r_vector *r_vec, int idx)
2370{
2371 r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
2372 r_vec->tx_ring =
2373 idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL;
2374
2375 r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
2376 &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;
2377}
2378
2379static int
2380nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
2381 int idx)
2382{
2383 int err;
2384
2385
2386 if (nn->dp.netdev)
2387 netif_napi_add(nn->dp.netdev, &r_vec->napi,
2388 nfp_net_poll, NAPI_POLL_WEIGHT);
2389 else
2390 tasklet_enable(&r_vec->tasklet);
2391
2392 snprintf(r_vec->name, sizeof(r_vec->name),
2393 "%s-rxtx-%d", nfp_net_name(nn), idx);
2394 err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
2395 r_vec);
2396 if (err) {
2397 if (nn->dp.netdev)
2398 netif_napi_del(&r_vec->napi);
2399 else
2400 tasklet_disable(&r_vec->tasklet);
2401
2402 nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
2403 return err;
2404 }
2405 disable_irq(r_vec->irq_vector);
2406
2407 irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
2408
2409 nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
2410 r_vec->irq_entry);
2411
2412 return 0;
2413}
2414
2415static void
2416nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
2417{
2418 irq_set_affinity_hint(r_vec->irq_vector, NULL);
2419 if (nn->dp.netdev)
2420 netif_napi_del(&r_vec->napi);
2421 else
2422 tasklet_disable(&r_vec->tasklet);
2423
2424 free_irq(r_vec->irq_vector, r_vec);
2425}
2426
2427
2428
2429
2430
2431void nfp_net_rss_write_itbl(struct nfp_net *nn)
2432{
2433 int i;
2434
2435 for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4)
2436 nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i,
2437 get_unaligned_le32(nn->rss_itbl + i));
2438}
2439
2440
2441
2442
2443
2444void nfp_net_rss_write_key(struct nfp_net *nn)
2445{
2446 int i;
2447
2448 for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4)
2449 nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
2450 get_unaligned_le32(nn->rss_key + i));
2451}
2452
2453
2454
2455
2456
2457void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
2458{
2459 u8 i;
2460 u32 factor;
2461 u32 value;
2462
2463
2464
2465
2466
2467 factor = nn->tlv_caps.me_freq_mhz / 16;
2468
2469
2470 value = (nn->rx_coalesce_max_frames << 16) |
2471 (factor * nn->rx_coalesce_usecs);
2472 for (i = 0; i < nn->dp.num_rx_rings; i++)
2473 nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
2474
2475
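	/* copy TX interrupt coalesce parameters */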
2476 value = (nn->tx_coalesce_max_frames << 16) |
2477 (factor * nn->tx_coalesce_usecs);
2478 for (i = 0; i < nn->dp.num_tx_rings; i++)
2479 nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
2480}
2481
/**
 * nfp_net_write_mac_addr() - Write mac address to the device control BAR
 * @nn:      NFP Net device to reconfigure
 * @addr:    MAC address to write
 *
 * Writes the MAC address to the device control BAR.  Does not perform the
 * required reconfig.  We do a bit of byte swapping dance because firmware
 * is LE.
 */
2491static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
2492{
2493 nn_writel(nn, NFP_NET_CFG_MACADDR + 0, get_unaligned_be32(addr));
2494 nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4));
2495}
2496
2497static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
2498{
2499 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
2500 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
2501 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
2502
2503 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
2504 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
2505 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
2506}
2507
/**
 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
 * @nn:      NFP Net device to reconfigure
 */
2512static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
2513{
2514 u32 new_ctrl, update;
2515 unsigned int r;
2516 int err;
2517
2518 new_ctrl = nn->dp.ctrl;
2519 new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
2520 update = NFP_NET_CFG_UPDATE_GEN;
2521 update |= NFP_NET_CFG_UPDATE_MSIX;
2522 update |= NFP_NET_CFG_UPDATE_RING;
2523
2524 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
2525 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
2526
2527 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
2528 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
2529
2530 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2531 err = nfp_net_reconfig(nn, update);
2532 if (err)
2533 nn_err(nn, "Could not disable device: %d\n", err);
2534
2535 for (r = 0; r < nn->dp.num_rx_rings; r++)
2536 nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
2537 for (r = 0; r < nn->dp.num_tx_rings; r++)
2538 nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
2539 for (r = 0; r < nn->dp.num_r_vecs; r++)
2540 nfp_net_vec_clear_ring_data(nn, r);
2541
2542 nn->dp.ctrl = new_ctrl;
2543}
2544
2545static void
2546nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
2547 struct nfp_net_rx_ring *rx_ring, unsigned int idx)
2548{
2549
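	/* Write the DMA address, size and MSI-X vector for this ring */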
2550 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
2551 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
2552 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
2553}
2554
2555static void
2556nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
2557 struct nfp_net_tx_ring *tx_ring, unsigned int idx)
2558{
2559 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
2560 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
2561 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
2562}
2563
/**
 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP device
 * @nn:      NFP Net device to reconfigure
 */
2568static int nfp_net_set_config_and_enable(struct nfp_net *nn)
2569{
2570 u32 bufsz, new_ctrl, update = 0;
2571 unsigned int r;
2572 int err;
2573
2574 new_ctrl = nn->dp.ctrl;
2575
2576 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) {
2577 nfp_net_rss_write_key(nn);
2578 nfp_net_rss_write_itbl(nn);
2579 nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
2580 update |= NFP_NET_CFG_UPDATE_RSS;
2581 }
2582
2583 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) {
2584 nfp_net_coalesce_write_cfg(nn);
2585 update |= NFP_NET_CFG_UPDATE_IRQMOD;
2586 }
2587
2588 for (r = 0; r < nn->dp.num_tx_rings; r++)
2589 nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r);
2590 for (r = 0; r < nn->dp.num_rx_rings; r++)
2591 nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);
2592
2593 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ?
2594 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1);
2595
2596 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
2597 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);
2598
2599 if (nn->dp.netdev)
2600 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
2601
2602 nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu);
2603
2604 bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
2605 nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);
2606
2607
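	/* Enable device */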
2608 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
2609 update |= NFP_NET_CFG_UPDATE_GEN;
2610 update |= NFP_NET_CFG_UPDATE_MSIX;
2611 update |= NFP_NET_CFG_UPDATE_RING;
2612 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
2613 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
2614
2615 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2616 err = nfp_net_reconfig(nn, update);
2617 if (err) {
2618 nfp_net_clear_config_and_disable(nn);
2619 return err;
2620 }
2621
2622 nn->dp.ctrl = new_ctrl;
2623
2624 for (r = 0; r < nn->dp.num_rx_rings; r++)
2625 nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);
2626
	/* Since reconfiguration requests while NFP is down are ignored we
	 * have to wipe the entire VXLAN configuration and reinitialize it.
	 */
2630 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) {
2631 memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
2632 memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
2633 udp_tunnel_get_rx_info(nn->dp.netdev);
2634 }
2635
2636 return 0;
2637}
2638
/**
 * nfp_net_close_stack() - Quiesce the stack (part of close)
 * @nn:      NFP Net device to reconfigure
 */
2643static void nfp_net_close_stack(struct nfp_net *nn)
2644{
2645 unsigned int r;
2646
2647 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2648 netif_carrier_off(nn->dp.netdev);
2649 nn->link_up = false;
2650
2651 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2652 disable_irq(nn->r_vecs[r].irq_vector);
2653 napi_disable(&nn->r_vecs[r].napi);
2654 }
2655
2656 netif_tx_disable(nn->dp.netdev);
2657}
2658
/**
 * nfp_net_close_free_all() - Free all runtime resources
 * @nn:      NFP Net device to reconfigure
 */
2663static void nfp_net_close_free_all(struct nfp_net *nn)
2664{
2665 unsigned int r;
2666
2667 nfp_net_tx_rings_free(&nn->dp);
2668 nfp_net_rx_rings_free(&nn->dp);
2669
2670 for (r = 0; r < nn->dp.num_r_vecs; r++)
2671 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2672
2673 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2674 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2675}
2676
/**
 * nfp_net_netdev_close() - Called when the device is downed
 * @netdev:      netdev structure
 */
2681static int nfp_net_netdev_close(struct net_device *netdev)
2682{
2683 struct nfp_net *nn = netdev_priv(netdev);
2684
	/* Step 1: Disable RX and TX rings from the Linux kernel perspective
	 */
2687 nfp_net_close_stack(nn);
2688
	/* Step 2: Tell NFP
	 */
2691 nfp_net_clear_config_and_disable(nn);
2692 nfp_port_configure(netdev, false);
2693
	/* Step 3: Free resources
	 */
2696 nfp_net_close_free_all(nn);
2697
2698	nn_dbg(nn, "%s down\n", netdev->name);
2699 return 0;
2700}
2701
2702void nfp_ctrl_close(struct nfp_net *nn)
2703{
2704 int r;
2705
2706 rtnl_lock();
2707
2708 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2709 disable_irq(nn->r_vecs[r].irq_vector);
2710 tasklet_disable(&nn->r_vecs[r].tasklet);
2711 }
2712
2713 nfp_net_clear_config_and_disable(nn);
2714
2715 nfp_net_close_free_all(nn);
2716
2717 rtnl_unlock();
2718}
2719
/**
 * nfp_net_open_stack() - Start the device from stack's perspective
 * @nn:      NFP Net device to bring up
 */
2724static void nfp_net_open_stack(struct nfp_net *nn)
2725{
2726 unsigned int r;
2727
2728 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2729 napi_enable(&nn->r_vecs[r].napi);
2730 enable_irq(nn->r_vecs[r].irq_vector);
2731 }
2732
2733 netif_tx_wake_all_queues(nn->dp.netdev);
2734
2735 enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2736 nfp_net_read_link_status(nn);
2737}
2738
2739static int nfp_net_open_alloc_all(struct nfp_net *nn)
2740{
2741 int err, r;
2742
2743 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
2744 nn->exn_name, sizeof(nn->exn_name),
2745 NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
2746 if (err)
2747 return err;
2748 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
2749 nn->lsc_name, sizeof(nn->lsc_name),
2750 NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
2751 if (err)
2752 goto err_free_exn;
2753 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2754
2755 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2756 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
2757 if (err)
2758 goto err_cleanup_vec_p;
2759 }
2760
2761 err = nfp_net_rx_rings_prepare(nn, &nn->dp);
2762 if (err)
2763 goto err_cleanup_vec;
2764
2765 err = nfp_net_tx_rings_prepare(nn, &nn->dp);
2766 if (err)
2767 goto err_free_rx_rings;
2768
2769 for (r = 0; r < nn->max_r_vecs; r++)
2770 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
2771
2772 return 0;
2773
2774err_free_rx_rings:
2775 nfp_net_rx_rings_free(&nn->dp);
2776err_cleanup_vec:
2777 r = nn->dp.num_r_vecs;
2778err_cleanup_vec_p:
2779 while (r--)
2780 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2781 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2782err_free_exn:
2783 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2784 return err;
2785}
2786
2787static int nfp_net_netdev_open(struct net_device *netdev)
2788{
2789 struct nfp_net *nn = netdev_priv(netdev);
2790 int err;
2791
	/* Step 1: Allocate resources for rings and the like
	 * - Request interrupts
	 * - Allocate RX and TX ring resources
	 * - Setup initial RSS table
	 */
2797 err = nfp_net_open_alloc_all(nn);
2798 if (err)
2799 return err;
2800
2801 err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
2802 if (err)
2803 goto err_free_all;
2804
2805 err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
2806 if (err)
2807 goto err_free_all;
2808
	/* Step 2: Configure the NFP
	 * - Ifup the physical interface if it exists
	 * - Enable rings from 0 to tx_rings/rx_rings - 1.
	 * - Write MAC address (in case it changed)
	 * - Set the MTU
	 * - Set the Freelist buffer size
	 * - Enable the FW
	 */
2817 err = nfp_port_configure(netdev, true);
2818 if (err)
2819 goto err_free_all;
2820
2821 err = nfp_net_set_config_and_enable(nn);
2822 if (err)
2823 goto err_port_disable;
2824
	/* Step 3: Enable for kernel
	 * - put some freelist descriptors on each RX ring
	 * - enable NAPI on each ring
	 * - enable all TX queues
	 * - set link state
	 */
2831 nfp_net_open_stack(nn);
2832
2833 return 0;
2834
2835err_port_disable:
2836 nfp_port_configure(netdev, false);
2837err_free_all:
2838 nfp_net_close_free_all(nn);
2839 return err;
2840}
2841
2842int nfp_ctrl_open(struct nfp_net *nn)
2843{
2844 int err, r;
2845
2846
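	/* ring dumping depends on vNICs being opened/closed under rtnl */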
2847 rtnl_lock();
2848
2849 err = nfp_net_open_alloc_all(nn);
2850 if (err)
2851 goto err_unlock;
2852
2853 err = nfp_net_set_config_and_enable(nn);
2854 if (err)
2855 goto err_free_all;
2856
2857 for (r = 0; r < nn->dp.num_r_vecs; r++)
2858 enable_irq(nn->r_vecs[r].irq_vector);
2859
2860 rtnl_unlock();
2861
2862 return 0;
2863
2864err_free_all:
2865 nfp_net_close_free_all(nn);
2866err_unlock:
2867 rtnl_unlock();
2868 return err;
2869}
2870
2871static void nfp_net_set_rx_mode(struct net_device *netdev)
2872{
2873 struct nfp_net *nn = netdev_priv(netdev);
2874 u32 new_ctrl;
2875
2876 new_ctrl = nn->dp.ctrl;
2877
2878 if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI)
2879 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC;
2880 else
2881 new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC;
2882
2883 if (netdev->flags & IFF_PROMISC) {
2884 if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
2885 new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
2886 else
2887 nn_warn(nn, "FW does not support promiscuous mode\n");
2888 } else {
2889 new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
2890 }
2891
2892 if (new_ctrl == nn->dp.ctrl)
2893 return;
2894
2895 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2896 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
2897
2898 nn->dp.ctrl = new_ctrl;
2899}
2900
2901static void nfp_net_rss_init_itbl(struct nfp_net *nn)
2902{
2903 int i;
2904
2905 for (i = 0; i < sizeof(nn->rss_itbl); i++)
2906 nn->rss_itbl[i] =
2907 ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings);
2908}
2909
2910static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
2911{
2912 struct nfp_net_dp new_dp = *dp;
2913
2914 *dp = nn->dp;
2915 nn->dp = new_dp;
2916
2917 nn->dp.netdev->mtu = new_dp.mtu;
2918
2919 if (!netif_is_rxfh_configured(nn->dp.netdev))
2920 nfp_net_rss_init_itbl(nn);
2921}
2922
2923static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
2924{
2925 unsigned int r;
2926 int err;
2927
2928 nfp_net_dp_swap(nn, dp);
2929
2930 for (r = 0; r < nn->max_r_vecs; r++)
2931 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
2932
2933 err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings);
2934 if (err)
2935 return err;
2936
2937 if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) {
2938 err = netif_set_real_num_tx_queues(nn->dp.netdev,
2939 nn->dp.num_stack_tx_rings);
2940 if (err)
2941 return err;
2942 }
2943
2944 return nfp_net_set_config_and_enable(nn);
2945}
2946
2947struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
2948{
2949 struct nfp_net_dp *new;
2950
2951 new = kmalloc(sizeof(*new), GFP_KERNEL);
2952 if (!new)
2953 return NULL;
2954
2955 *new = nn->dp;
2956
2957
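	/* Clear things which need to be recomputed */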
2958 new->fl_bufsz = 0;
2959 new->tx_rings = NULL;
2960 new->rx_rings = NULL;
2961 new->num_r_vecs = 0;
2962 new->num_stack_tx_rings = 0;
2963
2964 return new;
2965}
2966
2967static int
2968nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
2969 struct netlink_ext_ack *extack)
2970{
2971
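	/* The remaining checks only apply when an XDP program is attached */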
2972 if (!dp->xdp_prog)
2973 return 0;
2974 if (dp->fl_bufsz > PAGE_SIZE) {
2975 NL_SET_ERR_MSG_MOD(extack, "MTU too large w/ XDP enabled");
2976 return -EINVAL;
2977 }
2978 if (dp->num_tx_rings > nn->max_tx_rings) {
2979 NL_SET_ERR_MSG_MOD(extack, "Insufficient number of TX rings w/ XDP enabled");
2980 return -EINVAL;
2981 }
2982
2983 return 0;
2984}
2985
2986int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp,
2987 struct netlink_ext_ack *extack)
2988{
2989 int r, err;
2990
2991 dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp);
2992
2993 dp->num_stack_tx_rings = dp->num_tx_rings;
2994 if (dp->xdp_prog)
2995 dp->num_stack_tx_rings -= dp->num_rx_rings;
2996
2997 dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings);
2998
2999 err = nfp_net_check_config(nn, dp, extack);
3000 if (err)
3001 goto exit_free_dp;
3002
3003 if (!netif_running(dp->netdev)) {
3004 nfp_net_dp_swap(nn, dp);
3005 err = 0;
3006 goto exit_free_dp;
3007 }
3008
3009
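	/* Prepare new rings */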
3010 for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
3011 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
3012 if (err) {
3013 dp->num_r_vecs = r;
3014 goto err_cleanup_vecs;
3015 }
3016 }
3017
3018 err = nfp_net_rx_rings_prepare(nn, dp);
3019 if (err)
3020 goto err_cleanup_vecs;
3021
3022 err = nfp_net_tx_rings_prepare(nn, dp);
3023 if (err)
3024 goto err_free_rx;
3025
3026
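	/* Stop device, swap in new rings, try to start the firmware */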
3027 nfp_net_close_stack(nn);
3028 nfp_net_clear_config_and_disable(nn);
3029
3030 err = nfp_net_dp_swap_enable(nn, dp);
3031 if (err) {
3032 int err2;
3033
3034 nfp_net_clear_config_and_disable(nn);
3035
3036
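		/* Try with old configuration and old rings */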
3037 err2 = nfp_net_dp_swap_enable(nn, dp);
3038 if (err2)
3039 nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
3040 err, err2);
3041 }
3042 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
3043 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
3044
3045 nfp_net_rx_rings_free(dp);
3046 nfp_net_tx_rings_free(dp);
3047
3048 nfp_net_open_stack(nn);
3049exit_free_dp:
3050 kfree(dp);
3051
3052 return err;
3053
3054err_free_rx:
3055 nfp_net_rx_rings_free(dp);
3056err_cleanup_vecs:
3057 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
3058 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
3059 kfree(dp);
3060 return err;
3061}
3062
3063static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
3064{
3065 struct nfp_net *nn = netdev_priv(netdev);
3066 struct nfp_net_dp *dp;
3067 int err;
3068
3069 err = nfp_app_check_mtu(nn->app, netdev, new_mtu);
3070 if (err)
3071 return err;
3072
3073 dp = nfp_net_clone_dp(nn);
3074 if (!dp)
3075 return -ENOMEM;
3076
3077 dp->mtu = new_mtu;
3078
3079 return nfp_net_ring_reconfig(nn, dp, NULL);
3080}
3081
3082static int
3083nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3084{
3085 struct nfp_net *nn = netdev_priv(netdev);
3086
	/* Priority tagged packets with vlan id 0 are processed by the
	 * NFP as untagged packets
	 */
3090 if (!vid)
3091 return 0;
3092
3093 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
3094 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
3095 ETH_P_8021Q);
3096
3097 return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
3098}
3099
3100static int
3101nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3102{
3103 struct nfp_net *nn = netdev_priv(netdev);
3104
	/* Priority tagged packets with vlan id 0 are processed by the
	 * NFP as untagged packets
	 */
3108 if (!vid)
3109 return 0;
3110
3111 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
3112 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
3113 ETH_P_8021Q);
3114
3115 return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
3116}
3117
3118static void nfp_net_stat64(struct net_device *netdev,
3119 struct rtnl_link_stats64 *stats)
3120{
3121 struct nfp_net *nn = netdev_priv(netdev);
3122 int r;
3123
3124 for (r = 0; r < nn->dp.num_r_vecs; r++) {
3125 struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
3126 u64 data[3];
3127 unsigned int start;
3128
3129 do {
3130 start = u64_stats_fetch_begin(&r_vec->rx_sync);
3131 data[0] = r_vec->rx_pkts;
3132 data[1] = r_vec->rx_bytes;
3133 data[2] = r_vec->rx_drops;
3134 } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
3135 stats->rx_packets += data[0];
3136 stats->rx_bytes += data[1];
3137 stats->rx_dropped += data[2];
3138
3139 do {
3140 start = u64_stats_fetch_begin(&r_vec->tx_sync);
3141 data[0] = r_vec->tx_pkts;
3142 data[1] = r_vec->tx_bytes;
3143 data[2] = r_vec->tx_errors;
3144 } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
3145 stats->tx_packets += data[0];
3146 stats->tx_bytes += data[1];
3147 stats->tx_errors += data[2];
3148 }
3149}
3150
3151static int nfp_net_set_features(struct net_device *netdev,
3152 netdev_features_t features)
3153{
3154 netdev_features_t changed = netdev->features ^ features;
3155 struct nfp_net *nn = netdev_priv(netdev);
3156 u32 new_ctrl;
3157 int err;
3158
	/* Assume this is not called with features we have not advertised */
3161 new_ctrl = nn->dp.ctrl;
3162
3163 if (changed & NETIF_F_RXCSUM) {
3164 if (features & NETIF_F_RXCSUM)
3165 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
3166 else
3167 new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM_ANY;
3168 }
3169
3170 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3171 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
3172 new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
3173 else
3174 new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM;
3175 }
3176
3177 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
3178 if (features & (NETIF_F_TSO | NETIF_F_TSO6))
3179 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
3180 NFP_NET_CFG_CTRL_LSO;
3181 else
3182 new_ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
3183 }
3184
3185 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
3186 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3187 new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
3188 else
3189 new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
3190 }
3191
3192 if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
3193 if (features & NETIF_F_HW_VLAN_CTAG_TX)
3194 new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
3195 else
3196 new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
3197 }
3198
3199 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
3200 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
3201 new_ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
3202 else
3203 new_ctrl &= ~NFP_NET_CFG_CTRL_CTAG_FILTER;
3204 }
3205
3206 if (changed & NETIF_F_SG) {
3207 if (features & NETIF_F_SG)
3208 new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
3209 else
3210 new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
3211 }
3212
3213 err = nfp_port_set_features(netdev, features);
3214 if (err)
3215 return err;
3216
3217 nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
3218 netdev->features, features, changed);
3219
3220 if (new_ctrl == nn->dp.ctrl)
3221 return 0;
3222
3223 nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl);
3224 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
3225 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
3226 if (err)
3227 return err;
3228
3229 nn->dp.ctrl = new_ctrl;
3230
3231 return 0;
3232}
3233
3234static netdev_features_t
3235nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
3236 netdev_features_t features)
3237{
3238 u8 l4_hdr;
3239
3240
3241 features &= vlan_features_check(skb, features);
3242
3243 if (!skb->encapsulation)
3244 return features;
3245
3246
3247 if (skb_is_gso(skb)) {
3248 u32 hdrlen;
3249
3250 hdrlen = skb_inner_transport_header(skb) - skb->data +
3251 inner_tcp_hdrlen(skb);
3252
3253 if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ))
3254 features &= ~NETIF_F_GSO_MASK;
3255 }
3256
3257
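	/* VXLAN/GRE check */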
3258 switch (vlan_get_protocol(skb)) {
3259 case htons(ETH_P_IP):
3260 l4_hdr = ip_hdr(skb)->protocol;
3261 break;
3262 case htons(ETH_P_IPV6):
3263 l4_hdr = ipv6_hdr(skb)->nexthdr;
3264 break;
3265 default:
3266 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3267 }
3268
3269 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
3270 skb->inner_protocol != htons(ETH_P_TEB) ||
3271 (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) ||
3272 (l4_hdr == IPPROTO_UDP &&
3273 (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
3274 sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
3275 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3276
3277 return features;
3278}
3279
/**
 * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
 * @nn:   NFP Net device to reconfigure
 * @idx:  Index into the port table where new port should be written
 * @port: UDP port to configure (pass zero to remove VXLAN port)
 */
3286static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
3287{
3288 int i;
3289
3290 nn->vxlan_ports[idx] = port;
3291
3292 if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN))
3293 return;
3294
3295 BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
3296 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2)
3297 nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
3298 be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
3299 be16_to_cpu(nn->vxlan_ports[i]));
3300
3301 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
3302}
3303
/**
 * nfp_net_find_vxlan_idx() - find table entry of the port or a free one
 * @nn:   NFP Network structure
 * @port: UDP port to look for
 *
 * Return: if the port is already in the table -- index of the entry;
 *	   if the port is not in the table -- index of a free entry;
 *	   if the table is full -- -ENOSPC.
 */
3313static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
3314{
3315 int i, free_idx = -ENOSPC;
3316
3317 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
3318 if (nn->vxlan_ports[i] == port)
3319 return i;
3320 if (!nn->vxlan_usecnt[i])
3321 free_idx = i;
3322 }
3323
3324 return free_idx;
3325}
3326
3327static void nfp_net_add_vxlan_port(struct net_device *netdev,
3328 struct udp_tunnel_info *ti)
3329{
3330 struct nfp_net *nn = netdev_priv(netdev);
3331 int idx;
3332
3333 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3334 return;
3335
3336 idx = nfp_net_find_vxlan_idx(nn, ti->port);
3337 if (idx == -ENOSPC)
3338 return;
3339
3340 if (!nn->vxlan_usecnt[idx]++)
3341 nfp_net_set_vxlan_port(nn, idx, ti->port);
3342}
3343
3344static void nfp_net_del_vxlan_port(struct net_device *netdev,
3345 struct udp_tunnel_info *ti)
3346{
3347 struct nfp_net *nn = netdev_priv(netdev);
3348 int idx;
3349
3350 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3351 return;
3352
3353 idx = nfp_net_find_vxlan_idx(nn, ti->port);
3354 if (idx == -ENOSPC || !nn->vxlan_usecnt[idx])
3355 return;
3356
3357 if (!--nn->vxlan_usecnt[idx])
3358 nfp_net_set_vxlan_port(nn, idx, 0);
3359}
3360
3361static int
3362nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog,
3363 struct netlink_ext_ack *extack)
3364{
3365 struct nfp_net_dp *dp;
3366
3367 if (!prog == !nn->dp.xdp_prog) {
3368 WRITE_ONCE(nn->dp.xdp_prog, prog);
3369 return 0;
3370 }
3371
3372 dp = nfp_net_clone_dp(nn);
3373 if (!dp)
3374 return -ENOMEM;
3375
3376 dp->xdp_prog = prog;
3377 dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
3378 dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
3379 dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;
3380
	/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
3382 return nfp_net_ring_reconfig(nn, dp, extack);
3383}
3384
3385static int
3386nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
3387 struct netlink_ext_ack *extack)
3388{
3389 struct bpf_prog *drv_prog, *offload_prog;
3390 int err;
3391
3392 if (nn->xdp_prog && (flags ^ nn->xdp_flags) & XDP_FLAGS_MODES)
3393 return -EBUSY;
3394
	/* Load both when no mode flags are set to allow easy activation of
	 * the driver path when the program is replaced by one which can't
	 * be offloaded.
	 */
3398 drv_prog = flags & XDP_FLAGS_HW_MODE ? NULL : prog;
3399 offload_prog = flags & XDP_FLAGS_DRV_MODE ? NULL : prog;
3400
3401 err = nfp_net_xdp_setup_drv(nn, drv_prog, extack);
3402 if (err)
3403 return err;
3404
3405 err = nfp_app_xdp_offload(nn->app, nn, offload_prog, extack);
3406 if (err && flags & XDP_FLAGS_HW_MODE)
3407 return err;
3408
3409 if (nn->xdp_prog)
3410 bpf_prog_put(nn->xdp_prog);
3411 nn->xdp_prog = prog;
3412 nn->xdp_flags = flags;
3413
3414 return 0;
3415}
3416
3417static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
3418{
3419 struct nfp_net *nn = netdev_priv(netdev);
3420
3421 switch (xdp->command) {
3422 case XDP_SETUP_PROG:
3423 case XDP_SETUP_PROG_HW:
3424 return nfp_net_xdp_setup(nn, xdp->prog, xdp->flags,
3425 xdp->extack);
3426 case XDP_QUERY_PROG:
3427 xdp->prog_attached = !!nn->xdp_prog;
3428 if (nn->dp.bpf_offload_xdp)
3429 xdp->prog_attached = XDP_ATTACHED_HW;
3430 xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
3431 xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0;
3432 return 0;
3433 default:
3434 return nfp_app_bpf(nn->app, nn, xdp);
3435 }
3436}
3437
3438static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
3439{
3440 struct nfp_net *nn = netdev_priv(netdev);
3441 struct sockaddr *saddr = addr;
3442 int err;
3443
3444 err = eth_prepare_mac_addr_change(netdev, addr);
3445 if (err)
3446 return err;
3447
3448 nfp_net_write_mac_addr(nn, saddr->sa_data);
3449
3450 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MACADDR);
3451 if (err)
3452 return err;
3453
3454 eth_commit_mac_addr_change(netdev, addr);
3455
3456 return 0;
3457}
3458
3459const struct net_device_ops nfp_net_netdev_ops = {
3460 .ndo_open = nfp_net_netdev_open,
3461 .ndo_stop = nfp_net_netdev_close,
3462 .ndo_start_xmit = nfp_net_tx,
3463 .ndo_get_stats64 = nfp_net_stat64,
3464 .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
3465 .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
3466 .ndo_set_vf_mac = nfp_app_set_vf_mac,
3467 .ndo_set_vf_vlan = nfp_app_set_vf_vlan,
3468 .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
3469 .ndo_get_vf_config = nfp_app_get_vf_config,
3470 .ndo_set_vf_link_state = nfp_app_set_vf_link_state,
3471 .ndo_setup_tc = nfp_port_setup_tc,
3472 .ndo_tx_timeout = nfp_net_tx_timeout,
3473 .ndo_set_rx_mode = nfp_net_set_rx_mode,
3474 .ndo_change_mtu = nfp_net_change_mtu,
3475 .ndo_set_mac_address = nfp_net_set_mac_address,
3476 .ndo_set_features = nfp_net_set_features,
3477 .ndo_features_check = nfp_net_features_check,
3478 .ndo_get_phys_port_name = nfp_port_get_phys_port_name,
3479 .ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
3480 .ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
3481 .ndo_bpf = nfp_net_xdp,
3482};
3483
/**
 * nfp_net_info() - Print general info about the NIC
 * @nn:      NFP Net device to reconfigure
 */
3488void nfp_net_info(struct nfp_net *nn)
3489{
3490 nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
3491 nn->dp.is_vf ? "VF " : "",
3492 nn->dp.num_tx_rings, nn->max_tx_rings,
3493 nn->dp.num_rx_rings, nn->max_rx_rings);
3494 nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
3495 nn->fw_ver.resv, nn->fw_ver.class,
3496 nn->fw_ver.major, nn->fw_ver.minor,
3497 nn->max_mtu);
3498 nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
3499 nn->cap,
3500 nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
3501 nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
3502 nn->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
3503 nn->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
3504 nn->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
3505 nn->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
3506 nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
3507 nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
3508 nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
3509 nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO1 " : "",
3510 nn->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSO2 " : "",
3511 nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "",
3512 nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "",
3513 nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
3514 nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
3515 nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
3516 nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
3517 nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
3518 nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
3519 nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
3520 "RXCSUM_COMPLETE " : "",
3521 nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
3522 nfp_app_extra_cap(nn->app, nn));
3523}
3524
/**
 * nfp_net_alloc() - Allocate netdev and related structure
 * @pdev:         PCI device
 * @needs_netdev: Whether to allocate a netdev for this vNIC
 * @max_tx_rings: Maximum number of TX rings supported by device
 * @max_rx_rings: Maximum number of RX rings supported by device
 *
 * This function allocates a netdev device and fills in the initial part
 * of the @struct nfp_net structure.  vNICs which do not need a netdev are
 * allocated with vzalloc() instead.
 *
 * Return: NFP Net device structure, or ERR_PTR on error.
 */
3538struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
3539 unsigned int max_tx_rings,
3540 unsigned int max_rx_rings)
3541{
3542 struct nfp_net *nn;
3543
3544 if (needs_netdev) {
3545 struct net_device *netdev;
3546
3547 netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
3548 max_tx_rings, max_rx_rings);
3549 if (!netdev)
3550 return ERR_PTR(-ENOMEM);
3551
3552 SET_NETDEV_DEV(netdev, &pdev->dev);
3553 nn = netdev_priv(netdev);
3554 nn->dp.netdev = netdev;
3555 } else {
3556 nn = vzalloc(sizeof(*nn));
3557 if (!nn)
3558 return ERR_PTR(-ENOMEM);
3559 }
3560
3561 nn->dp.dev = &pdev->dev;
3562 nn->pdev = pdev;
3563
3564 nn->max_tx_rings = max_tx_rings;
3565 nn->max_rx_rings = max_rx_rings;
3566
3567 nn->dp.num_tx_rings = min_t(unsigned int,
3568 max_tx_rings, num_online_cpus());
3569 nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings,
3570 netif_get_num_default_rss_queues());
3571
3572 nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
3573 nn->dp.num_r_vecs = min_t(unsigned int,
3574 nn->dp.num_r_vecs, num_online_cpus());
3575
3576 nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
3577 nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
3578
3579 spin_lock_init(&nn->reconfig_lock);
3580 spin_lock_init(&nn->link_status_lock);
3581
3582 timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
3583
3584 return nn;
3585}
3586
/**
 * nfp_net_free() - Undo what @nfp_net_alloc() did
 * @nn:      NFP Net device to reconfigure
 */
3591void nfp_net_free(struct nfp_net *nn)
3592{
3593 if (nn->dp.netdev)
3594 free_netdev(nn->dp.netdev);
3595 else
3596 vfree(nn);
3597}
3598
/**
 * nfp_net_rss_key_sz() - Get current size of the RSS key
 * @nn:      NFP Net device instance
 *
 * Return: size of the RSS key, in bytes.
 */
3605unsigned int nfp_net_rss_key_sz(struct nfp_net *nn)
3606{
3607 switch (nn->rss_hfunc) {
3608 case ETH_RSS_HASH_TOP:
3609 return NFP_NET_CFG_RSS_KEY_SZ;
3610 case ETH_RSS_HASH_XOR:
3611 return 0;
3612 case ETH_RSS_HASH_CRC32:
3613 return 4;
3614 }
3615
3616 nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc);
3617 return 0;
3618}
3619
/**
 * nfp_net_rss_init() - Set the initial RSS parameters
 * @nn:      NFP Net device to reconfigure
 */
3624static void nfp_net_rss_init(struct nfp_net *nn)
3625{
3626 unsigned long func_bit, rss_cap_hfunc;
3627 u32 reg;
3628
3629
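	/* Read the RSS capability and pick the first supported hash function,
	 * defaulting to Toeplitz if the firmware reports none.
	 */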
3630 reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP);
3631 rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg);
3632 if (!rss_cap_hfunc)
3633 rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC,
3634 NFP_NET_CFG_RSS_TOEPLITZ);
3635
3636 func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
3637 if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
3638 dev_warn(nn->dp.dev,
3639 "Bad RSS config, defaulting to Toeplitz hash\n");
3640 func_bit = ETH_RSS_HASH_TOP_BIT;
3641 }
3642 nn->rss_hfunc = 1 << func_bit;
3643
3644 netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn));
3645
3646 nfp_net_rss_init_itbl(nn);
3647
3648
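	/* Enable IPv4/IPv6 TCP by default */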
3649 nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
3650 NFP_NET_CFG_RSS_IPV6_TCP |
3651 FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) |
3652 NFP_NET_CFG_RSS_MASK;
3653}
3654
/**
 * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
 * @nn:      NFP Net device to reconfigure
 */
3659static void nfp_net_irqmod_init(struct nfp_net *nn)
3660{
3661 nn->rx_coalesce_usecs = 50;
3662 nn->rx_coalesce_max_frames = 64;
3663 nn->tx_coalesce_usecs = 50;
3664 nn->tx_coalesce_max_frames = 64;
3665}
3666
3667static void nfp_net_netdev_init(struct nfp_net *nn)
3668{
3669 struct net_device *netdev = nn->dp.netdev;
3670
3671 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
3672
3673 netdev->mtu = nn->dp.mtu;
3674
	/* Advertise/enable offloads based on capabilities
	 *
	 * Note: netdev->features show the currently enabled features
	 * and netdev->hw_features advertises which features are
	 * supported.  By default we enable most features.
	 */
3681 if (nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
3682 netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
3683
3684 netdev->hw_features = NETIF_F_HIGHDMA;
3685 if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY) {
3686 netdev->hw_features |= NETIF_F_RXCSUM;
3687 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
3688 }
3689 if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
3690 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3691 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
3692 }
3693 if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
3694 netdev->hw_features |= NETIF_F_SG;
3695 nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
3696 }
3697 if ((nn->cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
3698 nn->cap & NFP_NET_CFG_CTRL_LSO2) {
3699 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
3700 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
3701 NFP_NET_CFG_CTRL_LSO;
3702 }
3703 if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
3704 netdev->hw_features |= NETIF_F_RXHASH;
3705 if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
3706 nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
3707 if (nn->cap & NFP_NET_CFG_CTRL_LSO)
3708 netdev->hw_features |= NETIF_F_GSO_GRE |
3709 NETIF_F_GSO_UDP_TUNNEL;
3710 nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;
3711
3712 netdev->hw_enc_features = netdev->hw_features;
3713 }
3714
3715 netdev->vlan_features = netdev->hw_features;
3716
3717 if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
3718 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
3719 nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
3720 }
3721 if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
3722 if (nn->cap & NFP_NET_CFG_CTRL_LSO2) {
3723 nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
3724 } else {
3725 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
3726 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
3727 }
3728 }
3729 if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) {
3730 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3731 nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
3732 }
3733
3734 netdev->features = netdev->hw_features;
3735
3736 if (nfp_app_has_tc(nn->app) && nn->port)
3737 netdev->hw_features |= NETIF_F_HW_TC;
3738
3739
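	/* Advertise but disable TSO by default. */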
3740 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
3741 nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
3742
3743
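	/* Finalise the netdev setup */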
3744 netdev->netdev_ops = &nfp_net_netdev_ops;
3745 netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
3746
3747 SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
3748
3749
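	/* MTU range: 68 - hw-specific max */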
3750 netdev->min_mtu = ETH_MIN_MTU;
3751 netdev->max_mtu = nn->max_mtu;
3752
3753 netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
3754
3755 netif_carrier_off(netdev);
3756
3757 nfp_net_set_ethtool_ops(netdev);
3758}
3759
3760static int nfp_net_read_caps(struct nfp_net *nn)
3761{
3762
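	/* Get some of the read-only fields from the BAR */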
3763 nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
3764 nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
3765
	/* ABI 4.x and ctrl vNICs always use chained metadata; other vNICs
	 * use it when the firmware advertises it or does not advertise the
	 * RSS capability at all.
	 */
3770 nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
3771 !nn->dp.netdev ||
3772 !(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
3773 nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
3774
3775
3776
3777 if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
3778 nn->cap &= ~NFP_NET_CFG_CTRL_RSS;
3779
3780
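	/* Determine RX packet/metadata boundary offset */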
3781 if (nn->fw_ver.major >= 2) {
3782 u32 reg;
3783
3784 reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
3785 if (reg > NFP_NET_MAX_PREPEND) {
3786 nn_err(nn, "Invalid rx offset: %d\n", reg);
3787 return -EINVAL;
3788 }
3789 nn->dp.rx_offset = reg;
3790 } else {
3791 nn->dp.rx_offset = NFP_NET_RX_OFFSET;
3792 }
3793
3794
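	/* For control vNICs mask out the capabilities the app doesn't want */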
3795 if (!nn->dp.netdev)
3796 nn->cap &= nn->app->type->ctrl_cap_mask;
3797
3798 return 0;
3799}
3800
/**
 * nfp_net_init() - Initialise/finalise the nfp_net structure
 * @nn:      NFP Net device structure
 *
 * Return: 0 on success or negative errno on error.
 */
3807int nfp_net_init(struct nfp_net *nn)
3808{
3809 int err;
3810
3811 nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
3812
3813 err = nfp_net_read_caps(nn);
3814 if (err)
3815 return err;
3816
3817
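	/* Set default MTU and Freelist buffer size */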
3818 if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
3819 nn->dp.mtu = nn->max_mtu;
3820 else
3821 nn->dp.mtu = NFP_NET_DEFAULT_MTU;
3822 nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
3823
3824 if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
3825 nfp_net_rss_init(nn);
3826 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
3827 NFP_NET_CFG_CTRL_RSS;
3828 }
3829
3830
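	/* Allow L2 Broadcast through by default, if supported */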
3831 if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
3832 nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
3833
3834
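	/* Allow IRQ moderation, if supported */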
3835 if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
3836 nfp_net_irqmod_init(nn);
3837 nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
3838 }
3839
3840 err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
3841 &nn->tlv_caps);
3842 if (err)
3843 return err;
3844
3845 if (nn->dp.netdev)
3846 nfp_net_netdev_init(nn);
3847
3848
3849 nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
3850
3851
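	/* Make sure the FW knows the netdev is supposed to be disabled here */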
3852 nn_writel(nn, NFP_NET_CFG_CTRL, 0);
3853 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
3854 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
3855 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
3856 NFP_NET_CFG_UPDATE_GEN);
3857 if (err)
3858 return err;
3859
3860 nfp_net_vecs_init(nn);
3861
3862 if (!nn->dp.netdev)
3863 return 0;
3864 return register_netdev(nn->dp.netdev);
3865}
3866
/**
 * nfp_net_clean() - Undo what nfp_net_init() did.
 * @nn:      NFP Net device structure
 */
3871void nfp_net_clean(struct nfp_net *nn)
3872{
3873 if (!nn->dp.netdev)
3874 return;
3875
3876 unregister_netdev(nn->dp.netdev);
3877}
3878