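/* Netronome network device driver: common datapath and configuration code
 * shared between the PF and VF netdev implementations.
 */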
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/page_ref.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/ktime.h>

#include <net/switchdev.h>
#include <net/vxlan.h>

#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_net_sriov.h"
#include "nfp_port.h"
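/**
 * nfp_net_get_fw_version() - Read and parse the FW version
 * @fw_ver:   Output fw_version structure to read to
 * @ctrl_bar: Mapped address of the control BAR
 */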
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
			    void __iomem *ctrl_bar)
{
	u32 reg;

	reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
	put_unaligned_le32(reg, fw_ver);
}
90
91static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
92{
93 return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
94 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
95 dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
96}
97
98static void
99nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
100{
101 dma_sync_single_for_device(dp->dev, dma_addr,
102 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
103 dp->rx_dma_dir);
104}
105
106static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
107{
108 dma_unmap_single_attrs(dp->dev, dma_addr,
109 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
110 dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
111}
112
113static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr,
114 unsigned int len)
115{
116 dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
117 len, dp->rx_dma_dir);
118}
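/* Firmware reconfig
 *
 * Firmware reconfig may take a while so there are two versions of it -
 * synchronous and asynchronous (posted).  All synchronous callers are holding
 * RTNL so they don't have to be serialized against each other.
 */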
static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
{
	nn_writel(nn, NFP_NET_CFG_UPDATE, update);
	/* ensure update is written before pinging HW */
	nn_pci_flush(nn);
	nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
}
133
134
135static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
136{
137 update |= nn->reconfig_posted;
138 nn->reconfig_posted = 0;
139
140 nfp_net_reconfig_start(nn, update);
141
142 nn->reconfig_timer_active = true;
143 mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
144}
145
146static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
147{
148 u32 reg;
149
150 reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
151 if (reg == 0)
152 return true;
153 if (reg & NFP_NET_CFG_UPDATE_ERR) {
154 nn_err(nn, "Reconfig error: 0x%08x\n", reg);
155 return true;
156 } else if (last_check) {
157 nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
158 return true;
159 }
160
161 return false;
162}
163
164static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
165{
166 bool timed_out = false;
167
168
169 while (!nfp_net_reconfig_check_done(nn, timed_out)) {
170 msleep(1);
171 timed_out = time_is_before_eq_jiffies(deadline);
172 }
173
174 if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
175 return -EIO;
176
177 return timed_out ? -EIO : 0;
178}
179
180static void nfp_net_reconfig_timer(struct timer_list *t)
181{
182 struct nfp_net *nn = from_timer(nn, t, reconfig_timer);
183
184 spin_lock_bh(&nn->reconfig_lock);
185
186 nn->reconfig_timer_active = false;
187
188
189 if (nn->reconfig_sync_present)
190 goto done;
191
192
193 nfp_net_reconfig_check_done(nn, true);
194
195 if (nn->reconfig_posted)
196 nfp_net_reconfig_start_async(nn, 0);
197done:
198 spin_unlock_bh(&nn->reconfig_lock);
199}
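/**
 * nfp_net_reconfig_post() - Post async reconfig request
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Post an update to the firmware without waiting for it to complete.  If a
 * synchronous reconfig is in progress the request is merged into it,
 * otherwise it is either started immediately or left for the reconfig
 * timer to pick up once the previous async update completes.
 */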
210static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
211{
212 spin_lock_bh(&nn->reconfig_lock);
213
214
215 if (nn->reconfig_sync_present) {
216 nn->reconfig_posted |= update;
217 goto done;
218 }
219
220
221 if (!nn->reconfig_timer_active ||
222 nfp_net_reconfig_check_done(nn, false))
223 nfp_net_reconfig_start_async(nn, update);
224 else
225 nn->reconfig_posted |= update;
226done:
227 spin_unlock_bh(&nn->reconfig_lock);
228}
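/**
 * nfp_net_reconfig() - Reconfigure the firmware
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Write the update word to the BAR and ping the reconfig queue, then poll
 * until the firmware has acked the update.
 *
 * Return: Negative errno on error, 0 on success
 */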
241int nfp_net_reconfig(struct nfp_net *nn, u32 update)
242{
243 bool cancelled_timer = false;
244 u32 pre_posted_requests;
245 int ret;
246
247 spin_lock_bh(&nn->reconfig_lock);
248
249 nn->reconfig_sync_present = true;
250
251 if (nn->reconfig_timer_active) {
252 del_timer(&nn->reconfig_timer);
253 nn->reconfig_timer_active = false;
254 cancelled_timer = true;
255 }
256 pre_posted_requests = nn->reconfig_posted;
257 nn->reconfig_posted = 0;
258
259 spin_unlock_bh(&nn->reconfig_lock);
260
261 if (cancelled_timer)
262 nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
263
264
265 if (pre_posted_requests) {
266 nfp_net_reconfig_start(nn, pre_posted_requests);
267 nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
268 }
269
270 nfp_net_reconfig_start(nn, update);
271 ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
272
273 spin_lock_bh(&nn->reconfig_lock);
274
275 if (nn->reconfig_posted)
276 nfp_net_reconfig_start_async(nn, 0);
277
278 nn->reconfig_sync_present = false;
279
280 spin_unlock_bh(&nn->reconfig_lock);
281
282 return ret;
283}
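/**
 * nfp_net_reconfig_mbox() - Reconfigure the firmware via the mailbox
 * @nn:        NFP Net device to reconfigure
 * @mbox_cmd:  The value for the mailbox command
 *
 * Helper function for mailbox updates.
 *
 * Return: Negative errno on error, 0 on success
 */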
294static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
295{
296 int ret;
297
298 nn_writeq(nn, NFP_NET_CFG_MBOX_CMD, mbox_cmd);
299
300 ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
301 if (ret) {
302 nn_err(nn, "Mailbox update error\n");
303 return ret;
304 }
305
306 return -nn_readl(nn, NFP_NET_CFG_MBOX_RET);
307}
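/* Interrupt configuration and handling
 */

/**
 * nfp_net_irq_unmask() - Unmask automasked interrupt
 * @nn:       NFP Network structure
 * @entry_nr: MSI-X table entry
 *
 * Clear the ICR for the IRQ entry.
 */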
319static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
320{
321 nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
322 nn_pci_flush(nn);
323}
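/**
 * nfp_net_irqs_alloc() - allocates MSI-X IRQs
 * @pdev:        PCI device structure
 * @irq_entries: Array to be initialized and used to hold the irq entries
 * @min_irqs:    Minimal acceptable number of interrupts
 * @wanted_irqs: Target number of interrupts to allocate
 *
 * Return: Number of IRQs obtained or 0 on error.
 */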
334unsigned int
335nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
336 unsigned int min_irqs, unsigned int wanted_irqs)
337{
338 unsigned int i;
339 int got_irqs;
340
341 for (i = 0; i < wanted_irqs; i++)
342 irq_entries[i].entry = i;
343
344 got_irqs = pci_enable_msix_range(pdev, irq_entries,
345 min_irqs, wanted_irqs);
346 if (got_irqs < 0) {
347 dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n",
348 min_irqs, wanted_irqs, got_irqs);
349 return 0;
350 }
351
352 if (got_irqs < wanted_irqs)
353 dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
354 wanted_irqs, got_irqs);
355
356 return got_irqs;
357}
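/**
 * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev
 * @nn:          NFP Network structure
 * @irq_entries: Table of allocated interrupts
 * @n:           Size of @irq_entries (number of entries to grab from)
 *
 * After interrupts are allocated with nfp_net_irqs_alloc() this function
 * should be called to assign them to a specific netdev (port).
 */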
368void
369nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
370 unsigned int n)
371{
372 struct nfp_net_dp *dp = &nn->dp;
373
374 nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
375 dp->num_r_vecs = nn->max_r_vecs;
376
377 memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);
378
379 if (dp->num_rx_rings > dp->num_r_vecs ||
380 dp->num_tx_rings > dp->num_r_vecs)
381 dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n",
382 dp->num_rx_rings, dp->num_tx_rings,
383 dp->num_r_vecs);
384
385 dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
386 dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
387 dp->num_stack_tx_rings = dp->num_tx_rings;
388}
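/**
 * nfp_net_irqs_disable() - Disable interrupts
 * @pdev:        PCI device structure
 */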
396void nfp_net_irqs_disable(struct pci_dev *pdev)
397{
398 pci_disable_msix(pdev);
399}
400
401
402
403
404
405
406
407
408static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
409{
410 struct nfp_net_r_vector *r_vec = data;
411
412 napi_schedule_irqoff(&r_vec->napi);
413
414
415
416
417
418 return IRQ_HANDLED;
419}
420
421static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data)
422{
423 struct nfp_net_r_vector *r_vec = data;
424
425 tasklet_schedule(&r_vec->tasklet);
426
427 return IRQ_HANDLED;
428}
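/**
 * nfp_net_read_link_status() - Reread link status from control BAR
 * @nn:       NFP Network structure
 */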
434static void nfp_net_read_link_status(struct nfp_net *nn)
435{
436 unsigned long flags;
437 bool link_up;
438 u32 sts;
439
440 spin_lock_irqsave(&nn->link_status_lock, flags);
441
442 sts = nn_readl(nn, NFP_NET_CFG_STS);
443 link_up = !!(sts & NFP_NET_CFG_STS_LINK);
444
445 if (nn->link_up == link_up)
446 goto out;
447
448 nn->link_up = link_up;
449 if (nn->port)
450 set_bit(NFP_PORT_CHANGED, &nn->port->flags);
451
452 if (nn->link_up) {
453 netif_carrier_on(nn->dp.netdev);
454 netdev_info(nn->dp.netdev, "NIC Link is Up\n");
455 } else {
456 netif_carrier_off(nn->dp.netdev);
457 netdev_info(nn->dp.netdev, "NIC Link is Down\n");
458 }
459out:
460 spin_unlock_irqrestore(&nn->link_status_lock, flags);
461}
462
463
464
465
466
467
468
469
470static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
471{
472 struct nfp_net *nn = data;
473 struct msix_entry *entry;
474
475 entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];
476
477 nfp_net_read_link_status(nn);
478
479 nfp_net_irq_unmask(nn, entry->entry);
480
481 return IRQ_HANDLED;
482}
483
484
485
486
487
488
489
490
491static irqreturn_t nfp_net_irq_exn(int irq, void *data)
492{
493 struct nfp_net *nn = data;
494
495 nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
496
497 return IRQ_HANDLED;
498}
499
500
501
502
503
504
505
506
507static void
508nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
509 struct nfp_net_r_vector *r_vec, unsigned int idx,
510 bool is_xdp)
511{
512 struct nfp_net *nn = r_vec->nfp_net;
513
514 tx_ring->idx = idx;
515 tx_ring->r_vec = r_vec;
516 tx_ring->is_xdp = is_xdp;
517 u64_stats_init(&tx_ring->r_vec->tx_sync);
518
519 tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
520 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
521}
522
523
524
525
526
527
528
529static void
530nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
531 struct nfp_net_r_vector *r_vec, unsigned int idx)
532{
533 struct nfp_net *nn = r_vec->nfp_net;
534
535 rx_ring->idx = idx;
536 rx_ring->r_vec = r_vec;
537 u64_stats_init(&rx_ring->r_vec->rx_sync);
538
539 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
540 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
541}
542
543
544
545
546
547
548
549
550
551
552
553static int
554nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
555 const char *format, char *name, size_t name_sz,
556 unsigned int vector_idx, irq_handler_t handler)
557{
558 struct msix_entry *entry;
559 int err;
560
561 entry = &nn->irq_entries[vector_idx];
562
563 snprintf(name, name_sz, format, nfp_net_name(nn));
564 err = request_irq(entry->vector, handler, 0, name, nn);
565 if (err) {
566 nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
567 entry->vector, err);
568 return err;
569 }
570 nn_writeb(nn, ctrl_offset, entry->entry);
571 nfp_net_irq_unmask(nn, entry->entry);
572
573 return 0;
574}
575
576
577
578
579
580
581
582static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
583 unsigned int vector_idx)
584{
585 nn_writeb(nn, ctrl_offset, 0xff);
586 nn_pci_flush(nn);
587 free_irq(nn->irq_entries[vector_idx].vector, nn);
588}
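/* Transmit
 *
 * One queue controller peripheral queue is used per TX ring.  The driver
 * enqueues packets for transmit by advancing the write pointer; the device
 * indicates completions by advancing its read pointer.  Host copies of both
 * pointers are kept in struct nfp_net_tx_ring.
 */

/**
 * nfp_net_tx_full() - check if the TX ring is full
 * @tx_ring: TX ring to check
 * @dcnt:    Number of descriptors that need to be enqueued (must be >= 1)
 *
 * This function checks, based on the *host copy* of read/write
 * pointers if a given TX ring is full.  The real TX queue may have
 * less descriptors available.
 *
 * Return: True if the ring is full.
 */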
static int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
{
	return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
}
618
619
620static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
621{
622 return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
623}
624
625static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
626{
627 return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
628}
629
630
631
632
633
634
635
636
637
638
639static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
640 struct nfp_net_tx_ring *tx_ring)
641{
642 netif_tx_stop_queue(nd_q);
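	/* We can race with the TX completion out of NAPI so recheck */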
645 smp_mb();
646 if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
647 netif_tx_start_queue(nd_q);
648}
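/**
 * nfp_net_tx_tso() - Set up Tx descriptor for LSO
 * @r_vec: per-ring structure
 * @txbuf: Pointer to driver soft TX descriptor
 * @txd: Pointer to HW TX descriptor
 * @skb: Pointer to SKB
 *
 * Set up the Tx descriptor for LSO; does nothing for non-LSO skbs.
 */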
660static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
661 struct nfp_net_tx_buf *txbuf,
662 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
663{
664 u32 hdrlen;
665 u16 mss;
666
667 if (!skb_is_gso(skb))
668 return;
669
670 if (!skb->encapsulation) {
671 txd->l3_offset = skb_network_offset(skb);
672 txd->l4_offset = skb_transport_offset(skb);
673 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
674 } else {
675 txd->l3_offset = skb_inner_network_offset(skb);
676 txd->l4_offset = skb_inner_transport_offset(skb);
677 hdrlen = skb_inner_transport_header(skb) - skb->data +
678 inner_tcp_hdrlen(skb);
679 }
680
681 txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
682 txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
683
684 mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
685 txd->lso_hdrlen = hdrlen;
686 txd->mss = cpu_to_le16(mss);
687 txd->flags |= PCIE_DESC_TX_LSO;
688
689 u64_stats_update_begin(&r_vec->tx_sync);
690 r_vec->tx_lso++;
691 u64_stats_update_end(&r_vec->tx_sync);
692}
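/**
 * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
 * @dp:    NFP Net data path struct
 * @r_vec: per-ring structure
 * @txbuf: Pointer to driver soft TX descriptor
 * @txd:   Pointer to TX descriptor
 * @skb:   Pointer to SKB
 *
 * This function sets the TX checksum flags in the TX descriptor based
 * on the configuration and the protocol of the packet to be transmitted.
 */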
705static void nfp_net_tx_csum(struct nfp_net_dp *dp,
706 struct nfp_net_r_vector *r_vec,
707 struct nfp_net_tx_buf *txbuf,
708 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
709{
710 struct ipv6hdr *ipv6h;
711 struct iphdr *iph;
712 u8 l4_hdr;
713
714 if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
715 return;
716
717 if (skb->ip_summed != CHECKSUM_PARTIAL)
718 return;
719
720 txd->flags |= PCIE_DESC_TX_CSUM;
721 if (skb->encapsulation)
722 txd->flags |= PCIE_DESC_TX_ENCAP;
723
724 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
725 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
726
727 if (iph->version == 4) {
728 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
729 l4_hdr = iph->protocol;
730 } else if (ipv6h->version == 6) {
731 l4_hdr = ipv6h->nexthdr;
732 } else {
733 nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
734 return;
735 }
736
737 switch (l4_hdr) {
738 case IPPROTO_TCP:
739 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
740 break;
741 case IPPROTO_UDP:
742 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
743 break;
744 default:
745 nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
746 return;
747 }
748
749 u64_stats_update_begin(&r_vec->tx_sync);
750 if (skb->encapsulation)
751 r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
752 else
753 r_vec->hw_csum_tx += txbuf->pkt_cnt;
754 u64_stats_update_end(&r_vec->tx_sync);
755}
756
757static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
758{
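	/* force memory write before we let HW know */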
759 wmb();
760 nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
761 tx_ring->wr_ptr_add = 0;
762}
763
764static int nfp_net_prep_port_id(struct sk_buff *skb)
765{
766 struct metadata_dst *md_dst = skb_metadata_dst(skb);
767 unsigned char *data;
768
769 if (likely(!md_dst))
770 return 0;
771 if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
772 return 0;
773
774 if (unlikely(skb_cow_head(skb, 8)))
775 return -ENOMEM;
776
777 data = skb_push(skb, 8);
778 put_unaligned_be32(NFP_NET_META_PORTID, data);
779 put_unaligned_be32(md_dst->u.port_info.port_id, data + 4);
780
781 return 8;
782}
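/**
 * nfp_net_tx() - Main transmit entry point
 * @skb:    SKB to transmit
 * @netdev: netdev structure
 *
 * Return: NETDEV_TX_OK on success.
 */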
791static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
792{
793 struct nfp_net *nn = netdev_priv(netdev);
794 const struct skb_frag_struct *frag;
795 struct nfp_net_tx_desc *txd, txdg;
796 int f, nr_frags, wr_idx, md_bytes;
797 struct nfp_net_tx_ring *tx_ring;
798 struct nfp_net_r_vector *r_vec;
799 struct nfp_net_tx_buf *txbuf;
800 struct netdev_queue *nd_q;
801 struct nfp_net_dp *dp;
802 dma_addr_t dma_addr;
803 unsigned int fsize;
804 u16 qidx;
805
806 dp = &nn->dp;
807 qidx = skb_get_queue_mapping(skb);
808 tx_ring = &dp->tx_rings[qidx];
809 r_vec = tx_ring->r_vec;
810 nd_q = netdev_get_tx_queue(dp->netdev, qidx);
811
812 nr_frags = skb_shinfo(skb)->nr_frags;
813
814 if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
815 nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
816 qidx, tx_ring->wr_p, tx_ring->rd_p);
817 netif_tx_stop_queue(nd_q);
818 nfp_net_tx_xmit_more_flush(tx_ring);
819 u64_stats_update_begin(&r_vec->tx_sync);
820 r_vec->tx_busy++;
821 u64_stats_update_end(&r_vec->tx_sync);
822 return NETDEV_TX_BUSY;
823 }
824
825 md_bytes = nfp_net_prep_port_id(skb);
826 if (unlikely(md_bytes < 0)) {
827 nfp_net_tx_xmit_more_flush(tx_ring);
828 dev_kfree_skb_any(skb);
829 return NETDEV_TX_OK;
830 }
831
832
833 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
834 DMA_TO_DEVICE);
835 if (dma_mapping_error(dp->dev, dma_addr))
836 goto err_free;
837
838 wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
839
840
841 txbuf = &tx_ring->txbufs[wr_idx];
842 txbuf->skb = skb;
843 txbuf->dma_addr = dma_addr;
844 txbuf->fidx = -1;
845 txbuf->pkt_cnt = 1;
846 txbuf->real_len = skb->len;
847
848
849 txd = &tx_ring->txds[wr_idx];
850 txd->offset_eop = (nr_frags ? 0 : PCIE_DESC_TX_EOP) | md_bytes;
851 txd->dma_len = cpu_to_le16(skb_headlen(skb));
852 nfp_desc_set_dma_addr(txd, dma_addr);
853 txd->data_len = cpu_to_le16(skb->len);
854
855 txd->flags = 0;
856 txd->mss = 0;
857 txd->lso_hdrlen = 0;
858
859
860 nfp_net_tx_tso(r_vec, txbuf, txd, skb);
861 nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
862 if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
863 txd->flags |= PCIE_DESC_TX_VLAN;
864 txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
865 }
866
867
868 if (nr_frags > 0) {
869
870 txdg = *txd;
871
872 for (f = 0; f < nr_frags; f++) {
873 frag = &skb_shinfo(skb)->frags[f];
874 fsize = skb_frag_size(frag);
875
876 dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
877 fsize, DMA_TO_DEVICE);
878 if (dma_mapping_error(dp->dev, dma_addr))
879 goto err_unmap;
880
881 wr_idx = D_IDX(tx_ring, wr_idx + 1);
882 tx_ring->txbufs[wr_idx].skb = skb;
883 tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
884 tx_ring->txbufs[wr_idx].fidx = f;
885
886 txd = &tx_ring->txds[wr_idx];
887 *txd = txdg;
888 txd->dma_len = cpu_to_le16(fsize);
889 nfp_desc_set_dma_addr(txd, dma_addr);
890 txd->offset_eop |=
891 (f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
892 }
893
894 u64_stats_update_begin(&r_vec->tx_sync);
895 r_vec->tx_gather++;
896 u64_stats_update_end(&r_vec->tx_sync);
897 }
898
899 netdev_tx_sent_queue(nd_q, txbuf->real_len);
900
901 skb_tx_timestamp(skb);
902
903 tx_ring->wr_p += nr_frags + 1;
904 if (nfp_net_tx_ring_should_stop(tx_ring))
905 nfp_net_tx_ring_stop(nd_q, tx_ring);
906
907 tx_ring->wr_ptr_add += nr_frags + 1;
908 if (!skb->xmit_more || netif_xmit_stopped(nd_q))
909 nfp_net_tx_xmit_more_flush(tx_ring);
910
911 return NETDEV_TX_OK;
912
913err_unmap:
914 while (--f >= 0) {
915 frag = &skb_shinfo(skb)->frags[f];
916 dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
917 skb_frag_size(frag), DMA_TO_DEVICE);
918 tx_ring->txbufs[wr_idx].skb = NULL;
919 tx_ring->txbufs[wr_idx].dma_addr = 0;
920 tx_ring->txbufs[wr_idx].fidx = -2;
921 wr_idx = wr_idx - 1;
922 if (wr_idx < 0)
923 wr_idx += tx_ring->cnt;
924 }
925 dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
926 skb_headlen(skb), DMA_TO_DEVICE);
927 tx_ring->txbufs[wr_idx].skb = NULL;
928 tx_ring->txbufs[wr_idx].dma_addr = 0;
929 tx_ring->txbufs[wr_idx].fidx = -2;
930err_free:
931 nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
932 nfp_net_tx_xmit_more_flush(tx_ring);
933 u64_stats_update_begin(&r_vec->tx_sync);
934 r_vec->tx_errors++;
935 u64_stats_update_end(&r_vec->tx_sync);
936 dev_kfree_skb_any(skb);
937 return NETDEV_TX_OK;
938}
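/**
 * nfp_net_tx_complete() - Handle completed TX packets
 * @tx_ring: TX ring structure
 */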
946static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
947{
948 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
949 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
950 const struct skb_frag_struct *frag;
951 struct netdev_queue *nd_q;
952 u32 done_pkts = 0, done_bytes = 0;
953 struct sk_buff *skb;
954 int todo, nr_frags;
955 u32 qcp_rd_p;
956 int fidx;
957 int idx;
958
959 if (tx_ring->wr_p == tx_ring->rd_p)
960 return;
961
962
963 qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
964
965 if (qcp_rd_p == tx_ring->qcp_rd_p)
966 return;
967
968 todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
969
970 while (todo--) {
971 idx = D_IDX(tx_ring, tx_ring->rd_p++);
972
973 skb = tx_ring->txbufs[idx].skb;
974 if (!skb)
975 continue;
976
977 nr_frags = skb_shinfo(skb)->nr_frags;
978 fidx = tx_ring->txbufs[idx].fidx;
979
980 if (fidx == -1) {
981
982 dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr,
983 skb_headlen(skb), DMA_TO_DEVICE);
984
985 done_pkts += tx_ring->txbufs[idx].pkt_cnt;
986 done_bytes += tx_ring->txbufs[idx].real_len;
987 } else {
988
989 frag = &skb_shinfo(skb)->frags[fidx];
990 dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr,
991 skb_frag_size(frag), DMA_TO_DEVICE);
992 }
993
994
995 if (fidx == nr_frags - 1)
996 dev_consume_skb_any(skb);
997
998 tx_ring->txbufs[idx].dma_addr = 0;
999 tx_ring->txbufs[idx].skb = NULL;
1000 tx_ring->txbufs[idx].fidx = -2;
1001 }
1002
1003 tx_ring->qcp_rd_p = qcp_rd_p;
1004
1005 u64_stats_update_begin(&r_vec->tx_sync);
1006 r_vec->tx_bytes += done_bytes;
1007 r_vec->tx_pkts += done_pkts;
1008 u64_stats_update_end(&r_vec->tx_sync);
1009
1010 if (!dp->netdev)
1011 return;
1012
1013 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
1014 netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
1015 if (nfp_net_tx_ring_should_wake(tx_ring)) {
1016
1017 smp_mb();
1018
1019 if (unlikely(netif_tx_queue_stopped(nd_q)))
1020 netif_tx_wake_queue(nd_q);
1021 }
1022
1023 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
1024 "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
1025 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
1026}
1027
1028static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
1029{
1030 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1031 u32 done_pkts = 0, done_bytes = 0;
1032 bool done_all;
1033 int idx, todo;
1034 u32 qcp_rd_p;
1035
1036
1037 qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
1038
1039 if (qcp_rd_p == tx_ring->qcp_rd_p)
1040 return true;
1041
1042 todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
1043
1044 done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
1045 todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
1046
1047 tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
1048
1049 done_pkts = todo;
1050 while (todo--) {
1051 idx = D_IDX(tx_ring, tx_ring->rd_p);
1052 tx_ring->rd_p++;
1053
1054 done_bytes += tx_ring->txbufs[idx].real_len;
1055 }
1056
1057 u64_stats_update_begin(&r_vec->tx_sync);
1058 r_vec->tx_bytes += done_bytes;
1059 r_vec->tx_pkts += done_pkts;
1060 u64_stats_update_end(&r_vec->tx_sync);
1061
1062 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
1063 "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
1064 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
1065
1066 return done_all;
1067}
1068
1069
1070
1071
1072
1073
1074
1075
1076static void
1077nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
1078{
1079 const struct skb_frag_struct *frag;
1080 struct netdev_queue *nd_q;
1081
1082 while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
1083 struct nfp_net_tx_buf *tx_buf;
1084 struct sk_buff *skb;
1085 int idx, nr_frags;
1086
1087 idx = D_IDX(tx_ring, tx_ring->rd_p);
1088 tx_buf = &tx_ring->txbufs[idx];
1089
1090 skb = tx_ring->txbufs[idx].skb;
1091 nr_frags = skb_shinfo(skb)->nr_frags;
1092
1093 if (tx_buf->fidx == -1) {
1094
1095 dma_unmap_single(dp->dev, tx_buf->dma_addr,
1096 skb_headlen(skb), DMA_TO_DEVICE);
1097 } else {
1098
1099 frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
1100 dma_unmap_page(dp->dev, tx_buf->dma_addr,
1101 skb_frag_size(frag), DMA_TO_DEVICE);
1102 }
1103
1104
1105 if (tx_buf->fidx == nr_frags - 1)
1106 dev_kfree_skb_any(skb);
1107
1108 tx_buf->dma_addr = 0;
1109 tx_buf->skb = NULL;
1110 tx_buf->fidx = -2;
1111
1112 tx_ring->qcp_rd_p++;
1113 tx_ring->rd_p++;
1114 }
1115
1116 memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
1117 tx_ring->wr_p = 0;
1118 tx_ring->rd_p = 0;
1119 tx_ring->qcp_rd_p = 0;
1120 tx_ring->wr_ptr_add = 0;
1121
1122 if (tx_ring->is_xdp || !dp->netdev)
1123 return;
1124
1125 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
1126 netdev_tx_reset_queue(nd_q);
1127}
1128
1129static void nfp_net_tx_timeout(struct net_device *netdev)
1130{
1131 struct nfp_net *nn = netdev_priv(netdev);
1132 int i;
1133
1134 for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
1135 if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
1136 continue;
1137 nn_warn(nn, "TX timeout on ring: %d\n", i);
1138 }
1139 nn_warn(nn, "TX watchdog timeout\n");
1140}
1141
1142
1143
1144static unsigned int
1145nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
1146{
1147 unsigned int fl_bufsz;
1148
1149 fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
1150 fl_bufsz += dp->rx_dma_off;
1151 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1152 fl_bufsz += NFP_NET_MAX_PREPEND;
1153 else
1154 fl_bufsz += dp->rx_offset;
1155 fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;
1156
1157 fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
1158 fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1159
1160 return fl_bufsz;
1161}
1162
1163static void
1164nfp_net_free_frag(void *frag, bool xdp)
1165{
1166 if (!xdp)
1167 skb_free_frag(frag);
1168 else
1169 __free_page(virt_to_page(frag));
1170}
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1182{
1183 void *frag;
1184
1185 if (!dp->xdp_prog) {
1186 frag = netdev_alloc_frag(dp->fl_bufsz);
1187 } else {
1188 struct page *page;
1189
1190 page = alloc_page(GFP_KERNEL);
1191 frag = page ? page_address(page) : NULL;
1192 }
1193 if (!frag) {
1194 nn_dp_warn(dp, "Failed to alloc receive page frag\n");
1195 return NULL;
1196 }
1197
1198 *dma_addr = nfp_net_dma_map_rx(dp, frag);
1199 if (dma_mapping_error(dp->dev, *dma_addr)) {
1200 nfp_net_free_frag(frag, dp->xdp_prog);
1201 nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
1202 return NULL;
1203 }
1204
1205 return frag;
1206}
1207
1208static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1209{
1210 void *frag;
1211
1212 if (!dp->xdp_prog) {
1213 frag = napi_alloc_frag(dp->fl_bufsz);
1214 if (unlikely(!frag))
1215 return NULL;
1216 } else {
1217 struct page *page;
1218
1219 page = dev_alloc_page();
1220 if (unlikely(!page))
1221 return NULL;
1222 frag = page_address(page);
1223 }
1224
1225 *dma_addr = nfp_net_dma_map_rx(dp, frag);
1226 if (dma_mapping_error(dp->dev, *dma_addr)) {
1227 nfp_net_free_frag(frag, dp->xdp_prog);
1228 nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
1229 return NULL;
1230 }
1231
1232 return frag;
1233}
1234
1235
1236
1237
1238
1239
1240
1241
1242static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
1243 struct nfp_net_rx_ring *rx_ring,
1244 void *frag, dma_addr_t dma_addr)
1245{
1246 unsigned int wr_idx;
1247
1248 wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
1249
1250 nfp_net_dma_sync_dev_rx(dp, dma_addr);
1251
1252
1253 rx_ring->rxbufs[wr_idx].frag = frag;
1254 rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
1255
1256
1257 rx_ring->rxds[wr_idx].fld.reserved = 0;
1258 rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
1259 nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
1260 dma_addr + dp->rx_dma_off);
1261
1262 rx_ring->wr_p++;
1263 if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
1264
1265
1266
1267 wmb();
1268 nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
1269 }
1270}
1271
1272
1273
1274
1275
1276
1277
1278
1279static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
1280{
1281 unsigned int wr_idx, last_idx;
1282
1283
1284 wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
1285 last_idx = rx_ring->cnt - 1;
1286 rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
1287 rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
1288 rx_ring->rxbufs[last_idx].dma_addr = 0;
1289 rx_ring->rxbufs[last_idx].frag = NULL;
1290
1291 memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
1292 rx_ring->wr_p = 0;
1293 rx_ring->rd_p = 0;
1294}
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305static void
1306nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
1307 struct nfp_net_rx_ring *rx_ring)
1308{
1309 unsigned int i;
1310
1311 for (i = 0; i < rx_ring->cnt - 1; i++) {
1312
1313
1314
1315
1316 if (!rx_ring->rxbufs[i].frag)
1317 continue;
1318
1319 nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
1320 nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
1321 rx_ring->rxbufs[i].dma_addr = 0;
1322 rx_ring->rxbufs[i].frag = NULL;
1323 }
1324}
1325
1326
1327
1328
1329
1330
1331static int
1332nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
1333 struct nfp_net_rx_ring *rx_ring)
1334{
1335 struct nfp_net_rx_buf *rxbufs;
1336 unsigned int i;
1337
1338 rxbufs = rx_ring->rxbufs;
1339
1340 for (i = 0; i < rx_ring->cnt - 1; i++) {
1341 rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
1342 if (!rxbufs[i].frag) {
1343 nfp_net_rx_ring_bufs_free(dp, rx_ring);
1344 return -ENOMEM;
1345 }
1346 }
1347
1348 return 0;
1349}
1350
1351
1352
1353
1354
1355
1356static void
1357nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
1358 struct nfp_net_rx_ring *rx_ring)
1359{
1360 unsigned int i;
1361
1362 for (i = 0; i < rx_ring->cnt - 1; i++)
1363 nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
1364 rx_ring->rxbufs[i].dma_addr);
1365}
1366
1367
1368
1369
1370
1371static int nfp_net_rx_csum_has_errors(u16 flags)
1372{
1373 u16 csum_all_checked, csum_all_ok;
1374
1375 csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
1376 csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
1377
1378 return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
1379}
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389static void nfp_net_rx_csum(struct nfp_net_dp *dp,
1390 struct nfp_net_r_vector *r_vec,
1391 struct nfp_net_rx_desc *rxd,
1392 struct nfp_meta_parsed *meta, struct sk_buff *skb)
1393{
1394 skb_checksum_none_assert(skb);
1395
1396 if (!(dp->netdev->features & NETIF_F_RXCSUM))
1397 return;
1398
1399 if (meta->csum_type) {
1400 skb->ip_summed = meta->csum_type;
1401 skb->csum = meta->csum;
1402 u64_stats_update_begin(&r_vec->rx_sync);
1403 r_vec->hw_csum_rx_ok++;
1404 u64_stats_update_end(&r_vec->rx_sync);
1405 return;
1406 }
1407
1408 if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
1409 u64_stats_update_begin(&r_vec->rx_sync);
1410 r_vec->hw_csum_rx_error++;
1411 u64_stats_update_end(&r_vec->rx_sync);
1412 return;
1413 }
1414
1415
1416
1417
1418
1419 if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
1420 rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
1421 __skb_incr_checksum_unnecessary(skb);
1422 u64_stats_update_begin(&r_vec->rx_sync);
1423 r_vec->hw_csum_rx_ok++;
1424 u64_stats_update_end(&r_vec->rx_sync);
1425 }
1426
1427 if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
1428 rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
1429 __skb_incr_checksum_unnecessary(skb);
1430 u64_stats_update_begin(&r_vec->rx_sync);
1431 r_vec->hw_csum_rx_inner_ok++;
1432 u64_stats_update_end(&r_vec->rx_sync);
1433 }
1434}
1435
1436static void
1437nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
1438 unsigned int type, __be32 *hash)
1439{
1440 if (!(netdev->features & NETIF_F_RXHASH))
1441 return;
1442
1443 switch (type) {
1444 case NFP_NET_RSS_IPV4:
1445 case NFP_NET_RSS_IPV6:
1446 case NFP_NET_RSS_IPV6_EX:
1447 meta->hash_type = PKT_HASH_TYPE_L3;
1448 break;
1449 default:
1450 meta->hash_type = PKT_HASH_TYPE_L4;
1451 break;
1452 }
1453
1454 meta->hash = get_unaligned_be32(hash);
1455}
1456
1457static void
1458nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
1459 void *data, struct nfp_net_rx_desc *rxd)
1460{
1461 struct nfp_net_rx_hash *rx_hash = data;
1462
1463 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
1464 return;
1465
1466 nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
1467 &rx_hash->hash);
1468}
1469
1470static void *
1471nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
1472 void *data, int meta_len)
1473{
1474 u32 meta_info;
1475
1476 meta_info = get_unaligned_be32(data);
1477 data += 4;
1478
1479 while (meta_info) {
1480 switch (meta_info & NFP_NET_META_FIELD_MASK) {
1481 case NFP_NET_META_HASH:
1482 meta_info >>= NFP_NET_META_FIELD_SIZE;
1483 nfp_net_set_hash(netdev, meta,
1484 meta_info & NFP_NET_META_FIELD_MASK,
1485 (__be32 *)data);
1486 data += 4;
1487 break;
1488 case NFP_NET_META_MARK:
1489 meta->mark = get_unaligned_be32(data);
1490 data += 4;
1491 break;
1492 case NFP_NET_META_PORTID:
1493 meta->portid = get_unaligned_be32(data);
1494 data += 4;
1495 break;
1496 case NFP_NET_META_CSUM:
1497 meta->csum_type = CHECKSUM_COMPLETE;
1498 meta->csum =
1499 (__force __wsum)__get_unaligned_cpu32(data);
1500 data += 4;
1501 break;
1502 default:
1503 return NULL;
1504 }
1505
1506 meta_info >>= NFP_NET_META_FIELD_SIZE;
1507 }
1508
1509 return data;
1510}
1511
1512static void
1513nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
1514 struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
1515 struct sk_buff *skb)
1516{
1517 u64_stats_update_begin(&r_vec->rx_sync);
1518 r_vec->rx_drops++;
1519
1520
1521
1522 if (skb && rxbuf)
1523 r_vec->rx_replace_buf_alloc_fail++;
1524 u64_stats_update_end(&r_vec->rx_sync);
1525
1526
1527
1528
1529 if (skb && rxbuf && skb->head == rxbuf->frag)
1530 page_ref_inc(virt_to_head_page(rxbuf->frag));
1531 if (rxbuf)
1532 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
1533 if (skb)
1534 dev_kfree_skb_any(skb);
1535}
1536
1537static bool
1538nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
1539 struct nfp_net_tx_ring *tx_ring,
1540 struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
1541 unsigned int pkt_len, bool *completed)
1542{
1543 struct nfp_net_tx_buf *txbuf;
1544 struct nfp_net_tx_desc *txd;
1545 int wr_idx;
1546
1547 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
1548 if (!*completed) {
1549 nfp_net_xdp_complete(tx_ring);
1550 *completed = true;
1551 }
1552
1553 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
1554 nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
1555 NULL);
1556 return false;
1557 }
1558 }
1559
1560 wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
1561
1562
1563 txbuf = &tx_ring->txbufs[wr_idx];
1564
1565 nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);
1566
1567 txbuf->frag = rxbuf->frag;
1568 txbuf->dma_addr = rxbuf->dma_addr;
1569 txbuf->fidx = -1;
1570 txbuf->pkt_cnt = 1;
1571 txbuf->real_len = pkt_len;
1572
1573 dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
1574 pkt_len, DMA_BIDIRECTIONAL);
1575
1576
1577 txd = &tx_ring->txds[wr_idx];
1578 txd->offset_eop = PCIE_DESC_TX_EOP;
1579 txd->dma_len = cpu_to_le16(pkt_len);
1580 nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
1581 txd->data_len = cpu_to_le16(pkt_len);
1582
1583 txd->flags = 0;
1584 txd->mss = 0;
1585 txd->lso_hdrlen = 0;
1586
1587 tx_ring->wr_p++;
1588 tx_ring->wr_ptr_add++;
1589 return true;
1590}
1591
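/**
 * nfp_net_rx() - receive up to @budget packets on @rx_ring
 * @rx_ring:   RX ring to receive from
 * @budget:    NAPI budget
 *
 * Note, this function is separated out from the napi poll function to
 * more cleanly separate packet receive code from other bookkeeping
 * functions performed in the napi poll function.
 *
 * Return: Number of packets received.
 */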
1603static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1604{
1605 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1606 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
1607 struct nfp_net_tx_ring *tx_ring;
1608 struct bpf_prog *xdp_prog;
1609 bool xdp_tx_cmpl = false;
1610 unsigned int true_bufsz;
1611 struct sk_buff *skb;
1612 int pkts_polled = 0;
1613 int idx;
1614
1615 rcu_read_lock();
1616 xdp_prog = READ_ONCE(dp->xdp_prog);
1617 true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
1618 tx_ring = r_vec->xdp_ring;
1619
1620 while (pkts_polled < budget) {
1621 unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
1622 struct nfp_net_rx_buf *rxbuf;
1623 struct nfp_net_rx_desc *rxd;
1624 struct nfp_meta_parsed meta;
1625 struct net_device *netdev;
1626 dma_addr_t new_dma_addr;
1627 u32 meta_len_xdp = 0;
1628 void *new_frag;
1629
1630 idx = D_IDX(rx_ring, rx_ring->rd_p);
1631
1632 rxd = &rx_ring->rxds[idx];
1633 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
1634 break;
1635
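		/* Memory barrier to ensure that we won't do other reads
		 * before the DD bit.
		 */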
1639 dma_rmb();
1640
1641 memset(&meta, 0, sizeof(meta));
1642
1643 rx_ring->rd_p++;
1644 pkts_polled++;
1645
1646 rxbuf = &rx_ring->rxbufs[idx];
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
1660 data_len = le16_to_cpu(rxd->rxd.data_len);
1661 pkt_len = data_len - meta_len;
1662
1663 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
1664 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1665 pkt_off += meta_len;
1666 else
1667 pkt_off += dp->rx_offset;
1668 meta_off = pkt_off - meta_len;
1669
1670
1671 u64_stats_update_begin(&r_vec->rx_sync);
1672 r_vec->rx_pkts++;
1673 r_vec->rx_bytes += pkt_len;
1674 u64_stats_update_end(&r_vec->rx_sync);
1675
1676 if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
1677 (dp->rx_offset && meta_len > dp->rx_offset))) {
1678 nn_dp_warn(dp, "oversized RX packet metadata %u\n",
1679 meta_len);
1680 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
1681 continue;
1682 }
1683
1684 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
1685 data_len);
1686
1687 if (!dp->chained_metadata_format) {
1688 nfp_net_set_hash_desc(dp->netdev, &meta,
1689 rxbuf->frag + meta_off, rxd);
1690 } else if (meta_len) {
1691 void *end;
1692
1693 end = nfp_net_parse_meta(dp->netdev, &meta,
1694 rxbuf->frag + meta_off,
1695 meta_len);
1696 if (unlikely(end != rxbuf->frag + pkt_off)) {
1697 nn_dp_warn(dp, "invalid RX packet metadata\n");
1698 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
1699 NULL);
1700 continue;
1701 }
1702 }
1703
1704 if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
1705 dp->bpf_offload_xdp) && !meta.portid) {
1706 void *orig_data = rxbuf->frag + pkt_off;
1707 unsigned int dma_off;
1708 struct xdp_buff xdp;
1709 int act;
1710
1711 xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
1712 xdp.data = orig_data;
1713 xdp.data_meta = orig_data;
1714 xdp.data_end = orig_data + pkt_len;
1715
1716 act = bpf_prog_run_xdp(xdp_prog, &xdp);
1717
1718 pkt_len -= xdp.data - orig_data;
1719 pkt_off += xdp.data - orig_data;
1720
1721 switch (act) {
1722 case XDP_PASS:
1723 meta_len_xdp = xdp.data - xdp.data_meta;
1724 break;
1725 case XDP_TX:
1726 dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
1727 if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
1728 tx_ring, rxbuf,
1729 dma_off,
1730 pkt_len,
1731 &xdp_tx_cmpl)))
1732 trace_xdp_exception(dp->netdev,
1733 xdp_prog, act);
1734 continue;
			default:
				bpf_warn_invalid_xdp_action(act);
				/* fall through */
			case XDP_ABORTED:
				trace_xdp_exception(dp->netdev, xdp_prog, act);
				/* fall through */
			case XDP_DROP:
1742 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
1743 rxbuf->dma_addr);
1744 continue;
1745 }
1746 }
1747
1748 skb = build_skb(rxbuf->frag, true_bufsz);
1749 if (unlikely(!skb)) {
1750 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
1751 continue;
1752 }
1753 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
1754 if (unlikely(!new_frag)) {
1755 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
1756 continue;
1757 }
1758
1759 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
1760
1761 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
1762
1763 if (likely(!meta.portid)) {
1764 netdev = dp->netdev;
1765 } else {
1766 struct nfp_net *nn;
1767
1768 nn = netdev_priv(dp->netdev);
1769 netdev = nfp_app_repr_get(nn->app, meta.portid);
1770 if (unlikely(!netdev)) {
1771 nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
1772 continue;
1773 }
1774 nfp_repr_inc_rx_stats(netdev, pkt_len);
1775 }
1776
1777 skb_reserve(skb, pkt_off);
1778 skb_put(skb, pkt_len);
1779
1780 skb->mark = meta.mark;
1781 skb_set_hash(skb, meta.hash, meta.hash_type);
1782
1783 skb_record_rx_queue(skb, rx_ring->idx);
1784 skb->protocol = eth_type_trans(skb, netdev);
1785
1786 nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
1787
1788 if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
1789 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1790 le16_to_cpu(rxd->rxd.vlan));
1791 if (meta_len_xdp)
1792 skb_metadata_set(skb, meta_len_xdp);
1793
1794 napi_gro_receive(&rx_ring->r_vec->napi, skb);
1795 }
1796
1797 if (xdp_prog) {
1798 if (tx_ring->wr_ptr_add)
1799 nfp_net_tx_xmit_more_flush(tx_ring);
1800 else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
1801 !xdp_tx_cmpl)
1802 if (!nfp_net_xdp_complete(tx_ring))
1803 pkts_polled = budget;
1804 }
1805 rcu_read_unlock();
1806
1807 return pkts_polled;
1808}
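/**
 * nfp_net_poll() - napi poll function
 * @napi:    NAPI structure
 * @budget:  NAPI budget
 *
 * Return: number of packets polled.
 */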
1817static int nfp_net_poll(struct napi_struct *napi, int budget)
1818{
1819 struct nfp_net_r_vector *r_vec =
1820 container_of(napi, struct nfp_net_r_vector, napi);
1821 unsigned int pkts_polled = 0;
1822
1823 if (r_vec->tx_ring)
1824 nfp_net_tx_complete(r_vec->tx_ring);
1825 if (r_vec->rx_ring)
1826 pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
1827
1828 if (pkts_polled < budget)
1829 if (napi_complete_done(napi, pkts_polled))
1830 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
1831
1832 return pkts_polled;
1833}
1834
1835
1836
1837
1838static bool
1839nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1840 struct sk_buff *skb, bool old)
1841{
1842 unsigned int real_len = skb->len, meta_len = 0;
1843 struct nfp_net_tx_ring *tx_ring;
1844 struct nfp_net_tx_buf *txbuf;
1845 struct nfp_net_tx_desc *txd;
1846 struct nfp_net_dp *dp;
1847 dma_addr_t dma_addr;
1848 int wr_idx;
1849
1850 dp = &r_vec->nfp_net->dp;
1851 tx_ring = r_vec->tx_ring;
1852
1853 if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
1854 nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
1855 goto err_free;
1856 }
1857
1858 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
1859 u64_stats_update_begin(&r_vec->tx_sync);
1860 r_vec->tx_busy++;
1861 u64_stats_update_end(&r_vec->tx_sync);
1862 if (!old)
1863 __skb_queue_tail(&r_vec->queue, skb);
1864 else
1865 __skb_queue_head(&r_vec->queue, skb);
1866 return true;
1867 }
1868
1869 if (nfp_app_ctrl_has_meta(nn->app)) {
1870 if (unlikely(skb_headroom(skb) < 8)) {
1871 nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
1872 goto err_free;
1873 }
1874 meta_len = 8;
1875 put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
1876 put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
1877 }
1878
1879
1880 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
1881 DMA_TO_DEVICE);
1882 if (dma_mapping_error(dp->dev, dma_addr))
1883 goto err_dma_warn;
1884
1885 wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
1886
1887
1888 txbuf = &tx_ring->txbufs[wr_idx];
1889 txbuf->skb = skb;
1890 txbuf->dma_addr = dma_addr;
1891 txbuf->fidx = -1;
1892 txbuf->pkt_cnt = 1;
1893 txbuf->real_len = real_len;
1894
1895
1896 txd = &tx_ring->txds[wr_idx];
1897 txd->offset_eop = meta_len | PCIE_DESC_TX_EOP;
1898 txd->dma_len = cpu_to_le16(skb_headlen(skb));
1899 nfp_desc_set_dma_addr(txd, dma_addr);
1900 txd->data_len = cpu_to_le16(skb->len);
1901
1902 txd->flags = 0;
1903 txd->mss = 0;
1904 txd->lso_hdrlen = 0;
1905
1906 tx_ring->wr_p++;
1907 tx_ring->wr_ptr_add++;
1908 nfp_net_tx_xmit_more_flush(tx_ring);
1909
1910 return false;
1911
1912err_dma_warn:
1913 nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
1914err_free:
1915 u64_stats_update_begin(&r_vec->tx_sync);
1916 r_vec->tx_errors++;
1917 u64_stats_update_end(&r_vec->tx_sync);
1918 dev_kfree_skb_any(skb);
1919 return false;
1920}
1921
1922bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
1923{
1924 struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
1925 bool ret;
1926
1927 spin_lock_bh(&r_vec->lock);
1928 ret = nfp_ctrl_tx_one(nn, r_vec, skb, false);
1929 spin_unlock_bh(&r_vec->lock);
1930
1931 return ret;
1932}
1933
1934static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
1935{
1936 struct sk_buff *skb;
1937
1938 while ((skb = __skb_dequeue(&r_vec->queue)))
1939 if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
1940 return;
1941}
1942
1943static bool
1944nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
1945{
1946 u32 meta_type, meta_tag;
1947
1948 if (!nfp_app_ctrl_has_meta(nn->app))
1949 return !meta_len;
1950
1951 if (meta_len != 8)
1952 return false;
1953
1954 meta_type = get_unaligned_be32(data);
1955 meta_tag = get_unaligned_be32(data + 4);
1956
1957 return (meta_type == NFP_NET_META_PORTID &&
1958 meta_tag == NFP_META_PORT_ID_CTRL);
1959}
1960
1961static bool
1962nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
1963 struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
1964{
1965 unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
1966 struct nfp_net_rx_buf *rxbuf;
1967 struct nfp_net_rx_desc *rxd;
1968 dma_addr_t new_dma_addr;
1969 struct sk_buff *skb;
1970 void *new_frag;
1971 int idx;
1972
1973 idx = D_IDX(rx_ring, rx_ring->rd_p);
1974
1975 rxd = &rx_ring->rxds[idx];
1976 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
1977 return false;
1978
1979
1980
1981
1982 dma_rmb();
1983
1984 rx_ring->rd_p++;
1985
1986 rxbuf = &rx_ring->rxbufs[idx];
1987 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
1988 data_len = le16_to_cpu(rxd->rxd.data_len);
1989 pkt_len = data_len - meta_len;
1990
1991 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
1992 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1993 pkt_off += meta_len;
1994 else
1995 pkt_off += dp->rx_offset;
1996 meta_off = pkt_off - meta_len;
1997
1998
1999 u64_stats_update_begin(&r_vec->rx_sync);
2000 r_vec->rx_pkts++;
2001 r_vec->rx_bytes += pkt_len;
2002 u64_stats_update_end(&r_vec->rx_sync);
2003
2004 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
2005
2006 if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
2007 nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
2008 meta_len);
2009 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
2010 return true;
2011 }
2012
2013 skb = build_skb(rxbuf->frag, dp->fl_bufsz);
2014 if (unlikely(!skb)) {
2015 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
2016 return true;
2017 }
2018 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
2019 if (unlikely(!new_frag)) {
2020 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
2021 return true;
2022 }
2023
2024 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
2025
2026 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
2027
2028 skb_reserve(skb, pkt_off);
2029 skb_put(skb, pkt_len);
2030
2031 nfp_app_ctrl_rx(nn->app, skb);
2032
2033 return true;
2034}
2035
2036static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
2037{
2038 struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
2039 struct nfp_net *nn = r_vec->nfp_net;
2040 struct nfp_net_dp *dp = &nn->dp;
2041
2042 while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
2043 continue;
2044}
2045
2046static void nfp_ctrl_poll(unsigned long arg)
2047{
2048 struct nfp_net_r_vector *r_vec = (void *)arg;
2049
2050 spin_lock_bh(&r_vec->lock);
2051 nfp_net_tx_complete(r_vec->tx_ring);
2052 __nfp_ctrl_tx_queued(r_vec);
2053 spin_unlock_bh(&r_vec->lock);
2054
2055 nfp_ctrl_rx(r_vec);
2056
2057 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
2058}
2059
2060
2061
2062
2063
2064
2065
2066
2067static void nfp_net_vecs_init(struct nfp_net *nn)
2068{
2069 struct nfp_net_r_vector *r_vec;
2070 int r;
2071
2072 nn->lsc_handler = nfp_net_irq_lsc;
2073 nn->exn_handler = nfp_net_irq_exn;
2074
2075 for (r = 0; r < nn->max_r_vecs; r++) {
2076 struct msix_entry *entry;
2077
2078 entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
2079
2080 r_vec = &nn->r_vecs[r];
2081 r_vec->nfp_net = nn;
2082 r_vec->irq_entry = entry->entry;
2083 r_vec->irq_vector = entry->vector;
2084
2085 if (nn->dp.netdev) {
2086 r_vec->handler = nfp_net_irq_rxtx;
2087 } else {
2088 r_vec->handler = nfp_ctrl_irq_rxtx;
2089
2090 __skb_queue_head_init(&r_vec->queue);
2091 spin_lock_init(&r_vec->lock);
2092 tasklet_init(&r_vec->tasklet, nfp_ctrl_poll,
2093 (unsigned long)r_vec);
2094 tasklet_disable(&r_vec->tasklet);
2095 }
2096
2097 cpumask_set_cpu(r, &r_vec->affinity_mask);
2098 }
2099}
2100
2101
2102
2103
2104
2105static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
2106{
2107 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
2108 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
2109
2110 kfree(tx_ring->txbufs);
2111
2112 if (tx_ring->txds)
2113 dma_free_coherent(dp->dev, tx_ring->size,
2114 tx_ring->txds, tx_ring->dma);
2115
2116 tx_ring->cnt = 0;
2117 tx_ring->txbufs = NULL;
2118 tx_ring->txds = NULL;
2119 tx_ring->dma = 0;
2120 tx_ring->size = 0;
2121}
2122
2123
2124
2125
2126
2127
2128
2129
2130static int
2131nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
2132{
2133 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
2134 int sz;
2135
2136 tx_ring->cnt = dp->txd_cnt;
2137
2138 tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
2139 tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
2140 &tx_ring->dma, GFP_KERNEL);
2141 if (!tx_ring->txds)
2142 goto err_alloc;
2143
2144 sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt;
2145 tx_ring->txbufs = kzalloc(sz, GFP_KERNEL);
2146 if (!tx_ring->txbufs)
2147 goto err_alloc;
2148
2149 if (!tx_ring->is_xdp && dp->netdev)
2150 netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
2151 tx_ring->idx);
2152
2153 return 0;
2154
2155err_alloc:
2156 nfp_net_tx_ring_free(tx_ring);
2157 return -ENOMEM;
2158}
2159
2160static void
2161nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
2162 struct nfp_net_tx_ring *tx_ring)
2163{
2164 unsigned int i;
2165
2166 if (!tx_ring->is_xdp)
2167 return;
2168
2169 for (i = 0; i < tx_ring->cnt; i++) {
2170 if (!tx_ring->txbufs[i].frag)
2171 return;
2172
2173 nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
2174 __free_page(virt_to_page(tx_ring->txbufs[i].frag));
2175 }
2176}
2177
2178static int
2179nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
2180 struct nfp_net_tx_ring *tx_ring)
2181{
2182 struct nfp_net_tx_buf *txbufs = tx_ring->txbufs;
2183 unsigned int i;
2184
2185 if (!tx_ring->is_xdp)
2186 return 0;
2187
2188 for (i = 0; i < tx_ring->cnt; i++) {
2189 txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
2190 if (!txbufs[i].frag) {
2191 nfp_net_tx_ring_bufs_free(dp, tx_ring);
2192 return -ENOMEM;
2193 }
2194 }
2195
2196 return 0;
2197}
2198
2199static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
2200{
2201 unsigned int r;
2202
2203 dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
2204 GFP_KERNEL);
2205 if (!dp->tx_rings)
2206 return -ENOMEM;
2207
2208 for (r = 0; r < dp->num_tx_rings; r++) {
2209 int bias = 0;
2210
2211 if (r >= dp->num_stack_tx_rings)
2212 bias = dp->num_stack_tx_rings;
2213
2214 nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias],
2215 r, bias);
2216
2217 if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
2218 goto err_free_prev;
2219
2220 if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
2221 goto err_free_ring;
2222 }
2223
2224 return 0;
2225
2226err_free_prev:
2227 while (r--) {
2228 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
2229err_free_ring:
2230 nfp_net_tx_ring_free(&dp->tx_rings[r]);
2231 }
2232 kfree(dp->tx_rings);
2233 return -ENOMEM;
2234}
2235
2236static void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
2237{
2238 unsigned int r;
2239
2240 for (r = 0; r < dp->num_tx_rings; r++) {
2241 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
2242 nfp_net_tx_ring_free(&dp->tx_rings[r]);
2243 }
2244
2245 kfree(dp->tx_rings);
2246}
2247
2248
2249
2250
2251
2252static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
2253{
2254 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
2255 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
2256
2257 kfree(rx_ring->rxbufs);
2258
2259 if (rx_ring->rxds)
2260 dma_free_coherent(dp->dev, rx_ring->size,
2261 rx_ring->rxds, rx_ring->dma);
2262
2263 rx_ring->cnt = 0;
2264 rx_ring->rxbufs = NULL;
2265 rx_ring->rxds = NULL;
2266 rx_ring->dma = 0;
2267 rx_ring->size = 0;
2268}
2269
2270
2271
2272
2273
2274
2275
2276
2277static int
2278nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
2279{
2280 int sz;
2281
2282 rx_ring->cnt = dp->rxd_cnt;
2283 rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
2284 rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
2285 &rx_ring->dma, GFP_KERNEL);
2286 if (!rx_ring->rxds)
2287 goto err_alloc;
2288
2289 sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt;
2290 rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL);
2291 if (!rx_ring->rxbufs)
2292 goto err_alloc;
2293
2294 return 0;
2295
2296err_alloc:
2297 nfp_net_rx_ring_free(rx_ring);
2298 return -ENOMEM;
2299}
2300
2301static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
2302{
2303 unsigned int r;
2304
2305 dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
2306 GFP_KERNEL);
2307 if (!dp->rx_rings)
2308 return -ENOMEM;
2309
2310 for (r = 0; r < dp->num_rx_rings; r++) {
2311 nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);
2312
2313 if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
2314 goto err_free_prev;
2315
2316 if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
2317 goto err_free_ring;
2318 }
2319
2320 return 0;
2321
2322err_free_prev:
2323 while (r--) {
2324 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
2325err_free_ring:
2326 nfp_net_rx_ring_free(&dp->rx_rings[r]);
2327 }
2328 kfree(dp->rx_rings);
2329 return -ENOMEM;
2330}
2331
2332static void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
2333{
2334 unsigned int r;
2335
2336 for (r = 0; r < dp->num_rx_rings; r++) {
2337 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
2338 nfp_net_rx_ring_free(&dp->rx_rings[r]);
2339 }
2340
2341 kfree(dp->rx_rings);
2342}
2343
2344static void
2345nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
2346 struct nfp_net_r_vector *r_vec, int idx)
2347{
2348 r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
2349 r_vec->tx_ring =
2350 idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL;
2351
2352 r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
2353 &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;
2354}
2355
2356static int
2357nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
2358 int idx)
2359{
2360 int err;
2361
2362
2363 if (nn->dp.netdev)
2364 netif_napi_add(nn->dp.netdev, &r_vec->napi,
2365 nfp_net_poll, NAPI_POLL_WEIGHT);
2366 else
2367 tasklet_enable(&r_vec->tasklet);
2368
2369 snprintf(r_vec->name, sizeof(r_vec->name),
2370 "%s-rxtx-%d", nfp_net_name(nn), idx);
2371 err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
2372 r_vec);
2373 if (err) {
2374 if (nn->dp.netdev)
2375 netif_napi_del(&r_vec->napi);
2376 else
2377 tasklet_disable(&r_vec->tasklet);
2378
2379 nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
2380 return err;
2381 }
2382 disable_irq(r_vec->irq_vector);
2383
2384 irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
2385
2386 nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
2387 r_vec->irq_entry);
2388
2389 return 0;
2390}
2391
2392static void
2393nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
2394{
2395 irq_set_affinity_hint(r_vec->irq_vector, NULL);
2396 if (nn->dp.netdev)
2397 netif_napi_del(&r_vec->napi);
2398 else
2399 tasklet_disable(&r_vec->tasklet);
2400
2401 free_irq(r_vec->irq_vector, r_vec);
2402}
2403
2404
2405
2406
2407
2408void nfp_net_rss_write_itbl(struct nfp_net *nn)
2409{
2410 int i;
2411
2412 for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4)
2413 nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i,
2414 get_unaligned_le32(nn->rss_itbl + i));
2415}
2416
2417
2418
2419
2420
2421void nfp_net_rss_write_key(struct nfp_net *nn)
2422{
2423 int i;
2424
2425 for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4)
2426 nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
2427 get_unaligned_le32(nn->rss_key + i));
2428}
2429
2430
2431
2432
2433
2434void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
2435{
2436 u8 i;
2437 u32 factor;
2438 u32 value;
2439
2440
2441
2442
2443
2444 factor = nn->me_freq_mhz / 16;
2445
2446
2447 value = (nn->rx_coalesce_max_frames << 16) |
2448 (factor * nn->rx_coalesce_usecs);
2449 for (i = 0; i < nn->dp.num_rx_rings; i++)
2450 nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
2451
2452
2453 value = (nn->tx_coalesce_max_frames << 16) |
2454 (factor * nn->tx_coalesce_usecs);
2455 for (i = 0; i < nn->dp.num_tx_rings; i++)
2456 nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
2457}
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
2469{
2470 nn_writel(nn, NFP_NET_CFG_MACADDR + 0, get_unaligned_be32(addr));
2471 nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4));
2472}
2473
2474static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
2475{
2476 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
2477 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
2478 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
2479
2480 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
2481 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
2482 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
2483}
2484
/**
 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
 * @nn:      NFP Net device to reconfigure
 */
2489static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
2490{
2491 u32 new_ctrl, update;
2492 unsigned int r;
2493 int err;
2494
2495 new_ctrl = nn->dp.ctrl;
2496 new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
2497 update = NFP_NET_CFG_UPDATE_GEN;
2498 update |= NFP_NET_CFG_UPDATE_MSIX;
2499 update |= NFP_NET_CFG_UPDATE_RING;
2500
2501 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
2502 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
2503
2504 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
2505 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
2506
2507 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2508 err = nfp_net_reconfig(nn, update);
2509 if (err)
2510 nn_err(nn, "Could not disable device: %d\n", err);
2511
2512 for (r = 0; r < nn->dp.num_rx_rings; r++)
2513 nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
2514 for (r = 0; r < nn->dp.num_tx_rings; r++)
2515 nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
2516 for (r = 0; r < nn->dp.num_r_vecs; r++)
2517 nfp_net_vec_clear_ring_data(nn, r);
2518
2519 nn->dp.ctrl = new_ctrl;
2520}
2521
2522static void
2523nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
2524 struct nfp_net_rx_ring *rx_ring, unsigned int idx)
2525{
 /* Write the DMA address, size and MSI-X info to the device */
2527 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
2528 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
2529 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
2530}
2531
2532static void
2533nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
2534 struct nfp_net_tx_ring *tx_ring, unsigned int idx)
2535{
2536 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
2537 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
2538 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
2539}
2540
/**
 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
 * @nn:      NFP Net device to reconfigure
 */
2545static int nfp_net_set_config_and_enable(struct nfp_net *nn)
2546{
2547 u32 bufsz, new_ctrl, update = 0;
2548 unsigned int r;
2549 int err;
2550
2551 new_ctrl = nn->dp.ctrl;
2552
2553 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) {
2554 nfp_net_rss_write_key(nn);
2555 nfp_net_rss_write_itbl(nn);
2556 nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
2557 update |= NFP_NET_CFG_UPDATE_RSS;
2558 }
2559
2560 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) {
2561 nfp_net_coalesce_write_cfg(nn);
2562 update |= NFP_NET_CFG_UPDATE_IRQMOD;
2563 }
2564
2565 for (r = 0; r < nn->dp.num_tx_rings; r++)
2566 nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r);
2567 for (r = 0; r < nn->dp.num_rx_rings; r++)
2568 nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);
2569
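 /* Enable rings 0..num_rings-1.  A shift by 64 would be undefined, so the
  * all-ones mask is written explicitly when all 64 rings are in use.
  */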
2570 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ?
2571 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1);
2572
2573 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
2574 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);
2575
2576 if (nn->dp.netdev)
2577 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
2578
2579 nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu);
2580
2581 bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
2582 nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);
2583
 /* Enable device */
2585 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
2586 update |= NFP_NET_CFG_UPDATE_GEN;
2587 update |= NFP_NET_CFG_UPDATE_MSIX;
2588 update |= NFP_NET_CFG_UPDATE_RING;
2589 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
2590 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
2591
2592 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2593 err = nfp_net_reconfig(nn, update);
2594 if (err) {
2595 nfp_net_clear_config_and_disable(nn);
2596 return err;
2597 }
2598
2599 nn->dp.ctrl = new_ctrl;
2600
2601 for (r = 0; r < nn->dp.num_rx_rings; r++)
2602 nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);
2603
 /* Since reconfiguration requests while NFP is down are ignored we
  * have to wipe the entire VXLAN configuration and reinitialize it.
  */
2607 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) {
2608 memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
2609 memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
2610 udp_tunnel_get_rx_info(nn->dp.netdev);
2611 }
2612
2613 return 0;
2614}
2615
/**
 * nfp_net_close_stack() - Quiesce the stack (part of close)
 * @nn:      NFP Net device to reconfigure
 */
2620static void nfp_net_close_stack(struct nfp_net *nn)
2621{
2622 unsigned int r;
2623
2624 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2625 netif_carrier_off(nn->dp.netdev);
2626 nn->link_up = false;
2627
2628 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2629 disable_irq(nn->r_vecs[r].irq_vector);
2630 napi_disable(&nn->r_vecs[r].napi);
2631 }
2632
2633 netif_tx_disable(nn->dp.netdev);
2634}
2635
/**
 * nfp_net_close_free_all() - Free all runtime resources
 * @nn:      NFP Net device to reconfigure
 */
2640static void nfp_net_close_free_all(struct nfp_net *nn)
2641{
2642 unsigned int r;
2643
2644 nfp_net_tx_rings_free(&nn->dp);
2645 nfp_net_rx_rings_free(&nn->dp);
2646
2647 for (r = 0; r < nn->dp.num_r_vecs; r++)
2648 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2649
2650 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2651 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2652}
2653
/**
 * nfp_net_netdev_close() - Called when the device is downed
 * @netdev:  netdev structure
 */
2658static int nfp_net_netdev_close(struct net_device *netdev)
2659{
2660 struct nfp_net *nn = netdev_priv(netdev);
2661
 /* Step 1: Disable RX and TX rings from the Linux kernel perspective */
2664 nfp_net_close_stack(nn);
2665
 /* Step 2: Tell NFP */
2668 nfp_net_clear_config_and_disable(nn);
2669 nfp_port_configure(netdev, false);
2670
2671
 /* Step 3: Free resources */
2673 nfp_net_close_free_all(nn);
2674
2675 nn_dbg(nn, "%s down", netdev->name);
2676 return 0;
2677}
2678
2679void nfp_ctrl_close(struct nfp_net *nn)
2680{
2681 int r;
2682
2683 rtnl_lock();
2684
2685 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2686 disable_irq(nn->r_vecs[r].irq_vector);
2687 tasklet_disable(&nn->r_vecs[r].tasklet);
2688 }
2689
2690 nfp_net_clear_config_and_disable(nn);
2691
2692 nfp_net_close_free_all(nn);
2693
2694 rtnl_unlock();
2695}
2696
/**
 * nfp_net_open_stack() - Start the device from stack's perspective
 * @nn:      NFP Net device to reconfigure
 */
2701static void nfp_net_open_stack(struct nfp_net *nn)
2702{
2703 unsigned int r;
2704
2705 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2706 napi_enable(&nn->r_vecs[r].napi);
2707 enable_irq(nn->r_vecs[r].irq_vector);
2708 }
2709
2710 netif_tx_wake_all_queues(nn->dp.netdev);
2711
2712 enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2713 nfp_net_read_link_status(nn);
2714}
2715
2716static int nfp_net_open_alloc_all(struct nfp_net *nn)
2717{
2718 int err, r;
2719
2720 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
2721 nn->exn_name, sizeof(nn->exn_name),
2722 NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
2723 if (err)
2724 return err;
2725 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
2726 nn->lsc_name, sizeof(nn->lsc_name),
2727 NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
2728 if (err)
2729 goto err_free_exn;
2730 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2731
2732 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2733 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
2734 if (err)
2735 goto err_cleanup_vec_p;
2736 }
2737
2738 err = nfp_net_rx_rings_prepare(nn, &nn->dp);
2739 if (err)
2740 goto err_cleanup_vec;
2741
2742 err = nfp_net_tx_rings_prepare(nn, &nn->dp);
2743 if (err)
2744 goto err_free_rx_rings;
2745
2746 for (r = 0; r < nn->max_r_vecs; r++)
2747 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
2748
2749 return 0;
2750
2751err_free_rx_rings:
2752 nfp_net_rx_rings_free(&nn->dp);
2753err_cleanup_vec:
2754 r = nn->dp.num_r_vecs;
2755err_cleanup_vec_p:
2756 while (r--)
2757 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2758 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2759err_free_exn:
2760 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2761 return err;
2762}
2763
2764static int nfp_net_netdev_open(struct net_device *netdev)
2765{
2766 struct nfp_net *nn = netdev_priv(netdev);
2767 int err;
2768
 /* Step 1: Allocate resources for rings and the like
  * - Request interrupts
  * - Allocate RX and TX ring resources
  * - Setup initial RSS table
  */
2774 err = nfp_net_open_alloc_all(nn);
2775 if (err)
2776 return err;
2777
2778 err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
2779 if (err)
2780 goto err_free_all;
2781
2782 err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
2783 if (err)
2784 goto err_free_all;
2785
 /* Step 2: Configure the NFP
  * - Ifup the physical interface if it exists
  * - Enable rings from 0 to tx_rings/rx_rings - 1
  * - Write MAC address (in case it changed)
  * - Set the MTU
  * - Set the Freelist buffer size
  * - Enable the FW
  */
2794 err = nfp_port_configure(netdev, true);
2795 if (err)
2796 goto err_free_all;
2797
2798 err = nfp_net_set_config_and_enable(nn);
2799 if (err)
2800 goto err_port_disable;
2801
 /* Step 3: Enable for kernel
  * - put some freelist descriptors on each RX ring
  * - enable NAPI on each ring
  * - enable all TX queues
  * - set link state
  */
2808 nfp_net_open_stack(nn);
2809
2810 return 0;
2811
2812err_port_disable:
2813 nfp_port_configure(netdev, false);
2814err_free_all:
2815 nfp_net_close_free_all(nn);
2816 return err;
2817}
2818
2819int nfp_ctrl_open(struct nfp_net *nn)
2820{
2821 int err, r;
2822
 /* Ring dumping depends on vNICs being opened/closed under rtnl */
2824 rtnl_lock();
2825
2826 err = nfp_net_open_alloc_all(nn);
2827 if (err)
2828 goto err_unlock;
2829
2830 err = nfp_net_set_config_and_enable(nn);
2831 if (err)
2832 goto err_free_all;
2833
2834 for (r = 0; r < nn->dp.num_r_vecs; r++)
2835 enable_irq(nn->r_vecs[r].irq_vector);
2836
2837 rtnl_unlock();
2838
2839 return 0;
2840
2841err_free_all:
2842 nfp_net_close_free_all(nn);
2843err_unlock:
2844 rtnl_unlock();
2845 return err;
2846}
2847
2848static void nfp_net_set_rx_mode(struct net_device *netdev)
2849{
2850 struct nfp_net *nn = netdev_priv(netdev);
2851 u32 new_ctrl;
2852
2853 new_ctrl = nn->dp.ctrl;
2854
2855 if (netdev->flags & IFF_PROMISC) {
2856 if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
2857 new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
2858 else
2859 nn_warn(nn, "FW does not support promiscuous mode\n");
2860 } else {
2861 new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
2862 }
2863
2864 if (new_ctrl == nn->dp.ctrl)
2865 return;
2866
2867 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2868 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
2869
2870 nn->dp.ctrl = new_ctrl;
2871}
2872
2873static void nfp_net_rss_init_itbl(struct nfp_net *nn)
2874{
2875 int i;
2876
2877 for (i = 0; i < sizeof(nn->rss_itbl); i++)
2878 nn->rss_itbl[i] =
2879 ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings);
2880}
2881
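/* Swap the active datapath parameters with the ones in @dp, leaving the old
 * values in @dp so the caller can restore or free them.
 */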
2882static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
2883{
2884 struct nfp_net_dp new_dp = *dp;
2885
2886 *dp = nn->dp;
2887 nn->dp = new_dp;
2888
2889 nn->dp.netdev->mtu = new_dp.mtu;
2890
2891 if (!netif_is_rxfh_configured(nn->dp.netdev))
2892 nfp_net_rss_init_itbl(nn);
2893}
2894
2895static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
2896{
2897 unsigned int r;
2898 int err;
2899
2900 nfp_net_dp_swap(nn, dp);
2901
2902 for (r = 0; r < nn->max_r_vecs; r++)
2903 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
2904
2905 err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings);
2906 if (err)
2907 return err;
2908
2909 if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) {
2910 err = netif_set_real_num_tx_queues(nn->dp.netdev,
2911 nn->dp.num_stack_tx_rings);
2912 if (err)
2913 return err;
2914 }
2915
2916 return nfp_net_set_config_and_enable(nn);
2917}
2918
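/**
 * nfp_net_clone_dp() - Clone the datapath parameters
 * @nn:      NFP Net device to clone the parameters from
 *
 * Return: cloned DP parameters structure, or NULL on allocation failure.
 */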
2919struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
2920{
2921 struct nfp_net_dp *new;
2922
2923 new = kmalloc(sizeof(*new), GFP_KERNEL);
2924 if (!new)
2925 return NULL;
2926
2927 *new = nn->dp;
2928
 /* Clear things which need to be recomputed */
2930 new->fl_bufsz = 0;
2931 new->tx_rings = NULL;
2932 new->rx_rings = NULL;
2933 new->num_r_vecs = 0;
2934 new->num_stack_tx_rings = 0;
2935
2936 return new;
2937}
2938
2939static int
2940nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
2941 struct netlink_ext_ack *extack)
2942{
 /* XDP-enabled tests */
2944 if (!dp->xdp_prog)
2945 return 0;
2946 if (dp->fl_bufsz > PAGE_SIZE) {
2947 NL_SET_ERR_MSG_MOD(extack, "MTU too large w/ XDP enabled");
2948 return -EINVAL;
2949 }
2950 if (dp->num_tx_rings > nn->max_tx_rings) {
2951 NL_SET_ERR_MSG_MOD(extack, "Insufficient number of TX rings w/ XDP enabled");
2952 return -EINVAL;
2953 }
2954
2955 return 0;
2956}
2957
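/* Apply a new datapath configuration.  If the device is down the parameters
 * are simply swapped in; otherwise new vectors and rings are allocated first,
 * the device is stopped, the configuration is swapped and re-enabled, and the
 * old rings are freed.  On failure an attempt is made to restore the old
 * configuration.  Always consumes @dp.
 */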
2958int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp,
2959 struct netlink_ext_ack *extack)
2960{
2961 int r, err;
2962
2963 dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp);
2964
2965 dp->num_stack_tx_rings = dp->num_tx_rings;
2966 if (dp->xdp_prog)
2967 dp->num_stack_tx_rings -= dp->num_rx_rings;
2968
2969 dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings);
2970
2971 err = nfp_net_check_config(nn, dp, extack);
2972 if (err)
2973 goto exit_free_dp;
2974
2975 if (!netif_running(dp->netdev)) {
2976 nfp_net_dp_swap(nn, dp);
2977 err = 0;
2978 goto exit_free_dp;
2979 }
2980
 /* Prepare new rings */
2982 for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
2983 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
2984 if (err) {
2985 dp->num_r_vecs = r;
2986 goto err_cleanup_vecs;
2987 }
2988 }
2989
2990 err = nfp_net_rx_rings_prepare(nn, dp);
2991 if (err)
2992 goto err_cleanup_vecs;
2993
2994 err = nfp_net_tx_rings_prepare(nn, dp);
2995 if (err)
2996 goto err_free_rx;
2997
 /* Stop device, swap in new rings, try to start the firmware */
2999 nfp_net_close_stack(nn);
3000 nfp_net_clear_config_and_disable(nn);
3001
3002 err = nfp_net_dp_swap_enable(nn, dp);
3003 if (err) {
3004 int err2;
3005
3006 nfp_net_clear_config_and_disable(nn);
3007
 /* Try with old configuration and old rings */
3009 err2 = nfp_net_dp_swap_enable(nn, dp);
3010 if (err2)
3011 nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
3012 err, err2);
3013 }
3014 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
3015 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
3016
3017 nfp_net_rx_rings_free(dp);
3018 nfp_net_tx_rings_free(dp);
3019
3020 nfp_net_open_stack(nn);
3021exit_free_dp:
3022 kfree(dp);
3023
3024 return err;
3025
3026err_free_rx:
3027 nfp_net_rx_rings_free(dp);
3028err_cleanup_vecs:
3029 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
3030 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
3031 kfree(dp);
3032 return err;
3033}
3034
3035static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
3036{
3037 struct nfp_net *nn = netdev_priv(netdev);
3038 struct nfp_net_dp *dp;
3039
3040 dp = nfp_net_clone_dp(nn);
3041 if (!dp)
3042 return -ENOMEM;
3043
3044 dp->mtu = new_mtu;
3045
3046 return nfp_net_ring_reconfig(nn, dp, NULL);
3047}
3048
3049static int
3050nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3051{
3052 struct nfp_net *nn = netdev_priv(netdev);
3053
 /* Priority tagged packets with vlan id 0 are processed by the
  * NFP as untagged packets
  */
3057 if (!vid)
3058 return 0;
3059
3060 nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
3061 nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
3062
3063 return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
3064}
3065
3066static int
3067nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3068{
3069 struct nfp_net *nn = netdev_priv(netdev);
3070
 /* Priority tagged packets with vlan id 0 are processed by the
  * NFP as untagged packets
  */
3074 if (!vid)
3075 return 0;
3076
3077 nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
3078 nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
3079
3080 return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
3081}
3082
3083static void nfp_net_stat64(struct net_device *netdev,
3084 struct rtnl_link_stats64 *stats)
3085{
3086 struct nfp_net *nn = netdev_priv(netdev);
3087 int r;
3088
3089 for (r = 0; r < nn->dp.num_r_vecs; r++) {
3090 struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
3091 u64 data[3];
3092 unsigned int start;
3093
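 /* Re-read the per-vector counters if a datapath update raced with us */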
3094 do {
3095 start = u64_stats_fetch_begin(&r_vec->rx_sync);
3096 data[0] = r_vec->rx_pkts;
3097 data[1] = r_vec->rx_bytes;
3098 data[2] = r_vec->rx_drops;
3099 } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
3100 stats->rx_packets += data[0];
3101 stats->rx_bytes += data[1];
3102 stats->rx_dropped += data[2];
3103
3104 do {
3105 start = u64_stats_fetch_begin(&r_vec->tx_sync);
3106 data[0] = r_vec->tx_pkts;
3107 data[1] = r_vec->tx_bytes;
3108 data[2] = r_vec->tx_errors;
3109 } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
3110 stats->tx_packets += data[0];
3111 stats->tx_bytes += data[1];
3112 stats->tx_errors += data[2];
3113 }
3114}
3115
3116static int nfp_net_set_features(struct net_device *netdev,
3117 netdev_features_t features)
3118{
3119 netdev_features_t changed = netdev->features ^ features;
3120 struct nfp_net *nn = netdev_priv(netdev);
3121 u32 new_ctrl;
3122 int err;
3123
 /* Assume this is not called with features we have not advertised */
3125
3126 new_ctrl = nn->dp.ctrl;
3127
3128 if (changed & NETIF_F_RXCSUM) {
3129 if (features & NETIF_F_RXCSUM)
3130 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
3131 else
3132 new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM_ANY;
3133 }
3134
3135 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3136 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
3137 new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
3138 else
3139 new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM;
3140 }
3141
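 /* Prefer the LSO2 capability when the device advertises it, otherwise fall
  * back to the original LSO bit (the "?:" picks the first non-zero value).
  */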
3142 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
3143 if (features & (NETIF_F_TSO | NETIF_F_TSO6))
3144 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
3145 NFP_NET_CFG_CTRL_LSO;
3146 else
3147 new_ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
3148 }
3149
3150 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
3151 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3152 new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
3153 else
3154 new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
3155 }
3156
3157 if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
3158 if (features & NETIF_F_HW_VLAN_CTAG_TX)
3159 new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
3160 else
3161 new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
3162 }
3163
3164 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
3165 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
3166 new_ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
3167 else
3168 new_ctrl &= ~NFP_NET_CFG_CTRL_CTAG_FILTER;
3169 }
3170
3171 if (changed & NETIF_F_SG) {
3172 if (features & NETIF_F_SG)
3173 new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
3174 else
3175 new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
3176 }
3177
3178 if (changed & NETIF_F_HW_TC && nfp_app_tc_busy(nn->app, nn)) {
3179 nn_err(nn, "Cannot disable HW TC offload while in use\n");
3180 return -EBUSY;
3181 }
3182
3183 nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
3184 netdev->features, features, changed);
3185
3186 if (new_ctrl == nn->dp.ctrl)
3187 return 0;
3188
3189 nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl);
3190 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
3191 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
3192 if (err)
3193 return err;
3194
3195 nn->dp.ctrl = new_ctrl;
3196
3197 return 0;
3198}
3199
3200static netdev_features_t
3201nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
3202 netdev_features_t features)
3203{
3204 u8 l4_hdr;
3205
 /* We can't do TSO over double tagged packets (802.1AD) */
3207 features &= vlan_features_check(skb, features);
3208
3209 if (!skb->encapsulation)
3210 return features;
3211
 /* Ensure that inner L4 headers are in the packet */
3213 if (skb_is_gso(skb)) {
3214 u32 hdrlen;
3215
3216 hdrlen = skb_inner_transport_header(skb) - skb->data +
3217 inner_tcp_hdrlen(skb);
3218
3219 if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ))
3220 features &= ~NETIF_F_GSO_MASK;
3221 }
3222
 /* VXLAN/GRE check */
3224 switch (vlan_get_protocol(skb)) {
3225 case htons(ETH_P_IP):
3226 l4_hdr = ip_hdr(skb)->protocol;
3227 break;
3228 case htons(ETH_P_IPV6):
3229 l4_hdr = ipv6_hdr(skb)->nexthdr;
3230 break;
3231 default:
3232 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3233 }
3234
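 /* Only Ethernet-in-UDP with a standard-size VXLAN header and GRE
  * encapsulation can be offloaded; anything else loses checksum and GSO
  * offload for this skb.
  */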
3235 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
3236 skb->inner_protocol != htons(ETH_P_TEB) ||
3237 (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) ||
3238 (l4_hdr == IPPROTO_UDP &&
3239 (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
3240 sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
3241 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3242
3243 return features;
3244}
3245
/**
 * nfp_net_set_vxlan_port() - set VXLAN port in SW and reconfigure HW
 * @nn:   NFP Net device to reconfigure
 * @idx:  Index into the port table where new port should be written
 * @port: UDP port to configure (pass zero to remove VXLAN port)
 */
3252static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
3253{
3254 int i;
3255
3256 nn->vxlan_ports[idx] = port;
3257
3258 if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN))
3259 return;
3260
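 /* Ports are written two per 32-bit register, so the table size must be
  * even; the BUILD_BUG_ON below enforces this at compile time.
  */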
3261 BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
3262 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2)
3263 nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
3264 be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
3265 be16_to_cpu(nn->vxlan_ports[i]));
3266
3267 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
3268}
3269
/**
 * nfp_net_find_vxlan_idx() - find table entry of the port or a free one
 * @nn:   NFP Network structure
 * @port: UDP port to look for
 *
 * Return: if the port is already in the table -- its position;
 *	   if the port is not in the table -- free position to use;
 *	   if the table is full -- -ENOSPC.
 */
3279static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
3280{
3281 int i, free_idx = -ENOSPC;
3282
3283 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
3284 if (nn->vxlan_ports[i] == port)
3285 return i;
3286 if (!nn->vxlan_usecnt[i])
3287 free_idx = i;
3288 }
3289
3290 return free_idx;
3291}
3292
3293static void nfp_net_add_vxlan_port(struct net_device *netdev,
3294 struct udp_tunnel_info *ti)
3295{
3296 struct nfp_net *nn = netdev_priv(netdev);
3297 int idx;
3298
3299 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3300 return;
3301
3302 idx = nfp_net_find_vxlan_idx(nn, ti->port);
3303 if (idx == -ENOSPC)
3304 return;
3305
3306 if (!nn->vxlan_usecnt[idx]++)
3307 nfp_net_set_vxlan_port(nn, idx, ti->port);
3308}
3309
3310static void nfp_net_del_vxlan_port(struct net_device *netdev,
3311 struct udp_tunnel_info *ti)
3312{
3313 struct nfp_net *nn = netdev_priv(netdev);
3314 int idx;
3315
3316 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3317 return;
3318
3319 idx = nfp_net_find_vxlan_idx(nn, ti->port);
3320 if (idx == -ENOSPC || !nn->vxlan_usecnt[idx])
3321 return;
3322
3323 if (!--nn->vxlan_usecnt[idx])
3324 nfp_net_set_vxlan_port(nn, idx, 0);
3325}
3326
3327static int
3328nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog,
3329 struct netlink_ext_ack *extack)
3330{
3331 struct nfp_net_dp *dp;
3332
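 /* If the XDP enable state does not change, only the program pointer needs
  * to be swapped; a full ring reconfiguration is not required.
  */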
3333 if (!prog == !nn->dp.xdp_prog) {
3334 WRITE_ONCE(nn->dp.xdp_prog, prog);
3335 return 0;
3336 }
3337
3338 dp = nfp_net_clone_dp(nn);
3339 if (!dp)
3340 return -ENOMEM;
3341
3342 dp->xdp_prog = prog;
3343 dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
3344 dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
3345 dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;
3346
 /* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
3348 return nfp_net_ring_reconfig(nn, dp, extack);
3349}
3350
3351static int
3352nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
3353 struct netlink_ext_ack *extack)
3354{
3355 struct bpf_prog *drv_prog, *offload_prog;
3356 int err;
3357
3358 if (nn->xdp_prog && (flags ^ nn->xdp_flags) & XDP_FLAGS_MODES)
3359 return -EBUSY;
3360
 /* Load both when no flags set to allow easy activation of driver path
  * when program is replaced by one which can't be offloaded.
  */
3364 drv_prog = flags & XDP_FLAGS_HW_MODE ? NULL : prog;
3365 offload_prog = flags & XDP_FLAGS_DRV_MODE ? NULL : prog;
3366
3367 err = nfp_net_xdp_setup_drv(nn, drv_prog, extack);
3368 if (err)
3369 return err;
3370
3371 err = nfp_app_xdp_offload(nn->app, nn, offload_prog);
3372 if (err && flags & XDP_FLAGS_HW_MODE)
3373 return err;
3374
3375 if (nn->xdp_prog)
3376 bpf_prog_put(nn->xdp_prog);
3377 nn->xdp_prog = prog;
3378 nn->xdp_flags = flags;
3379
3380 return 0;
3381}
3382
3383static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
3384{
3385 struct nfp_net *nn = netdev_priv(netdev);
3386
3387 switch (xdp->command) {
3388 case XDP_SETUP_PROG:
3389 case XDP_SETUP_PROG_HW:
3390 return nfp_net_xdp_setup(nn, xdp->prog, xdp->flags,
3391 xdp->extack);
3392 case XDP_QUERY_PROG:
3393 xdp->prog_attached = !!nn->xdp_prog;
3394 if (nn->dp.bpf_offload_xdp)
3395 xdp->prog_attached = XDP_ATTACHED_HW;
3396 xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
3397 return 0;
3398 case BPF_OFFLOAD_VERIFIER_PREP:
3399 return nfp_app_bpf_verifier_prep(nn->app, nn, xdp);
3400 case BPF_OFFLOAD_TRANSLATE:
3401 return nfp_app_bpf_translate(nn->app, nn,
3402 xdp->offload.prog);
3403 case BPF_OFFLOAD_DESTROY:
3404 return nfp_app_bpf_destroy(nn->app, nn,
3405 xdp->offload.prog);
3406 default:
3407 return -EINVAL;
3408 }
3409}
3410
3411static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
3412{
3413 struct nfp_net *nn = netdev_priv(netdev);
3414 struct sockaddr *saddr = addr;
3415 int err;
3416
3417 err = eth_prepare_mac_addr_change(netdev, addr);
3418 if (err)
3419 return err;
3420
3421 nfp_net_write_mac_addr(nn, saddr->sa_data);
3422
3423 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MACADDR);
3424 if (err)
3425 return err;
3426
3427 eth_commit_mac_addr_change(netdev, addr);
3428
3429 return 0;
3430}
3431
3432const struct net_device_ops nfp_net_netdev_ops = {
3433 .ndo_open = nfp_net_netdev_open,
3434 .ndo_stop = nfp_net_netdev_close,
3435 .ndo_start_xmit = nfp_net_tx,
3436 .ndo_get_stats64 = nfp_net_stat64,
3437 .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
3438 .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
3439 .ndo_set_vf_mac = nfp_app_set_vf_mac,
3440 .ndo_set_vf_vlan = nfp_app_set_vf_vlan,
3441 .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
3442 .ndo_get_vf_config = nfp_app_get_vf_config,
3443 .ndo_set_vf_link_state = nfp_app_set_vf_link_state,
3444 .ndo_setup_tc = nfp_port_setup_tc,
3445 .ndo_tx_timeout = nfp_net_tx_timeout,
3446 .ndo_set_rx_mode = nfp_net_set_rx_mode,
3447 .ndo_change_mtu = nfp_net_change_mtu,
3448 .ndo_set_mac_address = nfp_net_set_mac_address,
3449 .ndo_set_features = nfp_net_set_features,
3450 .ndo_features_check = nfp_net_features_check,
3451 .ndo_get_phys_port_name = nfp_port_get_phys_port_name,
3452 .ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
3453 .ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
3454 .ndo_bpf = nfp_net_xdp,
3455};
3456
/**
 * nfp_net_info() - Print general info about the NIC
 * @nn:      NFP Net device to reconfigure
 */
3461void nfp_net_info(struct nfp_net *nn)
3462{
3463 nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
3464 nn->dp.is_vf ? "VF " : "",
3465 nn->dp.num_tx_rings, nn->max_tx_rings,
3466 nn->dp.num_rx_rings, nn->max_rx_rings);
3467 nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
3468 nn->fw_ver.resv, nn->fw_ver.class,
3469 nn->fw_ver.major, nn->fw_ver.minor,
3470 nn->max_mtu);
3471 nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
3472 nn->cap,
3473 nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
3474 nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
3475 nn->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
3476 nn->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
3477 nn->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
3478 nn->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
3479 nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
3480 nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
3481 nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
3482 nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO1 " : "",
3483 nn->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSO2 " : "",
3484 nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "",
3485 nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "",
3486 nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
3487 nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
3488 nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
3489 nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
3490 nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
3491 nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
3492 nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
3493 "RXCSUM_COMPLETE " : "",
3494 nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
3495 nfp_app_extra_cap(nn->app, nn));
3496}
3497
/**
 * nfp_net_alloc() - Allocate netdev and related structure
 * @pdev:         PCI device
 * @needs_netdev: Whether to allocate a netdev for this vNIC
 * @max_tx_rings: Maximum number of TX rings supported by the device
 * @max_rx_rings: Maximum number of RX rings supported by the device
 *
 * This function allocates a netdev device and fills in the initial
 * part of the @struct nfp_net structure.
 *
 * Return: NFP Net device structure, or ERR_PTR on error.
 */
3511struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
3512 unsigned int max_tx_rings,
3513 unsigned int max_rx_rings)
3514{
3515 struct nfp_net *nn;
3516
3517 if (needs_netdev) {
3518 struct net_device *netdev;
3519
3520 netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
3521 max_tx_rings, max_rx_rings);
3522 if (!netdev)
3523 return ERR_PTR(-ENOMEM);
3524
3525 SET_NETDEV_DEV(netdev, &pdev->dev);
3526 nn = netdev_priv(netdev);
3527 nn->dp.netdev = netdev;
3528 } else {
3529 nn = vzalloc(sizeof(*nn));
3530 if (!nn)
3531 return ERR_PTR(-ENOMEM);
3532 }
3533
3534 nn->dp.dev = &pdev->dev;
3535 nn->pdev = pdev;
3536
3537 nn->max_tx_rings = max_tx_rings;
3538 nn->max_rx_rings = max_rx_rings;
3539
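 /* Default ring counts: capped by the device limits, the number of online
  * CPUs and (for RX) the default number of RSS queues.
  */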
3540 nn->dp.num_tx_rings = min_t(unsigned int,
3541 max_tx_rings, num_online_cpus());
3542 nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings,
3543 netif_get_num_default_rss_queues());
3544
3545 nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
3546 nn->dp.num_r_vecs = min_t(unsigned int,
3547 nn->dp.num_r_vecs, num_online_cpus());
3548
3549 nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
3550 nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
3551
3552 spin_lock_init(&nn->reconfig_lock);
3553 spin_lock_init(&nn->link_status_lock);
3554
3555 timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
3556
3557 return nn;
3558}
3559
/**
 * nfp_net_free() - Undo what @nfp_net_alloc() did
 * @nn:      NFP Net device to free
 */
3564void nfp_net_free(struct nfp_net *nn)
3565{
3566 if (nn->xdp_prog)
3567 bpf_prog_put(nn->xdp_prog);
3568
3569 if (nn->dp.netdev)
3570 free_netdev(nn->dp.netdev);
3571 else
3572 vfree(nn);
3573}
3574
/**
 * nfp_net_rss_key_sz() - Get current size of the RSS key
 * @nn:      NFP Net device instance
 *
 * Return: size of the RSS key for the currently selected hash function.
 */
3581unsigned int nfp_net_rss_key_sz(struct nfp_net *nn)
3582{
3583 switch (nn->rss_hfunc) {
3584 case ETH_RSS_HASH_TOP:
3585 return NFP_NET_CFG_RSS_KEY_SZ;
3586 case ETH_RSS_HASH_XOR:
3587 return 0;
3588 case ETH_RSS_HASH_CRC32:
3589 return 4;
3590 }
3591
3592 nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc);
3593 return 0;
3594}
3595
/**
 * nfp_net_rss_init() - Set the initial RSS parameters
 * @nn:      NFP Net device to reconfigure
 */
3600static void nfp_net_rss_init(struct nfp_net *nn)
3601{
3602 unsigned long func_bit, rss_cap_hfunc;
3603 u32 reg;
3604
 /* Read the RSS function capability and select first supported func */
3606 reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP);
3607 rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg);
3608 if (!rss_cap_hfunc)
3609 rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC,
3610 NFP_NET_CFG_RSS_TOEPLITZ);
3611
3612 func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
3613 if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
3614 dev_warn(nn->dp.dev,
3615 "Bad RSS config, defaulting to Toeplitz hash\n");
3616 func_bit = ETH_RSS_HASH_TOP_BIT;
3617 }
3618 nn->rss_hfunc = 1 << func_bit;
3619
3620 netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn));
3621
3622 nfp_net_rss_init_itbl(nn);
3623
 /* Enable IPv4/IPv6 TCP by default */
3625 nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
3626 NFP_NET_CFG_RSS_IPV6_TCP |
3627 FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) |
3628 NFP_NET_CFG_RSS_MASK;
3629}
3630
/**
 * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
 * @nn:      NFP Net device to reconfigure
 */
3635static void nfp_net_irqmod_init(struct nfp_net *nn)
3636{
3637 nn->rx_coalesce_usecs = 50;
3638 nn->rx_coalesce_max_frames = 64;
3639 nn->tx_coalesce_usecs = 50;
3640 nn->tx_coalesce_max_frames = 64;
3641}
3642
3643static void nfp_net_netdev_init(struct nfp_net *nn)
3644{
3645 struct net_device *netdev = nn->dp.netdev;
3646
3647 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
3648
3649 netdev->mtu = nn->dp.mtu;
3650
 /* Advertise/enable offloads based on capabilities
  *
  * Note: netdev->features shows the currently enabled features
  * and netdev->hw_features advertises which features are
  * supported.  By default we enable most features.
  */
3657 if (nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
3658 netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
3659
3660 netdev->hw_features = NETIF_F_HIGHDMA;
3661 if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY) {
3662 netdev->hw_features |= NETIF_F_RXCSUM;
3663 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
3664 }
3665 if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
3666 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3667 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
3668 }
3669 if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
3670 netdev->hw_features |= NETIF_F_SG;
3671 nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
3672 }
3673 if ((nn->cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
3674 nn->cap & NFP_NET_CFG_CTRL_LSO2) {
3675 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
3676 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
3677 NFP_NET_CFG_CTRL_LSO;
3678 }
3679 if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
3680 netdev->hw_features |= NETIF_F_RXHASH;
3681 if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
3682 nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
3683 if (nn->cap & NFP_NET_CFG_CTRL_LSO)
3684 netdev->hw_features |= NETIF_F_GSO_GRE |
3685 NETIF_F_GSO_UDP_TUNNEL;
3686 nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;
3687
3688 netdev->hw_enc_features = netdev->hw_features;
3689 }
3690
3691 netdev->vlan_features = netdev->hw_features;
3692
3693 if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
3694 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
3695 nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
3696 }
3697 if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
3698 if (nn->cap & NFP_NET_CFG_CTRL_LSO2) {
3699 nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
3700 } else {
3701 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
3702 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
3703 }
3704 }
3705 if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) {
3706 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3707 nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
3708 }
3709
3710 netdev->features = netdev->hw_features;
3711
3712 if (nfp_app_has_tc(nn->app))
3713 netdev->hw_features |= NETIF_F_HW_TC;
3714
 /* Advertise but disable TSO by default. */
3716 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
3717 nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
3718
 /* Finalise the netdev setup */
3720 netdev->netdev_ops = &nfp_net_netdev_ops;
3721 netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
3722
3723 SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
3724
 /* MTU range: 68 - hw-specific max */
3726 netdev->min_mtu = ETH_MIN_MTU;
3727 netdev->max_mtu = nn->max_mtu;
3728
3729 netif_carrier_off(netdev);
3730
3731 nfp_net_set_ethtool_ops(netdev);
3732}
3733
/**
 * nfp_net_init() - Initialise/finalise the nfp_net structure
 * @nn:      NFP Net device structure
 *
 * Return: 0 on success or negative errno on error.
 */
3740int nfp_net_init(struct nfp_net *nn)
3741{
3742 int err;
3743
3744 nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
3745
 /* Get some of the read-only fields from the BAR */
3747 nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
3748 nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
3749
 /* ABI 4.x and ctrl vNIC always use chained metadata, in other cases
  * we allow use of non-chained metadata if RSS(v1) is the only
  * advertised capability requiring metadata.
  */
3754 nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
3755 !nn->dp.netdev ||
3756 !(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
3757 nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
3758
 /* RSS(v1) uses non-chained metadata format, except in ABI 4.x where
  * it has the same meaning as RSSv2.
  */
3761 if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
3762 nn->cap &= ~NFP_NET_CFG_CTRL_RSS;
3763
 /* Determine RX packet/metadata boundary offset */
3765 if (nn->fw_ver.major >= 2) {
3766 u32 reg;
3767
3768 reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
3769 if (reg > NFP_NET_MAX_PREPEND) {
3770 nn_err(nn, "Invalid rx offset: %d\n", reg);
3771 return -EINVAL;
3772 }
3773 nn->dp.rx_offset = reg;
3774 } else {
3775 nn->dp.rx_offset = NFP_NET_RX_OFFSET;
3776 }
3777
 /* Set default MTU and Freelist buffer size */
3779 if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
3780 nn->dp.mtu = nn->max_mtu;
3781 else
3782 nn->dp.mtu = NFP_NET_DEFAULT_MTU;
3783 nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
3784
3785 if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
3786 nfp_net_rss_init(nn);
3787 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
3788 NFP_NET_CFG_CTRL_RSS;
3789 }
3790
 /* Allow L2 Broadcast and Multicast through by default, if supported */
3792 if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
3793 nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
3794 if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
3795 nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC;
3796
 /* Allow IRQ moderation, if supported */
3798 if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
3799 nfp_net_irqmod_init(nn);
3800 nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
3801 }
3802
3803 if (nn->dp.netdev)
3804 nfp_net_netdev_init(nn);
3805
 /* Stash the re-configuration queue away.  First odd queue in TX Bar */
3807 nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
3808
 /* Make sure the FW knows the netdev is supposed to be disabled here */
3810 nn_writel(nn, NFP_NET_CFG_CTRL, 0);
3811 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
3812 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
3813 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
3814 NFP_NET_CFG_UPDATE_GEN);
3815 if (err)
3816 return err;
3817
3818 nfp_net_vecs_init(nn);
3819
3820 if (!nn->dp.netdev)
3821 return 0;
3822 return register_netdev(nn->dp.netdev);
3823}
3824
/**
 * nfp_net_clean() - Undo what nfp_net_init() did.
 * @nn:      NFP Net device structure
 */
3829void nfp_net_clean(struct nfp_net *nn)
3830{
3831 if (!nn->dp.netdev)
3832 return;
3833
3834 unregister_netdev(nn->dp.netdev);
3835}
3836