// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

/*
 * nfp_net_common.c
 * Netronome network device driver: Common functions between PF and VF
 */

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/page_ref.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/ktime.h>

#include <net/tls.h>
#include <net/vxlan.h>

#include "nfpcore/nfp_nsp.h"
#include "ccm.h"
#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_net_sriov.h"
#include "nfp_port.h"
#include "crypto/crypto.h"
#include "crypto/fw.h"
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
			    void __iomem *ctrl_bar)
{
	u32 reg;

	reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
	put_unaligned_le32(reg, fw_ver);
}

static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
{
	return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
				    dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
				    dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static void
nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
{
	dma_sync_single_for_device(dp->dev, dma_addr,
				   dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
				   dp->rx_dma_dir);
}

static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
{
	dma_unmap_single_attrs(dp->dev, dma_addr,
			       dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
			       dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr,
				    unsigned int len)
{
	dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
				len, dp->rx_dma_dir);
}
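
/* Firmware reconfig
 *
 * Firmware reconfig may take a while so we have two versions of it -
 * synchronous and asynchronous (posted).  All synchronous callers are holding
 * RTNL so we don't have to worry about serializing them.
 */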
static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
{
	nn_writel(nn, NFP_NET_CFG_UPDATE, update);
	/* ensure update is written before pinging HW */
	nn_pci_flush(nn);
	nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
	nn->reconfig_in_progress_update = update;
}
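
/* Pass 0 as update to run posted reconfigs. */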
static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
{
	update |= nn->reconfig_posted;
	nn->reconfig_posted = 0;

	nfp_net_reconfig_start(nn, update);

	nn->reconfig_timer_active = true;
	mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
}

static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
{
	u32 reg;

	reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
	if (reg == 0)
		return true;
	if (reg & NFP_NET_CFG_UPDATE_ERR) {
		nn_err(nn, "Reconfig error (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
		       reg, nn->reconfig_in_progress_update,
		       nn_readl(nn, NFP_NET_CFG_CTRL));
		return true;
	} else if (last_check) {
		nn_err(nn, "Reconfig timeout (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
		       reg, nn->reconfig_in_progress_update,
		       nn_readl(nn, NFP_NET_CFG_CTRL));
		return true;
	}

	return false;
}

static bool __nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
{
	bool timed_out = false;
	int i;

	/* Poll update field, waiting for NFP to ack the config.
	 * Do an opportunistic wait-busy loop, afterward sleep.
	 */
	for (i = 0; i < 50; i++) {
		if (nfp_net_reconfig_check_done(nn, false))
			return false;
		udelay(4);
	}

	while (!nfp_net_reconfig_check_done(nn, timed_out)) {
		usleep_range(250, 500);
		timed_out = time_is_before_eq_jiffies(deadline);
	}

	return timed_out;
}

static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
{
	if (__nfp_net_reconfig_wait(nn, deadline))
		return -EIO;

	if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
		return -EIO;

	return 0;
}

static void nfp_net_reconfig_timer(struct timer_list *t)
{
	struct nfp_net *nn = from_timer(nn, t, reconfig_timer);

	spin_lock_bh(&nn->reconfig_lock);

	nn->reconfig_timer_active = false;

	/* If sync caller is present it will take over from us */
	if (nn->reconfig_sync_present)
		goto done;

	/* Read reconfig status and report errors */
	nfp_net_reconfig_check_done(nn, true);

	if (nn->reconfig_posted)
		nfp_net_reconfig_start_async(nn, 0);
done:
	spin_unlock_bh(&nn->reconfig_lock);
}
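
/**
 * nfp_net_reconfig_post() - Post async reconfig request
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Record FW reconfiguration request.  Reconfiguration will be kicked off
 * whenever reconfiguration machinery is idle.  Not thread safe.
 */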
static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
{
	spin_lock_bh(&nn->reconfig_lock);

	/* Sync caller will kick off async reconfig when it's done */
	if (nn->reconfig_sync_present) {
		nn->reconfig_posted |= update;
		goto done;
	}

	/* Opportunistically check if the previous command is done */
	if (!nn->reconfig_timer_active ||
	    nfp_net_reconfig_check_done(nn, false))
		nfp_net_reconfig_start_async(nn, update);
	else
		nn->reconfig_posted |= update;
done:
	spin_unlock_bh(&nn->reconfig_lock);
}

static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
{
	bool cancelled_timer = false;
	u32 pre_posted_requests;

	spin_lock_bh(&nn->reconfig_lock);

	WARN_ON(nn->reconfig_sync_present);
	nn->reconfig_sync_present = true;

	if (nn->reconfig_timer_active) {
		nn->reconfig_timer_active = false;
		cancelled_timer = true;
	}
	pre_posted_requests = nn->reconfig_posted;
	nn->reconfig_posted = 0;

	spin_unlock_bh(&nn->reconfig_lock);

	if (cancelled_timer) {
		del_timer_sync(&nn->reconfig_timer);
		nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
	}

	/* Run the posted reconfigs which were issued before we started */
	if (pre_posted_requests) {
		nfp_net_reconfig_start(nn, pre_posted_requests);
		nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
	}
}

static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
{
	nfp_net_reconfig_sync_enter(nn);

	spin_lock_bh(&nn->reconfig_lock);
	nn->reconfig_sync_present = false;
	spin_unlock_bh(&nn->reconfig_lock);
}
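
/**
 * __nfp_net_reconfig() - Reconfigure the firmware
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Write the update word to the BAR and ping the reconfig queue.  Then poll
 * until the firmware has acknowledged the update by zeroing the update word.
 *
 * Return: Negative errno on error, 0 on success
 */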
int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
	int ret;

	nfp_net_reconfig_sync_enter(nn);

	nfp_net_reconfig_start(nn, update);
	ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);

	spin_lock_bh(&nn->reconfig_lock);

	if (nn->reconfig_posted)
		nfp_net_reconfig_start_async(nn, 0);

	nn->reconfig_sync_present = false;

	spin_unlock_bh(&nn->reconfig_lock);

	return ret;
}

int nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
	int ret;

	nn_ctrl_bar_lock(nn);
	ret = __nfp_net_reconfig(nn, update);
	nn_ctrl_bar_unlock(nn);

	return ret;
}

int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size)
{
	if (nn->tlv_caps.mbox_len < NFP_NET_CFG_MBOX_SIMPLE_VAL + data_size) {
		nn_err(nn, "mailbox too small for %u of data (%u)\n",
		       data_size, nn->tlv_caps.mbox_len);
		return -EIO;
	}

	nn_ctrl_bar_lock(nn);
	return 0;
}
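
/**
 * nfp_net_mbox_reconfig() - Reconfigure the firmware via the mailbox
 * @nn:        NFP Net device to reconfigure
 * @mbox_cmd:  The value for the mailbox command
 *
 * Helper function for mailbox updates
 *
 * Return: Negative errno on error, 0 on success
 */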
int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
{
	u32 mbox = nn->tlv_caps.mbox_off;
	int ret;

	nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);

	ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
	if (ret) {
		nn_err(nn, "Mailbox update error\n");
		return ret;
	}

	return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
}

void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 mbox_cmd)
{
	u32 mbox = nn->tlv_caps.mbox_off;

	nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);

	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_MBOX);
}

int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn)
{
	u32 mbox = nn->tlv_caps.mbox_off;

	nfp_net_reconfig_wait_posted(nn);

	return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
}

int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
{
	int ret;

	ret = nfp_net_mbox_reconfig(nn, mbox_cmd);
	nn_ctrl_bar_unlock(nn);
	return ret;
}
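
/* Interrupt configuration and handling
 */

/**
 * nfp_net_irq_unmask() - Unmask automasked interrupt
 * @nn:       NFP Network structure
 * @entry_nr: MSI-X table entry
 *
 * Clear the ICR for the IRQ entry.
 */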
static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
{
	nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
	nn_pci_flush(nn);
}
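
/**
 * nfp_net_irqs_alloc() - allocates MSI-X irqs
 * @pdev:        PCI device structure
 * @irq_entries: Array to be initialized and used to hold the irq entries
 * @min_irqs:    Minimal acceptable number of interrupts
 * @wanted_irqs: Target number of interrupts to allocate
 *
 * Return: Number of irqs obtained or 0 on error.
 */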
unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
		   unsigned int min_irqs, unsigned int wanted_irqs)
{
	unsigned int i;
	int got_irqs;

	for (i = 0; i < wanted_irqs; i++)
		irq_entries[i].entry = i;

	got_irqs = pci_enable_msix_range(pdev, irq_entries,
					 min_irqs, wanted_irqs);
	if (got_irqs < 0) {
		dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n",
			min_irqs, wanted_irqs, got_irqs);
		return 0;
	}

	if (got_irqs < wanted_irqs)
		dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
			 wanted_irqs, got_irqs);

	return got_irqs;
}
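
/**
 * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev
 * @nn:          NFP Network structure
 * @irq_entries: Table of allocated interrupts
 * @n:           Size of @irq_entries (number of entries to grab from)
 *
 * After interrupts are allocated with nfp_net_irqs_alloc() this function
 * should be called to assign them to a specific netdev (port).
 */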
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
		    unsigned int n)
{
	struct nfp_net_dp *dp = &nn->dp;

	nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
	dp->num_r_vecs = nn->max_r_vecs;

	memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);

	if (dp->num_rx_rings > dp->num_r_vecs ||
	    dp->num_tx_rings > dp->num_r_vecs)
		dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n",
			 dp->num_rx_rings, dp->num_tx_rings,
			 dp->num_r_vecs);

	dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
	dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
	dp->num_stack_tx_rings = dp->num_tx_rings;
}
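
/**
 * nfp_net_irqs_disable() - Disable interrupts
 * @pdev:        PCI device structure
 *
 * Undoes what @nfp_net_irqs_alloc() does.
 */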
void nfp_net_irqs_disable(struct pci_dev *pdev)
{
	pci_disable_msix(pdev);
}
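
/**
 * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings.
 * @irq:      Interrupt
 * @data:     Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */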
static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
{
	struct nfp_net_r_vector *r_vec = data;

	napi_schedule_irqoff(&r_vec->napi);

	/* The FW auto-masks any interrupt, either via the MASK bit in
	 * the MSI-X table or via the per entry ICR field.  So there
	 * is no need to disable interrupts here.
	 */
	return IRQ_HANDLED;
}

static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data)
{
	struct nfp_net_r_vector *r_vec = data;

	tasklet_schedule(&r_vec->tasklet);

	return IRQ_HANDLED;
}
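
/**
 * nfp_net_read_link_status() - Reread link status from control BAR
 * @nn:       NFP Network structure
 */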
static void nfp_net_read_link_status(struct nfp_net *nn)
{
	unsigned long flags;
	bool link_up;
	u32 sts;

	spin_lock_irqsave(&nn->link_status_lock, flags);

	sts = nn_readl(nn, NFP_NET_CFG_STS);
	link_up = !!(sts & NFP_NET_CFG_STS_LINK);

	if (nn->link_up == link_up)
		goto out;

	nn->link_up = link_up;
	if (nn->port)
		set_bit(NFP_PORT_CHANGED, &nn->port->flags);

	if (nn->link_up) {
		netif_carrier_on(nn->dp.netdev);
		netdev_info(nn->dp.netdev, "NIC Link is Up\n");
	} else {
		netif_carrier_off(nn->dp.netdev);
		netdev_info(nn->dp.netdev, "NIC Link is Down\n");
	}
out:
	spin_unlock_irqrestore(&nn->link_status_lock, flags);
}
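
/**
 * nfp_net_irq_lsc() - Interrupt service routine for link state changes
 * @irq:      Interrupt
 * @data:     Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */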
static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
{
	struct nfp_net *nn = data;
	struct msix_entry *entry;

	entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];

	nfp_net_read_link_status(nn);

	nfp_net_irq_unmask(nn, entry->entry);

	return IRQ_HANDLED;
}
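
/**
 * nfp_net_irq_exn() - Interrupt service routine for exceptions
 * @irq:      Interrupt
 * @data:     Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */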
static irqreturn_t nfp_net_irq_exn(int irq, void *data)
{
	struct nfp_net *nn = data;

	nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);

	return IRQ_HANDLED;
}
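
/**
 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
 * @tx_ring:  TX ring structure
 * @r_vec:    IRQ vector servicing this ring
 * @idx:      Ring index
 * @is_xdp:   Is this an XDP TX ring?
 */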
static void
nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
		     struct nfp_net_r_vector *r_vec, unsigned int idx,
		     bool is_xdp)
{
	struct nfp_net *nn = r_vec->nfp_net;

	tx_ring->idx = idx;
	tx_ring->r_vec = r_vec;
	tx_ring->is_xdp = is_xdp;
	u64_stats_init(&tx_ring->r_vec->tx_sync);

	tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
	tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
}
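
/**
 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
 * @rx_ring:  RX ring structure
 * @r_vec:    IRQ vector servicing this ring
 * @idx:      Ring index
 */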
static void
nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
		     struct nfp_net_r_vector *r_vec, unsigned int idx)
{
	struct nfp_net *nn = r_vec->nfp_net;

	rx_ring->idx = idx;
	rx_ring->r_vec = r_vec;
	u64_stats_init(&rx_ring->r_vec->rx_sync);

	rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
	rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
}
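
/**
 * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
 * @nn:		NFP Network structure
 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
 * @format:	printf-style format to construct the interrupt name
 * @name:	Pointer to allocated space for interrupt name
 * @name_sz:	Size of space for interrupt name
 * @vector_idx:	Index of MSI-X vector used for this interrupt
 * @handler:	IRQ handler to register for this interrupt
 *
 * Return: 0 on success or negative errno on error.
 */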
static int
nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
			const char *format, char *name, size_t name_sz,
			unsigned int vector_idx, irq_handler_t handler)
{
	struct msix_entry *entry;
	int err;

	entry = &nn->irq_entries[vector_idx];

	snprintf(name, name_sz, format, nfp_net_name(nn));
	err = request_irq(entry->vector, handler, 0, name, nn);
	if (err) {
		nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
		       entry->vector, err);
		return err;
	}
	nn_writeb(nn, ctrl_offset, entry->entry);
	nfp_net_irq_unmask(nn, entry->entry);

	return 0;
}
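
/**
 * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
 * @nn:		NFP Network structure
 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
 * @vector_idx:	Index of MSI-X vector used for this interrupt
 */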
static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
				 unsigned int vector_idx)
{
	nn_writeb(nn, ctrl_offset, 0xff);
	nn_pci_flush(nn);
	free_irq(nn->irq_entries[vector_idx].vector, nn);
}
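
/* Transmit
 *
 * One queue controller peripheral queue is used for transmit.  The
 * driver en-queues packets for transmit by advancing the write
 * pointer.  The device indicates that packets have transmitted by
 * advancing the read pointer.  The driver maintains a local copy of
 * the read and write pointer in @struct nfp_net_tx_ring.
 */

/**
 * nfp_net_tx_full() - Check if the TX ring is full
 * @tx_ring: TX ring to check
 * @dcnt:    Number of descriptors that need to be enqueued (must be >= 1)
 *
 * This function checks, based on the *host copy* of read/write
 * pointer if a given TX ring is full.  The real TX queue may have
 * some newly made available slots.
 *
 * Return: True if the ring is full.
 */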
static int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
{
	return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
}

static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
{
	return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
}

static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
{
	return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
}
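
/**
 * nfp_net_tx_ring_stop() - stop tx ring
 * @nd_q:    netdev queue
 * @tx_ring: driver tx queue structure
 *
 * Safely stop TX ring.  Remember that while we are running .start_xmit()
 * someone else may be cleaning the TX ring completions so we need to be
 * extra careful here.
 */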
static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
				 struct nfp_net_tx_ring *tx_ring)
{
	netif_tx_stop_queue(nd_q);

	/* We can race with the TX completion out of NAPI so recheck */
	smp_mb();
	if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
		netif_tx_start_queue(nd_q);
}
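
/**
 * nfp_net_tx_tso() - Set up Tx descriptor for LSO
 * @r_vec: per-ring structure
 * @txbuf: Pointer to driver soft TX descriptor
 * @txd: Pointer to HW TX descriptor
 * @skb: Pointer to SKB
 * @md_bytes: Prepend length
 *
 * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
 */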
static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
			   struct nfp_net_tx_buf *txbuf,
			   struct nfp_net_tx_desc *txd, struct sk_buff *skb,
			   u32 md_bytes)
{
	u32 l3_offset, l4_offset, hdrlen;
	u16 mss;

	if (!skb_is_gso(skb))
		return;

	if (!skb->encapsulation) {
		l3_offset = skb_network_offset(skb);
		l4_offset = skb_transport_offset(skb);
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
	} else {
		l3_offset = skb_inner_network_offset(skb);
		l4_offset = skb_inner_transport_offset(skb);
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			inner_tcp_hdrlen(skb);
	}

	txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
	txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);

	mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
	txd->l3_offset = l3_offset - md_bytes;
	txd->l4_offset = l4_offset - md_bytes;
	txd->lso_hdrlen = hdrlen - md_bytes;
	txd->mss = cpu_to_le16(mss);
	txd->flags |= PCIE_DESC_TX_LSO;

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_lso++;
	u64_stats_update_end(&r_vec->tx_sync);
}
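
/**
 * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
 * @dp:  NFP Net data path struct
 * @r_vec: per-ring structure
 * @txbuf: Pointer to driver soft TX descriptor
 * @txd: Pointer to TX descriptor
 * @skb: Pointer to SKB
 *
 * This function sets the TX checksum flags in the TX descriptor based
 * on the configuration and the protocol of the packet to be transmitted.
 */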
static void nfp_net_tx_csum(struct nfp_net_dp *dp,
			    struct nfp_net_r_vector *r_vec,
			    struct nfp_net_tx_buf *txbuf,
			    struct nfp_net_tx_desc *txd, struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct iphdr *iph;
	u8 l4_hdr;

	if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
		return;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return;

	txd->flags |= PCIE_DESC_TX_CSUM;
	if (skb->encapsulation)
		txd->flags |= PCIE_DESC_TX_ENCAP;

	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);

	if (iph->version == 4) {
		txd->flags |= PCIE_DESC_TX_IP4_CSUM;
		l4_hdr = iph->protocol;
	} else if (ipv6h->version == 6) {
		l4_hdr = ipv6h->nexthdr;
	} else {
		nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
		return;
	}

	switch (l4_hdr) {
	case IPPROTO_TCP:
		txd->flags |= PCIE_DESC_TX_TCP_CSUM;
		break;
	case IPPROTO_UDP:
		txd->flags |= PCIE_DESC_TX_UDP_CSUM;
		break;
	default:
		nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
		return;
	}

	u64_stats_update_begin(&r_vec->tx_sync);
	if (skb->encapsulation)
		r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
	else
		r_vec->hw_csum_tx += txbuf->pkt_cnt;
	u64_stats_update_end(&r_vec->tx_sync);
}

static struct sk_buff *
nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
	       struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
{
#ifdef CONFIG_TLS_DEVICE
	struct nfp_net_tls_offload_ctx *ntls;
	struct sk_buff *nskb;
	bool resync_pending;
	u32 datalen, seq;

	if (likely(!dp->ktls_tx))
		return skb;
	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
		return skb;

	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
	seq = ntohl(tcp_hdr(skb)->seq);
	ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
	resync_pending = tls_offload_tx_resync_pending(skb->sk);
	if (unlikely(resync_pending || ntls->next_seq != seq)) {
		/* Pure ACK out of order already */
		if (!datalen)
			return skb;

		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tls_tx_fallback++;
		u64_stats_update_end(&r_vec->tx_sync);

		nskb = tls_encrypt_skb(skb);
		if (!nskb) {
			u64_stats_update_begin(&r_vec->tx_sync);
			r_vec->tls_tx_no_fallback++;
			u64_stats_update_end(&r_vec->tx_sync);
			return NULL;
		}
		/* encryption wasn't necessary */
		if (nskb == skb)
			return skb;
		/* we don't re-check ring space */
		if (unlikely(skb_is_nonlinear(nskb))) {
			nn_dp_warn(dp, "tls_encrypt_skb() produced fragmented frame\n");
			u64_stats_update_begin(&r_vec->tx_sync);
			r_vec->tx_errors++;
			u64_stats_update_end(&r_vec->tx_sync);
			dev_kfree_skb_any(nskb);
			return NULL;
		}

		/* jump forward, a TX may have gotten lost, need to sync TX */
		if (!resync_pending && seq - ntls->next_seq < U32_MAX / 4)
			tls_offload_tx_resync_request(nskb->sk, seq,
						      ntls->next_seq);

		*nr_frags = 0;
		return nskb;
	}

	if (datalen) {
		u64_stats_update_begin(&r_vec->tx_sync);
		if (!skb_is_gso(skb))
			r_vec->hw_tls_tx++;
		else
			r_vec->hw_tls_tx += skb_shinfo(skb)->gso_segs;
		u64_stats_update_end(&r_vec->tx_sync);
	}

	memcpy(tls_handle, ntls->fw_handle, sizeof(ntls->fw_handle));
	ntls->next_seq += datalen;
#endif
	return skb;
}

static void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
{
#ifdef CONFIG_TLS_DEVICE
	struct nfp_net_tls_offload_ctx *ntls;
	u32 datalen, seq;

	if (!tls_handle)
		return;
	if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
		return;

	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
	seq = ntohl(tcp_hdr(skb)->seq);

	ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
	if (ntls->next_seq == seq + datalen)
		ntls->next_seq = seq;
	else
		WARN_ON_ONCE(1);
#endif
}

static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
{
	/* force memory write before we let HW know */
	wmb();
	nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
	tx_ring->wr_ptr_add = 0;
}

static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	unsigned char *data;
	u32 meta_id = 0;
	int md_bytes;

	if (likely(!md_dst && !tls_handle))
		return 0;
	if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) {
		if (!tls_handle)
			return 0;
		md_dst = NULL;
	}

	md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8;

	if (unlikely(skb_cow_head(skb, md_bytes)))
		return -ENOMEM;

	meta_id = 0;
	data = skb_push(skb, md_bytes) + md_bytes;
	if (md_dst) {
		data -= 4;
		put_unaligned_be32(md_dst->u.port_info.port_id, data);
		meta_id = NFP_NET_META_PORTID;
	}
	if (tls_handle) {
		/* conn handle is opaque, we just use u64 to be able to quickly
		 * compare it to zero
		 */
		data -= 8;
		memcpy(data, &tls_handle, sizeof(tls_handle));
		meta_id <<= NFP_NET_META_FIELD_SIZE;
		meta_id |= NFP_NET_META_CONN_HANDLE;
	}

	data -= 4;
	put_unaligned_be32(meta_id, data);

	return md_bytes;
}
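
/**
 * nfp_net_tx() - Main transmit entry point
 * @skb:    SKB to transmit
 * @netdev: netdev structure
 *
 * Return: NETDEV_TX_OK on success.
 */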
static netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	const skb_frag_t *frag;
	int f, nr_frags, wr_idx, md_bytes;
	struct nfp_net_tx_ring *tx_ring;
	struct nfp_net_r_vector *r_vec;
	struct nfp_net_tx_buf *txbuf;
	struct nfp_net_tx_desc *txd;
	struct netdev_queue *nd_q;
	struct nfp_net_dp *dp;
	dma_addr_t dma_addr;
	unsigned int fsize;
	u64 tls_handle = 0;
	u16 qidx;

	dp = &nn->dp;
	qidx = skb_get_queue_mapping(skb);
	tx_ring = &dp->tx_rings[qidx];
	r_vec = tx_ring->r_vec;

	nr_frags = skb_shinfo(skb)->nr_frags;

	if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
		nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
			   qidx, tx_ring->wr_p, tx_ring->rd_p);
		nd_q = netdev_get_tx_queue(dp->netdev, qidx);
		netif_tx_stop_queue(nd_q);
		nfp_net_tx_xmit_more_flush(tx_ring);
		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_busy++;
		u64_stats_update_end(&r_vec->tx_sync);
		return NETDEV_TX_BUSY;
	}

	skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
	if (unlikely(!skb)) {
		nfp_net_tx_xmit_more_flush(tx_ring);
		return NETDEV_TX_OK;
	}

	md_bytes = nfp_net_prep_tx_meta(skb, tls_handle);
	if (unlikely(md_bytes < 0))
		goto err_flush;

	/* Start with the head skbuf */
	dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dp->dev, dma_addr))
		goto err_dma_err;

	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

	/* Stash the soft descriptor of the head then initialize it */
	txbuf = &tx_ring->txbufs[wr_idx];
	txbuf->skb = skb;
	txbuf->dma_addr = dma_addr;
	txbuf->fidx = -1;
	txbuf->pkt_cnt = 1;
	txbuf->real_len = skb->len;

	/* Build TX descriptor */
	txd = &tx_ring->txds[wr_idx];
	txd->offset_eop = (nr_frags ? 0 : PCIE_DESC_TX_EOP) | md_bytes;
	txd->dma_len = cpu_to_le16(skb_headlen(skb));
	nfp_desc_set_dma_addr(txd, dma_addr);
	txd->data_len = cpu_to_le16(skb->len);

	txd->flags = 0;
	txd->mss = 0;
	txd->lso_hdrlen = 0;

	/* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
	nfp_net_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
	nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
	if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
		txd->flags |= PCIE_DESC_TX_VLAN;
		txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
	}

	/* Gather DMA */
	if (nr_frags > 0) {
		__le64 second_half;

		/* all descs must match except for in addr, length and eop */
		second_half = txd->vals8[1];

		for (f = 0; f < nr_frags; f++) {
			frag = &skb_shinfo(skb)->frags[f];
			fsize = skb_frag_size(frag);

			dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
						    fsize, DMA_TO_DEVICE);
			if (dma_mapping_error(dp->dev, dma_addr))
				goto err_unmap;

			wr_idx = D_IDX(tx_ring, wr_idx + 1);
			tx_ring->txbufs[wr_idx].skb = skb;
			tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
			tx_ring->txbufs[wr_idx].fidx = f;

			txd = &tx_ring->txds[wr_idx];
			txd->dma_len = cpu_to_le16(fsize);
			nfp_desc_set_dma_addr(txd, dma_addr);
			txd->offset_eop = md_bytes |
				((f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0);
			txd->vals8[1] = second_half;
		}

		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_gather++;
		u64_stats_update_end(&r_vec->tx_sync);
	}

	skb_tx_timestamp(skb);

	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);

	tx_ring->wr_p += nr_frags + 1;
	if (nfp_net_tx_ring_should_stop(tx_ring))
		nfp_net_tx_ring_stop(nd_q, tx_ring);

	tx_ring->wr_ptr_add += nr_frags + 1;
	if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
		nfp_net_tx_xmit_more_flush(tx_ring);

	return NETDEV_TX_OK;

err_unmap:
	while (--f >= 0) {
		frag = &skb_shinfo(skb)->frags[f];
		dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
			       skb_frag_size(frag), DMA_TO_DEVICE);
		tx_ring->txbufs[wr_idx].skb = NULL;
		tx_ring->txbufs[wr_idx].dma_addr = 0;
		tx_ring->txbufs[wr_idx].fidx = -2;
		wr_idx = wr_idx - 1;
		if (wr_idx < 0)
			wr_idx += tx_ring->cnt;
	}
	dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);
	tx_ring->txbufs[wr_idx].skb = NULL;
	tx_ring->txbufs[wr_idx].dma_addr = 0;
	tx_ring->txbufs[wr_idx].fidx = -2;
err_dma_err:
	nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
err_flush:
	nfp_net_tx_xmit_more_flush(tx_ring);
	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_errors++;
	u64_stats_update_end(&r_vec->tx_sync);
	nfp_net_tls_tx_undo(skb, tls_handle);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
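
/**
 * nfp_net_tx_complete() - Handled completed TX packets
 * @tx_ring:	TX ring structure
 * @budget:	NAPI budget (only used as bool to determine if in NAPI context)
 */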
static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	struct netdev_queue *nd_q;
	u32 done_pkts = 0, done_bytes = 0;
	u32 qcp_rd_p;
	int todo;

	if (tx_ring->wr_p == tx_ring->rd_p)
		return;

	/* Work out how many descriptors have been transmitted */
	qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);

	if (qcp_rd_p == tx_ring->qcp_rd_p)
		return;

	todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);

	while (todo--) {
		const skb_frag_t *frag;
		struct nfp_net_tx_buf *tx_buf;
		struct sk_buff *skb;
		int fidx, nr_frags;
		int idx;

		idx = D_IDX(tx_ring, tx_ring->rd_p++);
		tx_buf = &tx_ring->txbufs[idx];

		skb = tx_buf->skb;
		if (!skb)
			continue;

		nr_frags = skb_shinfo(skb)->nr_frags;
		fidx = tx_buf->fidx;

		if (fidx == -1) {
			/* unmap head */
			dma_unmap_single(dp->dev, tx_buf->dma_addr,
					 skb_headlen(skb), DMA_TO_DEVICE);

			done_pkts += tx_buf->pkt_cnt;
			done_bytes += tx_buf->real_len;
		} else {
			/* unmap fragment */
			frag = &skb_shinfo(skb)->frags[fidx];
			dma_unmap_page(dp->dev, tx_buf->dma_addr,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		}

		/* check for last gather fragment */
		if (fidx == nr_frags - 1)
			napi_consume_skb(skb, budget);

		tx_buf->dma_addr = 0;
		tx_buf->skb = NULL;
		tx_buf->fidx = -2;
	}

	tx_ring->qcp_rd_p = qcp_rd_p;

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_bytes += done_bytes;
	r_vec->tx_pkts += done_pkts;
	u64_stats_update_end(&r_vec->tx_sync);

	if (!dp->netdev)
		return;

	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
	netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
	if (nfp_net_tx_ring_should_wake(tx_ring)) {
		/* Make sure TX thread will see updated tx_ring->rd_p */
		smp_mb();

		if (unlikely(netif_tx_queue_stopped(nd_q)))
			netif_tx_wake_queue(nd_q);
	}

	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
		  "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
}

static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	u32 done_pkts = 0, done_bytes = 0;
	bool done_all;
	int idx, todo;
	u32 qcp_rd_p;

	/* Work out how many descriptors have been transmitted */
	qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);

	if (qcp_rd_p == tx_ring->qcp_rd_p)
		return true;

	todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);

	done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
	todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);

	tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);

	done_pkts = todo;
	while (todo--) {
		idx = D_IDX(tx_ring, tx_ring->rd_p);
		tx_ring->rd_p++;

		done_bytes += tx_ring->txbufs[idx].real_len;
	}

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_bytes += done_bytes;
	r_vec->tx_pkts += done_pkts;
	u64_stats_update_end(&r_vec->tx_sync);

	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
		  "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);

	return done_all;
}
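
/**
 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
 * @dp:		NFP Net data path struct
 * @tx_ring:	TX ring structure
 *
 * Assumes that the device is stopped, must be idempotent.
 */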
static void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	const skb_frag_t *frag;
	struct netdev_queue *nd_q;

	while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
		struct nfp_net_tx_buf *tx_buf;
		struct sk_buff *skb;
		int idx, nr_frags;

		idx = D_IDX(tx_ring, tx_ring->rd_p);
		tx_buf = &tx_ring->txbufs[idx];

		skb = tx_ring->txbufs[idx].skb;
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (tx_buf->fidx == -1) {
			/* unmap head */
			dma_unmap_single(dp->dev, tx_buf->dma_addr,
					 skb_headlen(skb), DMA_TO_DEVICE);
		} else {
			/* unmap fragment */
			frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
			dma_unmap_page(dp->dev, tx_buf->dma_addr,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		}

		/* check for last gather fragment */
		if (tx_buf->fidx == nr_frags - 1)
			dev_kfree_skb_any(skb);

		tx_buf->dma_addr = 0;
		tx_buf->skb = NULL;
		tx_buf->fidx = -2;

		tx_ring->qcp_rd_p++;
		tx_ring->rd_p++;
	}

	memset(tx_ring->txds, 0, tx_ring->size);
	tx_ring->wr_p = 0;
	tx_ring->rd_p = 0;
	tx_ring->qcp_rd_p = 0;
	tx_ring->wr_ptr_add = 0;

	if (tx_ring->is_xdp || !dp->netdev)
		return;

	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
	netdev_tx_reset_queue(nd_q);
}

static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct nfp_net *nn = netdev_priv(netdev);

	nn_warn(nn, "TX watchdog timeout on ring: %u\n", txqueue);
}
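
/* Receive processing
 */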
static unsigned int
nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
{
	unsigned int fl_bufsz;

	fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
	fl_bufsz += dp->rx_dma_off;
	if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
		fl_bufsz += NFP_NET_MAX_PREPEND;
	else
		fl_bufsz += dp->rx_offset;
	fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;

	fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
	fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return fl_bufsz;
}

static void
nfp_net_free_frag(void *frag, bool xdp)
{
	if (!xdp)
		skb_free_frag(frag);
	else
		__free_page(virt_to_page(frag));
}
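
/**
 * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
 * @dp:		NFP Net data path struct
 * @dma_addr:	Pointer to storage for DMA address (output param)
 *
 * This function will allocate a new page frag and map it for DMA.
 *
 * Return: allocated page frag or NULL on failure.
 */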
static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
	void *frag;

	if (!dp->xdp_prog) {
		frag = netdev_alloc_frag(dp->fl_bufsz);
	} else {
		struct page *page;

		page = alloc_page(GFP_KERNEL);
		frag = page ? page_address(page) : NULL;
	}
	if (!frag) {
		nn_dp_warn(dp, "Failed to alloc receive page frag\n");
		return NULL;
	}

	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr)) {
		nfp_net_free_frag(frag, dp->xdp_prog);
		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return frag;
}

static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
	void *frag;

	if (!dp->xdp_prog) {
		frag = napi_alloc_frag(dp->fl_bufsz);
		if (unlikely(!frag))
			return NULL;
	} else {
		struct page *page;

		page = dev_alloc_page();
		if (unlikely(!page))
			return NULL;
		frag = page_address(page);
	}

	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr)) {
		nfp_net_free_frag(frag, dp->xdp_prog);
		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return frag;
}
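
/**
 * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
 * @dp:		NFP Net data path struct
 * @rx_ring:	RX ring structure
 * @frag:	page fragment buffer
 * @dma_addr:	DMA address of skb mapping
 */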
static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
				struct nfp_net_rx_ring *rx_ring,
				void *frag, dma_addr_t dma_addr)
{
	unsigned int wr_idx;

	wr_idx = D_IDX(rx_ring, rx_ring->wr_p);

	nfp_net_dma_sync_dev_rx(dp, dma_addr);

	/* Stash SKB and DMA address away */
	rx_ring->rxbufs[wr_idx].frag = frag;
	rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;

	/* Fill freelist descriptor */
	rx_ring->rxds[wr_idx].fld.reserved = 0;
	rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
	nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
			      dma_addr + dp->rx_dma_off);

	rx_ring->wr_p++;
	if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
		/* Update write pointer of the freelist queue. Make
		 * sure all writes are flushed before telling the hardware.
		 */
		wmb();
		nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
	}
}
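
/**
 * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
 * @rx_ring:	RX ring structure
 *
 * Warning: Do *not* call if ring buffers were never put on the HW freelist
 *	    (i.e. device was not enabled)!
 */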
static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
{
	unsigned int wr_idx, last_idx;

	/* wr_p == rd_p means ring was never fed FL bufs.  RX rings are always
	 * kept at cnt - 1 FL bufs.
	 */
	if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
		return;

	/* Move the empty entry to the end of the list */
	wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
	last_idx = rx_ring->cnt - 1;
	rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
	rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
	rx_ring->rxbufs[last_idx].dma_addr = 0;
	rx_ring->rxbufs[last_idx].frag = NULL;

	memset(rx_ring->rxds, 0, rx_ring->size);
	rx_ring->wr_p = 0;
	rx_ring->rd_p = 0;
}
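
/**
 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
 * @dp:		NFP Net data path struct
 * @rx_ring:	RX ring to remove buffers from
 *
 * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
 * entries.  After device is disabled nfp_net_rx_ring_reset() must be called
 * to restore required ring geometry.
 */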
static void
nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
			  struct nfp_net_rx_ring *rx_ring)
{
	unsigned int i;

	for (i = 0; i < rx_ring->cnt - 1; i++) {
		/* NULL skb can only happen when initial filling of the ring
		 * fails to allocate enough buffers and calls here to free
		 * already allocated ones.
		 */
		if (!rx_ring->rxbufs[i].frag)
			continue;

		nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
		nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
		rx_ring->rxbufs[i].dma_addr = 0;
		rx_ring->rxbufs[i].frag = NULL;
	}
}
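
/**
 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
 * @dp:		NFP Net data path struct
 * @rx_ring:	RX ring to fill with buffers
 *
 * Return: 0 on success, -ENOMEM on failure.
 */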
static int
nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
			   struct nfp_net_rx_ring *rx_ring)
{
	struct nfp_net_rx_buf *rxbufs;
	unsigned int i;

	rxbufs = rx_ring->rxbufs;

	for (i = 0; i < rx_ring->cnt - 1; i++) {
		rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
		if (!rxbufs[i].frag) {
			nfp_net_rx_ring_bufs_free(dp, rx_ring);
			return -ENOMEM;
		}
	}

	return 0;
}
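
/**
 * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
 * @dp:	     NFP Net data path struct
 * @rx_ring: RX ring to fill
 */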
static void
nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
			      struct nfp_net_rx_ring *rx_ring)
{
	unsigned int i;

	for (i = 0; i < rx_ring->cnt - 1; i++)
		nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
				    rx_ring->rxbufs[i].dma_addr);
}
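
/**
 * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
 * @flags: RX descriptor flags field in CPU byte order
 */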
static int nfp_net_rx_csum_has_errors(u16 flags)
{
	u16 csum_all_checked, csum_all_ok;

	csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
	csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;

	return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
}
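
/**
 * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
 * @dp:  NFP Net data path struct
 * @r_vec: per-ring structure
 * @rxd: Pointer to RX descriptor
 * @meta: Parsed metadata prepend
 * @skb: Pointer to SKB
 */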
static void nfp_net_rx_csum(struct nfp_net_dp *dp,
			    struct nfp_net_r_vector *r_vec,
			    struct nfp_net_rx_desc *rxd,
			    struct nfp_meta_parsed *meta, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	if (!(dp->netdev->features & NETIF_F_RXCSUM))
		return;

	if (meta->csum_type) {
		skb->ip_summed = meta->csum_type;
		skb->csum = meta->csum;
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_complete++;
		u64_stats_update_end(&r_vec->rx_sync);
		return;
	}

	if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_error++;
		u64_stats_update_end(&r_vec->rx_sync);
		return;
	}

	/* Assume that the firmware will never report inner CSUM_OK unless outer
	 * L4 headers were successfully parsed. FW will always report zero UDP
	 * checksum as CSUM_OK.
	 */
	if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
	    rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
		__skb_incr_checksum_unnecessary(skb);
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_ok++;
		u64_stats_update_end(&r_vec->rx_sync);
	}

	if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
	    rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
		__skb_incr_checksum_unnecessary(skb);
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_inner_ok++;
		u64_stats_update_end(&r_vec->rx_sync);
	}
}

static void
nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
		 unsigned int type, __be32 *hash)
{
	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (type) {
	case NFP_NET_RSS_IPV4:
	case NFP_NET_RSS_IPV6:
	case NFP_NET_RSS_IPV6_EX:
		meta->hash_type = PKT_HASH_TYPE_L3;
		break;
	default:
		meta->hash_type = PKT_HASH_TYPE_L4;
		break;
	}

	meta->hash = get_unaligned_be32(hash);
}

static void
nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
		      void *data, struct nfp_net_rx_desc *rxd)
{
	struct nfp_net_rx_hash *rx_hash = data;

	if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
		return;

	nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
			 &rx_hash->hash);
}

static bool
nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
		   void *data, void *pkt, unsigned int pkt_len, int meta_len)
{
	u32 meta_info;

	meta_info = get_unaligned_be32(data);
	data += 4;

	while (meta_info) {
		switch (meta_info & NFP_NET_META_FIELD_MASK) {
		case NFP_NET_META_HASH:
			meta_info >>= NFP_NET_META_FIELD_SIZE;
			nfp_net_set_hash(netdev, meta,
					 meta_info & NFP_NET_META_FIELD_MASK,
					 (__be32 *)data);
			data += 4;
			break;
		case NFP_NET_META_MARK:
			meta->mark = get_unaligned_be32(data);
			data += 4;
			break;
		case NFP_NET_META_PORTID:
			meta->portid = get_unaligned_be32(data);
			data += 4;
			break;
		case NFP_NET_META_CSUM:
			meta->csum_type = CHECKSUM_COMPLETE;
			meta->csum =
				(__force __wsum)__get_unaligned_cpu32(data);
			data += 4;
			break;
		case NFP_NET_META_RESYNC_INFO:
			if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
						      pkt_len))
				return false;
			data += sizeof(struct nfp_net_tls_resync_req);
			break;
		default:
			return true;
		}

		meta_info >>= NFP_NET_META_FIELD_SIZE;
	}

	return data != pkt;
}

static void
nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
		struct sk_buff *skb)
{
	u64_stats_update_begin(&r_vec->rx_sync);
	r_vec->rx_drops++;
	/* If we have both skb and rxbuf the replacement buffer allocation
	 * must have failed, count this as an alloc failure.
	 */
	if (skb && rxbuf)
		r_vec->rx_replace_buf_alloc_fail++;
	u64_stats_update_end(&r_vec->rx_sync);

	/* skb is build based on the frag, free_skb() would free the frag
	 * so to be able to reuse it we need an extra ref.
	 */
	if (skb && rxbuf && skb->head == rxbuf->frag)
		page_ref_inc(virt_to_head_page(rxbuf->frag));
	if (rxbuf)
		nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
	if (skb)
		dev_kfree_skb_any(skb);
}

static bool
nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
		   struct nfp_net_tx_ring *tx_ring,
		   struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
		   unsigned int pkt_len, bool *completed)
{
	unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
	struct nfp_net_tx_buf *txbuf;
	struct nfp_net_tx_desc *txd;
	int wr_idx;

	/* Reject if xdp_adjust_tail grow packet beyond DMA area */
	if (pkt_len + dma_off > dma_map_sz)
		return false;

	if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
		if (!*completed) {
			nfp_net_xdp_complete(tx_ring);
			*completed = true;
		}

		if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
			nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
					NULL);
			return false;
		}
	}

	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

	/* Stash the soft descriptor of the head then initialize it */
	txbuf = &tx_ring->txbufs[wr_idx];

	nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);

	txbuf->frag = rxbuf->frag;
	txbuf->dma_addr = rxbuf->dma_addr;
	txbuf->fidx = -1;
	txbuf->pkt_cnt = 1;
	txbuf->real_len = pkt_len;

	dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
				   pkt_len, DMA_BIDIRECTIONAL);

	/* Build TX descriptor */
	txd = &tx_ring->txds[wr_idx];
	txd->offset_eop = PCIE_DESC_TX_EOP;
	txd->dma_len = cpu_to_le16(pkt_len);
	nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
	txd->data_len = cpu_to_le16(pkt_len);

	txd->flags = 0;
	txd->mss = 0;
	txd->lso_hdrlen = 0;

	tx_ring->wr_p++;
	tx_ring->wr_ptr_add++;
	return true;
}
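
/**
 * nfp_net_rx() - receive up to @budget packets on @rx_ring
 * @rx_ring:   RX ring to receive from
 * @budget:    NAPI budget
 *
 * Note, this function is separated out from the napi poll function to more
 * cleanly separate packet receive code from other bookkeeping functions
 * performed in the napi poll function.
 *
 * Return: Number of packets received.
 */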
static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	struct nfp_net_tx_ring *tx_ring;
	struct bpf_prog *xdp_prog;
	bool xdp_tx_cmpl = false;
	unsigned int true_bufsz;
	struct sk_buff *skb;
	int pkts_polled = 0;
	struct xdp_buff xdp;
	int idx;

	rcu_read_lock();
	xdp_prog = READ_ONCE(dp->xdp_prog);
	true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
	xdp.frame_sz = PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM;
	xdp.rxq = &rx_ring->xdp_rxq;
	tx_ring = r_vec->xdp_ring;

	while (pkts_polled < budget) {
		unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
		struct nfp_net_rx_buf *rxbuf;
		struct nfp_net_rx_desc *rxd;
		struct nfp_meta_parsed meta;
		bool redir_egress = false;
		struct net_device *netdev;
		dma_addr_t new_dma_addr;
		u32 meta_len_xdp = 0;
		void *new_frag;

		idx = D_IDX(rx_ring, rx_ring->rd_p);

		rxd = &rx_ring->rxds[idx];
		if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
			break;

		/* Memory barrier to ensure that we won't do other reads
		 * before the DD bit.
		 */
		dma_rmb();

		memset(&meta, 0, sizeof(meta));

		rx_ring->rd_p++;
		pkts_polled++;

		rxbuf = &rx_ring->rxbufs[idx];
		/*         < meta_len >
		 *  <-- [rx_offset] -->
		 *  ---------------------------------------------------------
		 * | [XX] |  metadata  |             packet           | XXXX |
		 *  ---------------------------------------------------------
		 *         <---------------- data_len --------------->
		 *
		 * The rx_offset is fixed for all packets, the meta_len can vary
		 * on a packet by packet basis. If rx_offset is set to zero
		 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
		 * buffer and is immediately followed by the packet (no [XX]).
		 */
		meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
		data_len = le16_to_cpu(rxd->rxd.data_len);
		pkt_len = data_len - meta_len;

		pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
		if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
			pkt_off += meta_len;
		else
			pkt_off += dp->rx_offset;
		meta_off = pkt_off - meta_len;

		/* Stats update */
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->rx_pkts++;
		r_vec->rx_bytes += pkt_len;
		u64_stats_update_end(&r_vec->rx_sync);

		if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
			     (dp->rx_offset && meta_len > dp->rx_offset))) {
			nn_dp_warn(dp, "oversized RX packet metadata %u\n",
				   meta_len);
			nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
			continue;
		}

		nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
					data_len);

		if (!dp->chained_metadata_format) {
			nfp_net_set_hash_desc(dp->netdev, &meta,
					      rxbuf->frag + meta_off, rxd);
		} else if (meta_len) {
			if (unlikely(nfp_net_parse_meta(dp->netdev, &meta,
							rxbuf->frag + meta_off,
							rxbuf->frag + pkt_off,
							pkt_len, meta_len))) {
				nn_dp_warn(dp, "invalid RX packet metadata\n");
				nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
						NULL);
				continue;
			}
		}

		if (xdp_prog && !meta.portid) {
			void *orig_data = rxbuf->frag + pkt_off;
			unsigned int dma_off;
			int act;

			xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
			xdp.data = orig_data;
			xdp.data_meta = orig_data;
			xdp.data_end = orig_data + pkt_len;

			act = bpf_prog_run_xdp(xdp_prog, &xdp);

			pkt_len = xdp.data_end - xdp.data;
			pkt_off += xdp.data - orig_data;

			switch (act) {
			case XDP_PASS:
				meta_len_xdp = xdp.data - xdp.data_meta;
				break;
			case XDP_TX:
				dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
				if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
								 tx_ring, rxbuf,
								 dma_off,
								 pkt_len,
								 &xdp_tx_cmpl)))
					trace_xdp_exception(dp->netdev,
							    xdp_prog, act);
				continue;
			default:
				bpf_warn_invalid_xdp_action(act);
				fallthrough;
			case XDP_ABORTED:
				trace_xdp_exception(dp->netdev, xdp_prog, act);
				fallthrough;
			case XDP_DROP:
				nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
						    rxbuf->dma_addr);
				continue;
			}
		}

		if (likely(!meta.portid)) {
			netdev = dp->netdev;
		} else if (meta.portid == NFP_META_PORT_ID_CTRL) {
			struct nfp_net *nn = netdev_priv(dp->netdev);

			nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
					    pkt_len);
			nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
					    rxbuf->dma_addr);
			continue;
		} else {
			struct nfp_net *nn;

			nn = netdev_priv(dp->netdev);
			netdev = nfp_app_dev_get(nn->app, meta.portid,
						 &redir_egress);
			if (unlikely(!netdev)) {
				nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
						NULL);
				continue;
			}

			if (nfp_netdev_is_nfp_repr(netdev))
				nfp_repr_inc_rx_stats(netdev, pkt_len);
		}

		skb = build_skb(rxbuf->frag, true_bufsz);
		if (unlikely(!skb)) {
			nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
			continue;
		}
		new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
		if (unlikely(!new_frag)) {
			nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
			continue;
		}

		nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);

		nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);

		skb_reserve(skb, pkt_off);
		skb_put(skb, pkt_len);

		skb->mark = meta.mark;
		skb_set_hash(skb, meta.hash, meta.hash_type);

		skb_record_rx_queue(skb, rx_ring->idx);
		skb->protocol = eth_type_trans(skb, netdev);

		nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);

#ifdef CONFIG_TLS_DEVICE
		if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
			skb->decrypted = true;
			u64_stats_update_begin(&r_vec->rx_sync);
			r_vec->hw_tls_rx++;
			u64_stats_update_end(&r_vec->rx_sync);
		}
#endif

		if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(rxd->rxd.vlan));
		if (meta_len_xdp)
			skb_metadata_set(skb, meta_len_xdp);

		if (likely(!redir_egress)) {
			napi_gro_receive(&rx_ring->r_vec->napi, skb);
		} else {
			skb->dev = netdev;
			skb_reset_network_header(skb);
			__skb_push(skb, ETH_HLEN);
			dev_queue_xmit(skb);
		}
	}

	if (xdp_prog) {
		if (tx_ring->wr_ptr_add)
			nfp_net_tx_xmit_more_flush(tx_ring);
		else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
			 !xdp_tx_cmpl)
			if (!nfp_net_xdp_complete(tx_ring))
				pkts_polled = budget;
	}
	rcu_read_unlock();

	return pkts_polled;
}
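
/**
 * nfp_net_poll() - napi poll function
 * @napi:    NAPI structure
 * @budget:  NAPI budget
 *
 * Return: number of packets polled.
 */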
static int nfp_net_poll(struct napi_struct *napi, int budget)
{
	struct nfp_net_r_vector *r_vec =
		container_of(napi, struct nfp_net_r_vector, napi);
	unsigned int pkts_polled = 0;

	if (r_vec->tx_ring)
		nfp_net_tx_complete(r_vec->tx_ring, budget);
	if (r_vec->rx_ring)
		pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);

	if (pkts_polled < budget)
		if (napi_complete_done(napi, pkts_polled))
			nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);

	return pkts_polled;
}
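
/* Control device data path
 */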
static bool
nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
		struct sk_buff *skb, bool old)
{
	unsigned int real_len = skb->len, meta_len = 0;
	struct nfp_net_tx_ring *tx_ring;
	struct nfp_net_tx_buf *txbuf;
	struct nfp_net_tx_desc *txd;
	struct nfp_net_dp *dp;
	dma_addr_t dma_addr;
	int wr_idx;

	dp = &r_vec->nfp_net->dp;
	tx_ring = r_vec->tx_ring;

	if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
		nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
		goto err_free;
	}

	if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_busy++;
		u64_stats_update_end(&r_vec->tx_sync);
		if (!old)
			__skb_queue_tail(&r_vec->queue, skb);
		else
			__skb_queue_head(&r_vec->queue, skb);
		return true;
	}

	if (nfp_app_ctrl_has_meta(nn->app)) {
		if (unlikely(skb_headroom(skb) < 8)) {
			nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
			goto err_free;
		}
		meta_len = 8;
		put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
		put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
	}

	/* Start with the head skbuf */
	dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dp->dev, dma_addr))
		goto err_dma_warn;

	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

	/* Stash the soft descriptor of the head then initialize it */
	txbuf = &tx_ring->txbufs[wr_idx];
	txbuf->skb = skb;
	txbuf->dma_addr = dma_addr;
	txbuf->fidx = -1;
	txbuf->pkt_cnt = 1;
	txbuf->real_len = real_len;

	/* Build TX descriptor */
	txd = &tx_ring->txds[wr_idx];
	txd->offset_eop = meta_len | PCIE_DESC_TX_EOP;
	txd->dma_len = cpu_to_le16(skb_headlen(skb));
	nfp_desc_set_dma_addr(txd, dma_addr);
	txd->data_len = cpu_to_le16(skb->len);

	txd->flags = 0;
	txd->mss = 0;
	txd->lso_hdrlen = 0;

	tx_ring->wr_p++;
	tx_ring->wr_ptr_add++;
	nfp_net_tx_xmit_more_flush(tx_ring);

	return false;

err_dma_warn:
	nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
err_free:
	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_errors++;
	u64_stats_update_end(&r_vec->tx_sync);
	dev_kfree_skb_any(skb);
	return false;
}

bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
	struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];

	return nfp_ctrl_tx_one(nn, r_vec, skb, false);
}

bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
	struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
	bool ret;

	spin_lock_bh(&r_vec->lock);
	ret = nfp_ctrl_tx_one(nn, r_vec, skb, false);
	spin_unlock_bh(&r_vec->lock);

	return ret;
}

static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&r_vec->queue)))
		if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
			return;
}

static bool
nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
{
	u32 meta_type, meta_tag;

	if (!nfp_app_ctrl_has_meta(nn->app))
		return !meta_len;

	if (meta_len != 8)
		return false;

	meta_type = get_unaligned_be32(data);
	meta_tag = get_unaligned_be32(data + 4);

	return (meta_type == NFP_NET_META_PORTID &&
		meta_tag == NFP_META_PORT_ID_CTRL);
}

static bool
nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
		struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
{
	unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
	struct nfp_net_rx_buf *rxbuf;
	struct nfp_net_rx_desc *rxd;
	dma_addr_t new_dma_addr;
	struct sk_buff *skb;
	void *new_frag;
	int idx;

	idx = D_IDX(rx_ring, rx_ring->rd_p);

	rxd = &rx_ring->rxds[idx];
	if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
		return false;

	/* Memory barrier to ensure that we won't do other reads
	 * before the DD bit.
	 */
	dma_rmb();

	rx_ring->rd_p++;

	rxbuf = &rx_ring->rxbufs[idx];
	meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
	data_len = le16_to_cpu(rxd->rxd.data_len);
	pkt_len = data_len - meta_len;

	pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
	if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
		pkt_off += meta_len;
	else
		pkt_off += dp->rx_offset;
	meta_off = pkt_off - meta_len;

	/* Stats update */
	u64_stats_update_begin(&r_vec->rx_sync);
	r_vec->rx_pkts++;
	r_vec->rx_bytes += pkt_len;
	u64_stats_update_end(&r_vec->rx_sync);

	nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);

	if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
		nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
			   meta_len);
		nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
		return true;
	}

	skb = build_skb(rxbuf->frag, dp->fl_bufsz);
	if (unlikely(!skb)) {
		nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
		return true;
	}
	new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
	if (unlikely(!new_frag)) {
		nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
		return true;
	}

	nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);

	nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);

	skb_reserve(skb, pkt_off);
	skb_put(skb, pkt_len);

	nfp_app_ctrl_rx(nn->app, skb);

	return true;
}

static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
{
	struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
	struct nfp_net *nn = r_vec->nfp_net;
	struct nfp_net_dp *dp = &nn->dp;
	unsigned int budget = 512;

	while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
		continue;

	return budget;
}

static void nfp_ctrl_poll(unsigned long arg)
{
	struct nfp_net_r_vector *r_vec = (void *)arg;

	spin_lock(&r_vec->lock);
	nfp_net_tx_complete(r_vec->tx_ring, 0);
	__nfp_ctrl_tx_queued(r_vec);
	spin_unlock(&r_vec->lock);

	if (nfp_ctrl_rx(r_vec)) {
		nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
	} else {
		tasklet_schedule(&r_vec->tasklet);
		nn_dp_warn(&r_vec->nfp_net->dp,
			   "control message budget exceeded!\n");
	}
}
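
/* Setup and Configuration
 */

/**
 * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
 * @nn:		NFP Network structure
 */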
static void nfp_net_vecs_init(struct nfp_net *nn)
{
	struct nfp_net_r_vector *r_vec;
	int r;

	nn->lsc_handler = nfp_net_irq_lsc;
	nn->exn_handler = nfp_net_irq_exn;

	for (r = 0; r < nn->max_r_vecs; r++) {
		struct msix_entry *entry;

		entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];

		r_vec = &nn->r_vecs[r];
		r_vec->nfp_net = nn;
		r_vec->irq_entry = entry->entry;
		r_vec->irq_vector = entry->vector;

		if (nn->dp.netdev) {
			r_vec->handler = nfp_net_irq_rxtx;
		} else {
			r_vec->handler = nfp_ctrl_irq_rxtx;

			__skb_queue_head_init(&r_vec->queue);
			spin_lock_init(&r_vec->lock);
			tasklet_init(&r_vec->tasklet, nfp_ctrl_poll,
				     (unsigned long)r_vec);
			tasklet_disable(&r_vec->tasklet);
		}

		cpumask_set_cpu(r, &r_vec->affinity_mask);
	}
}
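
/**
 * nfp_net_tx_ring_free() - Free resources allocated to a TX ring
 * @tx_ring:   TX ring to free
 */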
static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;

	kvfree(tx_ring->txbufs);

	if (tx_ring->txds)
		dma_free_coherent(dp->dev, tx_ring->size,
				  tx_ring->txds, tx_ring->dma);

	tx_ring->cnt = 0;
	tx_ring->txbufs = NULL;
	tx_ring->txds = NULL;
	tx_ring->dma = 0;
	tx_ring->size = 0;
}
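
/**
 * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
 * @dp:        NFP Net data path struct
 * @tx_ring:   TX ring to allocate
 *
 * Return: 0 on success, negative errno otherwise.
 */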
static int
nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;

	tx_ring->cnt = dp->txd_cnt;

	tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
	tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
					   &tx_ring->dma,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!tx_ring->txds) {
		netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
			    tx_ring->cnt);
		goto err_alloc;
	}

	tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
				   GFP_KERNEL);
	if (!tx_ring->txbufs)
		goto err_alloc;

	if (!tx_ring->is_xdp && dp->netdev)
		netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
				    tx_ring->idx);

	return 0;

err_alloc:
	nfp_net_tx_ring_free(tx_ring);
	return -ENOMEM;
}

static void
nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
			  struct nfp_net_tx_ring *tx_ring)
{
	unsigned int i;

	if (!tx_ring->is_xdp)
		return;

	for (i = 0; i < tx_ring->cnt; i++) {
		if (!tx_ring->txbufs[i].frag)
			return;

		nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
		__free_page(virt_to_page(tx_ring->txbufs[i].frag));
	}
}

static int
nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
			   struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_tx_buf *txbufs = tx_ring->txbufs;
	unsigned int i;

	if (!tx_ring->is_xdp)
		return 0;

	for (i = 0; i < tx_ring->cnt; i++) {
		txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
		if (!txbufs[i].frag) {
			nfp_net_tx_ring_bufs_free(dp, tx_ring);
			return -ENOMEM;
		}
	}

	return 0;
}

static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	unsigned int r;

	dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
			       GFP_KERNEL);
	if (!dp->tx_rings)
		return -ENOMEM;

	for (r = 0; r < dp->num_tx_rings; r++) {
		int bias = 0;

		if (r >= dp->num_stack_tx_rings)
			bias = dp->num_stack_tx_rings;

		nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias],
				     r, bias);

		if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
			goto err_free_prev;

		if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
			goto err_free_ring;
	}

	return 0;

err_free_prev:
	while (r--) {
		nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
err_free_ring:
		nfp_net_tx_ring_free(&dp->tx_rings[r]);
	}
	kfree(dp->tx_rings);
	return -ENOMEM;
}

static void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
{
	unsigned int r;

	for (r = 0; r < dp->num_tx_rings; r++) {
		nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
		nfp_net_tx_ring_free(&dp->tx_rings[r]);
	}

	kfree(dp->tx_rings);
}
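
/**
 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
 * @rx_ring:  RX ring to free
 */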
2503static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
2504{
2505 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
2506 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
2507
2508 if (dp->netdev)
2509 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
2510 kvfree(rx_ring->rxbufs);
2511
2512 if (rx_ring->rxds)
2513 dma_free_coherent(dp->dev, rx_ring->size,
2514 rx_ring->rxds, rx_ring->dma);
2515
2516 rx_ring->cnt = 0;
2517 rx_ring->rxbufs = NULL;
2518 rx_ring->rxds = NULL;
2519 rx_ring->dma = 0;
2520 rx_ring->size = 0;
2521}
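
/**
 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
 * @dp:       NFP Net data path struct
 * @rx_ring:  RX ring to allocate
 *
 * Return: 0 on success, negative errno otherwise.
 */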
2530static int
2531nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
2532{
2533 int err;
2534
2535 if (dp->netdev) {
2536 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
2537 rx_ring->idx);
2538 if (err < 0)
2539 return err;
2540 }
2541
2542 rx_ring->cnt = dp->rxd_cnt;
2543 rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
2544 rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
2545 &rx_ring->dma,
2546 GFP_KERNEL | __GFP_NOWARN);
2547 if (!rx_ring->rxds) {
2548 netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
2549 rx_ring->cnt);
2550 goto err_alloc;
2551 }
2552
2553 rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs),
2554 GFP_KERNEL);
2555 if (!rx_ring->rxbufs)
2556 goto err_alloc;
2557
2558 return 0;
2559
2560err_alloc:
2561 nfp_net_rx_ring_free(rx_ring);
2562 return -ENOMEM;
2563}
2564
2565static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
2566{
2567 unsigned int r;
2568
2569 dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
2570 GFP_KERNEL);
2571 if (!dp->rx_rings)
2572 return -ENOMEM;
2573
2574 for (r = 0; r < dp->num_rx_rings; r++) {
2575 nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);
2576
2577 if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
2578 goto err_free_prev;
2579
2580 if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
2581 goto err_free_ring;
2582 }
2583
2584 return 0;
2585
2586err_free_prev:
2587 while (r--) {
2588 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
2589err_free_ring:
2590 nfp_net_rx_ring_free(&dp->rx_rings[r]);
2591 }
2592 kfree(dp->rx_rings);
2593 return -ENOMEM;
2594}
2595
2596static void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
2597{
2598 unsigned int r;
2599
2600 for (r = 0; r < dp->num_rx_rings; r++) {
2601 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
2602 nfp_net_rx_ring_free(&dp->rx_rings[r]);
2603 }
2604
2605 kfree(dp->rx_rings);
2606}
2607
2608static void
2609nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
2610 struct nfp_net_r_vector *r_vec, int idx)
2611{
2612 r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
2613 r_vec->tx_ring =
2614 idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL;
2615
2616 r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
2617 &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;
2618}
2619
2620static int
2621nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
2622 int idx)
2623{
2624 int err;
2625

	/* Setup NAPI */
2627 if (nn->dp.netdev)
2628 netif_napi_add(nn->dp.netdev, &r_vec->napi,
2629 nfp_net_poll, NAPI_POLL_WEIGHT);
2630 else
2631 tasklet_enable(&r_vec->tasklet);
2632
2633 snprintf(r_vec->name, sizeof(r_vec->name),
2634 "%s-rxtx-%d", nfp_net_name(nn), idx);
2635 err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
2636 r_vec);
2637 if (err) {
2638 if (nn->dp.netdev)
2639 netif_napi_del(&r_vec->napi);
2640 else
2641 tasklet_disable(&r_vec->tasklet);
2642
2643 nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
2644 return err;
2645 }
2646 disable_irq(r_vec->irq_vector);
2647
2648 irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
2649
2650 nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
2651 r_vec->irq_entry);
2652
2653 return 0;
2654}
2655
2656static void
2657nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
2658{
2659 irq_set_affinity_hint(r_vec->irq_vector, NULL);
2660 if (nn->dp.netdev)
2661 netif_napi_del(&r_vec->napi);
2662 else
2663 tasklet_disable(&r_vec->tasklet);
2664
2665 free_irq(r_vec->irq_vector, r_vec);
2666}
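
/**
 * nfp_net_rss_write_itbl() - Write RSS indirection table to device
 * @nn:      NFP Net device to reconfigure
 */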
2672void nfp_net_rss_write_itbl(struct nfp_net *nn)
2673{
2674 int i;
2675
2676 for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4)
2677 nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i,
2678 get_unaligned_le32(nn->rss_itbl + i));
2679}
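
/**
 * nfp_net_rss_write_key() - Write RSS hash key to device
 * @nn:      NFP Net device to reconfigure
 */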
2685void nfp_net_rss_write_key(struct nfp_net *nn)
2686{
2687 int i;
2688
2689 for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4)
2690 nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
2691 get_unaligned_le32(nn->rss_key + i));
2692}
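
/**
 * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW
 * @nn:      NFP Net device to reconfigure
 */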
2698void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
2699{
2700 u8 i;
2701 u32 factor;
2702 u32 value;

	/* Compute factor used to convert coalesce '_usecs' parameters to
	 * ME timestamp ticks.  There are 16 ME clock cycles for each
	 * timestamp count.
	 */
2708 factor = nn->tlv_caps.me_freq_mhz / 16;

	/* copy RX interrupt coalesce parameters */
2711 value = (nn->rx_coalesce_max_frames << 16) |
2712 (factor * nn->rx_coalesce_usecs);
2713 for (i = 0; i < nn->dp.num_rx_rings; i++)
2714 nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);

	/* copy TX interrupt coalesce parameters */
2717 value = (nn->tx_coalesce_max_frames << 16) |
2718 (factor * nn->tx_coalesce_usecs);
2719 for (i = 0; i < nn->dp.num_tx_rings; i++)
2720 nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
2721}
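
/**
 * nfp_net_write_mac_addr() - Write mac address to the device control BAR
 * @nn:      NFP Net device to reconfigure
 * @addr:    MAC address to write
 *
 * Writes the MAC address from the netdev to the device control BAR.  Does not
 * perform the required reconfig.  We do a bit of byte swapping dance because
 * firmware is LE.
 */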
2732static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
2733{
2734 nn_writel(nn, NFP_NET_CFG_MACADDR + 0, get_unaligned_be32(addr));
2735 nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4));
2736}
2737
2738static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
2739{
2740 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
2741 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
2742 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
2743
2744 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
2745 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
2746 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
2747}
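
/**
 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
 * @nn:      NFP Net device to reconfigure
 *
 * Warning: must be fully idempotent.
 */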
2755static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
2756{
2757 u32 new_ctrl, update;
2758 unsigned int r;
2759 int err;
2760
2761 new_ctrl = nn->dp.ctrl;
2762 new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
2763 update = NFP_NET_CFG_UPDATE_GEN;
2764 update |= NFP_NET_CFG_UPDATE_MSIX;
2765 update |= NFP_NET_CFG_UPDATE_RING;
2766
2767 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
2768 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
2769
2770 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
2771 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
2772
2773 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2774 err = nfp_net_reconfig(nn, update);
2775 if (err)
2776 nn_err(nn, "Could not disable device: %d\n", err);
2777
2778 for (r = 0; r < nn->dp.num_rx_rings; r++)
2779 nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
2780 for (r = 0; r < nn->dp.num_tx_rings; r++)
2781 nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
2782 for (r = 0; r < nn->dp.num_r_vecs; r++)
2783 nfp_net_vec_clear_ring_data(nn, r);
2784
2785 nn->dp.ctrl = new_ctrl;
2786}
2787
2788static void
2789nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
2790 struct nfp_net_rx_ring *rx_ring, unsigned int idx)
2791{
	/* Write the DMA address, size and MSI-X info to the device */
2793 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
2794 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
2795 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
2796}
2797
2798static void
2799nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
2800 struct nfp_net_tx_ring *tx_ring, unsigned int idx)
2801{
2802 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
2803 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
2804 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
2805}
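
/**
 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP device
 * @nn:      NFP Net device to reconfigure
 */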
2811static int nfp_net_set_config_and_enable(struct nfp_net *nn)
2812{
2813 u32 bufsz, new_ctrl, update = 0;
2814 unsigned int r;
2815 int err;
2816
2817 new_ctrl = nn->dp.ctrl;
2818
2819 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) {
2820 nfp_net_rss_write_key(nn);
2821 nfp_net_rss_write_itbl(nn);
2822 nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
2823 update |= NFP_NET_CFG_UPDATE_RSS;
2824 }
2825
2826 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) {
2827 nfp_net_coalesce_write_cfg(nn);
2828 update |= NFP_NET_CFG_UPDATE_IRQMOD;
2829 }
2830
2831 for (r = 0; r < nn->dp.num_tx_rings; r++)
2832 nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r);
2833 for (r = 0; r < nn->dp.num_rx_rings; r++)
2834 nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);
2835
2836 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ?
2837 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1);
2838
2839 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
2840 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);
2841
2842 if (nn->dp.netdev)
2843 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
2844
2845 nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu);
2846
2847 bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
2848 nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);
2849
	/* Enable device */
2851 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
2852 update |= NFP_NET_CFG_UPDATE_GEN;
2853 update |= NFP_NET_CFG_UPDATE_MSIX;
2854 update |= NFP_NET_CFG_UPDATE_RING;
2855 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
2856 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
2857
2858 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2859 err = nfp_net_reconfig(nn, update);
2860 if (err) {
2861 nfp_net_clear_config_and_disable(nn);
2862 return err;
2863 }
2864
2865 nn->dp.ctrl = new_ctrl;
2866
2867 for (r = 0; r < nn->dp.num_rx_rings; r++)
2868 nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);
2869
2870 return 0;
2871}
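
/**
 * nfp_net_close_stack() - Quiesce the stack (part of close)
 * @nn:      NFP Net device to reconfigure
 */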
2877static void nfp_net_close_stack(struct nfp_net *nn)
2878{
2879 unsigned int r;
2880
2881 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2882 netif_carrier_off(nn->dp.netdev);
2883 nn->link_up = false;
2884
2885 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2886 disable_irq(nn->r_vecs[r].irq_vector);
2887 napi_disable(&nn->r_vecs[r].napi);
2888 }
2889
2890 netif_tx_disable(nn->dp.netdev);
2891}
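
/**
 * nfp_net_close_free_all() - Free all runtime resources
 * @nn:      NFP Net device to reconfigure
 */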
2897static void nfp_net_close_free_all(struct nfp_net *nn)
2898{
2899 unsigned int r;
2900
2901 nfp_net_tx_rings_free(&nn->dp);
2902 nfp_net_rx_rings_free(&nn->dp);
2903
2904 for (r = 0; r < nn->dp.num_r_vecs; r++)
2905 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2906
2907 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2908 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2909}
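
/**
 * nfp_net_netdev_close() - Called when the device is downed
 * @netdev:      netdev structure
 */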
2915static int nfp_net_netdev_close(struct net_device *netdev)
2916{
2917 struct nfp_net *nn = netdev_priv(netdev);

	/* Step 1: Disable RX and TX rings from the Linux kernel perspective
	 */
2921 nfp_net_close_stack(nn);

	/* Step 2: Tell NFP
	 */
2925 nfp_net_clear_config_and_disable(nn);
2926 nfp_port_configure(netdev, false);

	/* Step 3: Free resources
	 */
2930 nfp_net_close_free_all(nn);
2931
2932 nn_dbg(nn, "%s down", netdev->name);
2933 return 0;
2934}
2935
2936void nfp_ctrl_close(struct nfp_net *nn)
2937{
2938 int r;
2939
2940 rtnl_lock();
2941
2942 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2943 disable_irq(nn->r_vecs[r].irq_vector);
2944 tasklet_disable(&nn->r_vecs[r].tasklet);
2945 }
2946
2947 nfp_net_clear_config_and_disable(nn);
2948
2949 nfp_net_close_free_all(nn);
2950
2951 rtnl_unlock();
2952}
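
/**
 * nfp_net_open_stack() - Start the device from stack's perspective
 * @nn:      NFP Net device to reconfigure
 */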
2958static void nfp_net_open_stack(struct nfp_net *nn)
2959{
2960 unsigned int r;
2961
2962 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2963 napi_enable(&nn->r_vecs[r].napi);
2964 enable_irq(nn->r_vecs[r].irq_vector);
2965 }
2966
2967 netif_tx_wake_all_queues(nn->dp.netdev);
2968
2969 enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2970 nfp_net_read_link_status(nn);
2971}
2972
2973static int nfp_net_open_alloc_all(struct nfp_net *nn)
2974{
2975 int err, r;
2976
2977 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
2978 nn->exn_name, sizeof(nn->exn_name),
2979 NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
2980 if (err)
2981 return err;
2982 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
2983 nn->lsc_name, sizeof(nn->lsc_name),
2984 NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
2985 if (err)
2986 goto err_free_exn;
2987 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2988
2989 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2990 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
2991 if (err)
2992 goto err_cleanup_vec_p;
2993 }
2994
2995 err = nfp_net_rx_rings_prepare(nn, &nn->dp);
2996 if (err)
2997 goto err_cleanup_vec;
2998
2999 err = nfp_net_tx_rings_prepare(nn, &nn->dp);
3000 if (err)
3001 goto err_free_rx_rings;
3002
3003 for (r = 0; r < nn->max_r_vecs; r++)
3004 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
3005
3006 return 0;
3007
3008err_free_rx_rings:
3009 nfp_net_rx_rings_free(&nn->dp);
3010err_cleanup_vec:
3011 r = nn->dp.num_r_vecs;
3012err_cleanup_vec_p:
3013 while (r--)
3014 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
3015 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
3016err_free_exn:
3017 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
3018 return err;
3019}
3020
3021static int nfp_net_netdev_open(struct net_device *netdev)
3022{
3023 struct nfp_net *nn = netdev_priv(netdev);
3024 int err;

	/* Step 1: Allocate resources for rings and the like
	 * - Request interrupts
	 * - Allocate RX and TX ring resources
	 * - Setup initial RSS table
	 */
3031 err = nfp_net_open_alloc_all(nn);
3032 if (err)
3033 return err;
3034
3035 err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
3036 if (err)
3037 goto err_free_all;
3038
3039 err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
3040 if (err)
3041 goto err_free_all;

	/* Step 2: Configure the NFP
	 * - Ifup the physical interface if it exists
	 * - Enable rings from 0 to tx_rings/rx_rings - 1.
	 * - Write MAC address (in case it changed)
	 * - Set the MTU
	 * - Set the Freelist buffer size
	 * - Enable the FW
	 */
3051 err = nfp_port_configure(netdev, true);
3052 if (err)
3053 goto err_free_all;
3054
3055 err = nfp_net_set_config_and_enable(nn);
3056 if (err)
3057 goto err_port_disable;

	/* Step 3: Enable for kernel
	 * - put some freelist descriptors on each RX ring
	 * - enable NAPI on each ring
	 * - enable all TX queues
	 * - set link state
	 */
3065 nfp_net_open_stack(nn);
3066
3067 return 0;
3068
3069err_port_disable:
3070 nfp_port_configure(netdev, false);
3071err_free_all:
3072 nfp_net_close_free_all(nn);
3073 return err;
3074}
3075
3076int nfp_ctrl_open(struct nfp_net *nn)
3077{
3078 int err, r;
3079
	/* ring dumping depends on vNICs being opened/closed under rtnl */
3081 rtnl_lock();
3082
3083 err = nfp_net_open_alloc_all(nn);
3084 if (err)
3085 goto err_unlock;
3086
3087 err = nfp_net_set_config_and_enable(nn);
3088 if (err)
3089 goto err_free_all;
3090
3091 for (r = 0; r < nn->dp.num_r_vecs; r++)
3092 enable_irq(nn->r_vecs[r].irq_vector);
3093
3094 rtnl_unlock();
3095
3096 return 0;
3097
3098err_free_all:
3099 nfp_net_close_free_all(nn);
3100err_unlock:
3101 rtnl_unlock();
3102 return err;
3103}
3104
3105static void nfp_net_set_rx_mode(struct net_device *netdev)
3106{
3107 struct nfp_net *nn = netdev_priv(netdev);
3108 u32 new_ctrl;
3109
3110 new_ctrl = nn->dp.ctrl;
3111
3112 if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI)
3113 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC;
3114 else
3115 new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC;
3116
3117 if (netdev->flags & IFF_PROMISC) {
3118 if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
3119 new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
3120 else
3121 nn_warn(nn, "FW does not support promiscuous mode\n");
3122 } else {
3123 new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
3124 }
3125
3126 if (new_ctrl == nn->dp.ctrl)
3127 return;
3128
3129 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
3130 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
3131
3132 nn->dp.ctrl = new_ctrl;
3133}
3134
3135static void nfp_net_rss_init_itbl(struct nfp_net *nn)
3136{
3137 int i;
3138
3139 for (i = 0; i < sizeof(nn->rss_itbl); i++)
3140 nn->rss_itbl[i] =
3141 ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings);
3142}
3143
3144static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
3145{
3146 struct nfp_net_dp new_dp = *dp;
3147
3148 *dp = nn->dp;
3149 nn->dp = new_dp;
3150
3151 nn->dp.netdev->mtu = new_dp.mtu;
3152
3153 if (!netif_is_rxfh_configured(nn->dp.netdev))
3154 nfp_net_rss_init_itbl(nn);
3155}
3156
3157static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
3158{
3159 unsigned int r;
3160 int err;
3161
3162 nfp_net_dp_swap(nn, dp);
3163
3164 for (r = 0; r < nn->max_r_vecs; r++)
3165 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
3166
3167 err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings);
3168 if (err)
3169 return err;
3170
3171 if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) {
3172 err = netif_set_real_num_tx_queues(nn->dp.netdev,
3173 nn->dp.num_stack_tx_rings);
3174 if (err)
3175 return err;
3176 }
3177
3178 return nfp_net_set_config_and_enable(nn);
3179}
3180
3181struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
3182{
3183 struct nfp_net_dp *new;
3184
3185 new = kmalloc(sizeof(*new), GFP_KERNEL);
3186 if (!new)
3187 return NULL;
3188
3189 *new = nn->dp;

	/* Clear things which need to be recomputed */
3192 new->fl_bufsz = 0;
3193 new->tx_rings = NULL;
3194 new->rx_rings = NULL;
3195 new->num_r_vecs = 0;
3196 new->num_stack_tx_rings = 0;
3197
3198 return new;
3199}
3200
3201static int
3202nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
3203 struct netlink_ext_ack *extack)
3204{
	/* XDP-enabled tests */
3206 if (!dp->xdp_prog)
3207 return 0;
3208 if (dp->fl_bufsz > PAGE_SIZE) {
3209 NL_SET_ERR_MSG_MOD(extack, "MTU too large w/ XDP enabled");
3210 return -EINVAL;
3211 }
3212 if (dp->num_tx_rings > nn->max_tx_rings) {
3213 NL_SET_ERR_MSG_MOD(extack, "Insufficient number of TX rings w/ XDP enabled");
3214 return -EINVAL;
3215 }
3216
3217 return 0;
3218}
3219
3220int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp,
3221 struct netlink_ext_ack *extack)
3222{
3223 int r, err;
3224
3225 dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp);
3226
3227 dp->num_stack_tx_rings = dp->num_tx_rings;
3228 if (dp->xdp_prog)
3229 dp->num_stack_tx_rings -= dp->num_rx_rings;
3230
3231 dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings);
3232
3233 err = nfp_net_check_config(nn, dp, extack);
3234 if (err)
3235 goto exit_free_dp;
3236
3237 if (!netif_running(dp->netdev)) {
3238 nfp_net_dp_swap(nn, dp);
3239 err = 0;
3240 goto exit_free_dp;
3241 }
3242
	/* Prepare new rings */
3244 for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
3245 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
3246 if (err) {
3247 dp->num_r_vecs = r;
3248 goto err_cleanup_vecs;
3249 }
3250 }
3251
3252 err = nfp_net_rx_rings_prepare(nn, dp);
3253 if (err)
3254 goto err_cleanup_vecs;
3255
3256 err = nfp_net_tx_rings_prepare(nn, dp);
3257 if (err)
3258 goto err_free_rx;
3259
	/* Stop device, swap in new rings, try to start the firmware */
3261 nfp_net_close_stack(nn);
3262 nfp_net_clear_config_and_disable(nn);
3263
3264 err = nfp_net_dp_swap_enable(nn, dp);
3265 if (err) {
3266 int err2;
3267
3268 nfp_net_clear_config_and_disable(nn);
3269

		/* Try with old configuration and old rings */
3271 err2 = nfp_net_dp_swap_enable(nn, dp);
3272 if (err2)
3273 nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
3274 err, err2);
3275 }
3276 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
3277 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
3278
3279 nfp_net_rx_rings_free(dp);
3280 nfp_net_tx_rings_free(dp);
3281
3282 nfp_net_open_stack(nn);
3283exit_free_dp:
3284 kfree(dp);
3285
3286 return err;
3287
3288err_free_rx:
3289 nfp_net_rx_rings_free(dp);
3290err_cleanup_vecs:
3291 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
3292 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
3293 kfree(dp);
3294 return err;
3295}
3296
3297static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
3298{
3299 struct nfp_net *nn = netdev_priv(netdev);
3300 struct nfp_net_dp *dp;
3301 int err;
3302
3303 err = nfp_app_check_mtu(nn->app, netdev, new_mtu);
3304 if (err)
3305 return err;
3306
3307 dp = nfp_net_clone_dp(nn);
3308 if (!dp)
3309 return -ENOMEM;
3310
3311 dp->mtu = new_mtu;
3312
3313 return nfp_net_ring_reconfig(nn, dp, NULL);
3314}
3315
3316static int
3317nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3318{
3319 const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD;
3320 struct nfp_net *nn = netdev_priv(netdev);
3321 int err;

	/* Priority tagged packets with vlan id 0 are processed by the
	 * NFP as untagged packets
	 */
3326 if (!vid)
3327 return 0;
3328
3329 err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
3330 if (err)
3331 return err;
3332
3333 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
3334 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
3335 ETH_P_8021Q);
3336
3337 return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
3338}
3339
3340static int
3341nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3342{
3343 const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL;
3344 struct nfp_net *nn = netdev_priv(netdev);
3345 int err;

	/* Priority tagged packets with vlan id 0 are processed by the
	 * NFP as untagged packets
	 */
3350 if (!vid)
3351 return 0;
3352
3353 err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
3354 if (err)
3355 return err;
3356
3357 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
3358 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
3359 ETH_P_8021Q);
3360
3361 return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
3362}
3363
3364static void nfp_net_stat64(struct net_device *netdev,
3365 struct rtnl_link_stats64 *stats)
3366{
3367 struct nfp_net *nn = netdev_priv(netdev);
3368 int r;

	/* Collect software stats */
3371 for (r = 0; r < nn->max_r_vecs; r++) {
3372 struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
3373 u64 data[3];
3374 unsigned int start;
3375
3376 do {
3377 start = u64_stats_fetch_begin(&r_vec->rx_sync);
3378 data[0] = r_vec->rx_pkts;
3379 data[1] = r_vec->rx_bytes;
3380 data[2] = r_vec->rx_drops;
3381 } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
3382 stats->rx_packets += data[0];
3383 stats->rx_bytes += data[1];
3384 stats->rx_dropped += data[2];
3385
3386 do {
3387 start = u64_stats_fetch_begin(&r_vec->tx_sync);
3388 data[0] = r_vec->tx_pkts;
3389 data[1] = r_vec->tx_bytes;
3390 data[2] = r_vec->tx_errors;
3391 } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
3392 stats->tx_packets += data[0];
3393 stats->tx_bytes += data[1];
3394 stats->tx_errors += data[2];
3395 }

	/* Add in device stats */
3398 stats->multicast += nn_readq(nn, NFP_NET_CFG_STATS_RX_MC_FRAMES);
3399 stats->rx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_RX_DISCARDS);
3400 stats->rx_errors += nn_readq(nn, NFP_NET_CFG_STATS_RX_ERRORS);
3401
3402 stats->tx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_TX_DISCARDS);
3403 stats->tx_errors += nn_readq(nn, NFP_NET_CFG_STATS_TX_ERRORS);
3404}
3405
3406static int nfp_net_set_features(struct net_device *netdev,
3407 netdev_features_t features)
3408{
3409 netdev_features_t changed = netdev->features ^ features;
3410 struct nfp_net *nn = netdev_priv(netdev);
3411 u32 new_ctrl;
3412 int err;

	/* Assume this is not called with features we have not advertised */

3416 new_ctrl = nn->dp.ctrl;
3417
3418 if (changed & NETIF_F_RXCSUM) {
3419 if (features & NETIF_F_RXCSUM)
3420 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
3421 else
3422 new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM_ANY;
3423 }
3424
3425 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3426 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
3427 new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
3428 else
3429 new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM;
3430 }
3431
3432 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
3433 if (features & (NETIF_F_TSO | NETIF_F_TSO6))
3434 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
3435 NFP_NET_CFG_CTRL_LSO;
3436 else
3437 new_ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
3438 }
3439
3440 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
3441 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3442 new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
3443 else
3444 new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
3445 }
3446
3447 if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
3448 if (features & NETIF_F_HW_VLAN_CTAG_TX)
3449 new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
3450 else
3451 new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
3452 }
3453
3454 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
3455 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
3456 new_ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
3457 else
3458 new_ctrl &= ~NFP_NET_CFG_CTRL_CTAG_FILTER;
3459 }
3460
3461 if (changed & NETIF_F_SG) {
3462 if (features & NETIF_F_SG)
3463 new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
3464 else
3465 new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
3466 }
3467
3468 err = nfp_port_set_features(netdev, features);
3469 if (err)
3470 return err;
3471
3472 nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
3473 netdev->features, features, changed);
3474
3475 if (new_ctrl == nn->dp.ctrl)
3476 return 0;
3477
3478 nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl);
3479 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
3480 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
3481 if (err)
3482 return err;
3483
3484 nn->dp.ctrl = new_ctrl;
3485
3486 return 0;
3487}
3488
3489static netdev_features_t
3490nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
3491 netdev_features_t features)
3492{
3493 u8 l4_hdr;

	/* We can't do TSO over double tagged packets (802.1AD) */
3496 features &= vlan_features_check(skb, features);
3497
3498 if (!skb->encapsulation)
3499 return features;
3500
3501
3502 if (skb_is_gso(skb)) {
3503 u32 hdrlen;
3504
3505 hdrlen = skb_inner_transport_header(skb) - skb->data +
3506 inner_tcp_hdrlen(skb);

		/* Assume worst case scenario of having longest possible
		 * metadata prepend - 8B
		 */
3511 if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ - 8))
3512 features &= ~NETIF_F_GSO_MASK;
3513 }

	/* VXLAN/GRE check */
3516 switch (vlan_get_protocol(skb)) {
3517 case htons(ETH_P_IP):
3518 l4_hdr = ip_hdr(skb)->protocol;
3519 break;
3520 case htons(ETH_P_IPV6):
3521 l4_hdr = ipv6_hdr(skb)->nexthdr;
3522 break;
3523 default:
3524 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3525 }
3526
3527 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
3528 skb->inner_protocol != htons(ETH_P_TEB) ||
3529 (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) ||
3530 (l4_hdr == IPPROTO_UDP &&
3531 (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
3532 sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
3533 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3534
3535 return features;
3536}
3537
3538static int
3539nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
3540{
3541 struct nfp_net *nn = netdev_priv(netdev);
3542 int n;

	/* If port is defined, devlink_port is registered and devlink core
	 * is taking care of name formatting
	 */
3547 if (nn->port)
3548 return -EOPNOTSUPP;
3549
3550 if (nn->dp.is_vf || nn->vnic_no_name)
3551 return -EOPNOTSUPP;
3552
3553 n = snprintf(name, len, "n%d", nn->id);
3554 if (n >= len)
3555 return -EINVAL;
3556
3557 return 0;
3558}
3559
3560static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
3561{
3562 struct bpf_prog *prog = bpf->prog;
3563 struct nfp_net_dp *dp;
3564 int err;
3565
3566 if (!xdp_attachment_flags_ok(&nn->xdp, bpf))
3567 return -EBUSY;
3568
3569 if (!prog == !nn->dp.xdp_prog) {
3570 WRITE_ONCE(nn->dp.xdp_prog, prog);
3571 xdp_attachment_setup(&nn->xdp, bpf);
3572 return 0;
3573 }
3574
3575 dp = nfp_net_clone_dp(nn);
3576 if (!dp)
3577 return -ENOMEM;
3578
3579 dp->xdp_prog = prog;
3580 dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
3581 dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
3582 dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;

	/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
3585 err = nfp_net_ring_reconfig(nn, dp, bpf->extack);
3586 if (err)
3587 return err;
3588
3589 xdp_attachment_setup(&nn->xdp, bpf);
3590 return 0;
3591}
3592
3593static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf)
3594{
3595 int err;
3596
3597 if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf))
3598 return -EBUSY;
3599
3600 err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
3601 if (err)
3602 return err;
3603
3604 xdp_attachment_setup(&nn->xdp_hw, bpf);
3605 return 0;
3606}
3607
3608static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
3609{
3610 struct nfp_net *nn = netdev_priv(netdev);
3611
3612 switch (xdp->command) {
3613 case XDP_SETUP_PROG:
3614 return nfp_net_xdp_setup_drv(nn, xdp);
3615 case XDP_SETUP_PROG_HW:
3616 return nfp_net_xdp_setup_hw(nn, xdp);
3617 default:
3618 return nfp_app_bpf(nn->app, nn, xdp);
3619 }
3620}
3621
3622static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
3623{
3624 struct nfp_net *nn = netdev_priv(netdev);
3625 struct sockaddr *saddr = addr;
3626 int err;
3627
3628 err = eth_prepare_mac_addr_change(netdev, addr);
3629 if (err)
3630 return err;
3631
3632 nfp_net_write_mac_addr(nn, saddr->sa_data);
3633
3634 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MACADDR);
3635 if (err)
3636 return err;
3637
3638 eth_commit_mac_addr_change(netdev, addr);
3639
3640 return 0;
3641}
3642
3643const struct net_device_ops nfp_net_netdev_ops = {
3644 .ndo_init = nfp_app_ndo_init,
3645 .ndo_uninit = nfp_app_ndo_uninit,
3646 .ndo_open = nfp_net_netdev_open,
3647 .ndo_stop = nfp_net_netdev_close,
3648 .ndo_start_xmit = nfp_net_tx,
3649 .ndo_get_stats64 = nfp_net_stat64,
3650 .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
3651 .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
3652 .ndo_set_vf_mac = nfp_app_set_vf_mac,
3653 .ndo_set_vf_vlan = nfp_app_set_vf_vlan,
3654 .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
3655 .ndo_set_vf_trust = nfp_app_set_vf_trust,
3656 .ndo_get_vf_config = nfp_app_get_vf_config,
3657 .ndo_set_vf_link_state = nfp_app_set_vf_link_state,
3658 .ndo_setup_tc = nfp_port_setup_tc,
3659 .ndo_tx_timeout = nfp_net_tx_timeout,
3660 .ndo_set_rx_mode = nfp_net_set_rx_mode,
3661 .ndo_change_mtu = nfp_net_change_mtu,
3662 .ndo_set_mac_address = nfp_net_set_mac_address,
3663 .ndo_set_features = nfp_net_set_features,
3664 .ndo_features_check = nfp_net_features_check,
3665 .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
3666 .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
3667 .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
3668 .ndo_bpf = nfp_net_xdp,
3669 .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
3670};
3671
3672static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
3673{
3674 struct nfp_net *nn = netdev_priv(netdev);
3675 int i;
3676
3677 BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
3678 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) {
3679 struct udp_tunnel_info ti0, ti1;
3680
3681 udp_tunnel_nic_get_port(netdev, table, i, &ti0);
3682 udp_tunnel_nic_get_port(netdev, table, i + 1, &ti1);
3683
3684 nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(ti0.port),
3685 be16_to_cpu(ti1.port) << 16 | be16_to_cpu(ti0.port));
3686 }
3687
3688 return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN);
3689}
3690
3691static const struct udp_tunnel_nic_info nfp_udp_tunnels = {
3692 .sync_table = nfp_udp_tunnel_sync,
3693 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
3694 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
3695 .tables = {
3696 {
3697 .n_entries = NFP_NET_N_VXLAN_PORTS,
3698 .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,
3699 },
3700 },
3701};
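
/**
 * nfp_net_info() - Print general info about the NIC
 * @nn:      NFP Net device to reconfigure
 */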
3707void nfp_net_info(struct nfp_net *nn)
3708{
3709 nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
3710 nn->dp.is_vf ? "VF " : "",
3711 nn->dp.num_tx_rings, nn->max_tx_rings,
3712 nn->dp.num_rx_rings, nn->max_rx_rings);
3713 nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
3714 nn->fw_ver.resv, nn->fw_ver.class,
3715 nn->fw_ver.major, nn->fw_ver.minor,
3716 nn->max_mtu);
3717 nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
3718 nn->cap,
3719 nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
3720 nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
3721 nn->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
3722 nn->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
3723 nn->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
3724 nn->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
3725 nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
3726 nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
3727 nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
3728 nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO1 " : "",
3729 nn->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSO2 " : "",
3730 nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "",
3731 nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "",
3732 nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
3733 nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
3734 nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
3735 nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
3736 nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
3737 nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
3738 "RXCSUM_COMPLETE " : "",
3739 nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
3740 nfp_app_extra_cap(nn->app, nn));
3741}
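
/**
 * nfp_net_alloc() - Allocate netdev and related structure
 * @pdev:         PCI device
 * @ctrl_bar:     PCI IOMEM with vNIC config memory
 * @needs_netdev: Whether to allocate a netdev for this vNIC
 * @max_tx_rings: Maximum number of TX rings supported by device
 * @max_rx_rings: Maximum number of RX rings supported by device
 *
 * This function allocates a netdev device and fills in the initial
 * part of the @struct nfp_net structure.
 *
 * Return: NFP Net device structure, or ERR_PTR on error.
 */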
3757struct nfp_net *
3758nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
3759 unsigned int max_tx_rings, unsigned int max_rx_rings)
3760{
3761 struct nfp_net *nn;
3762 int err;
3763
3764 if (needs_netdev) {
3765 struct net_device *netdev;
3766
3767 netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
3768 max_tx_rings, max_rx_rings);
3769 if (!netdev)
3770 return ERR_PTR(-ENOMEM);
3771
3772 SET_NETDEV_DEV(netdev, &pdev->dev);
3773 nn = netdev_priv(netdev);
3774 nn->dp.netdev = netdev;
3775 } else {
3776 nn = vzalloc(sizeof(*nn));
3777 if (!nn)
3778 return ERR_PTR(-ENOMEM);
3779 }
3780
3781 nn->dp.dev = &pdev->dev;
3782 nn->dp.ctrl_bar = ctrl_bar;
3783 nn->pdev = pdev;
3784
3785 nn->max_tx_rings = max_tx_rings;
3786 nn->max_rx_rings = max_rx_rings;
3787
3788 nn->dp.num_tx_rings = min_t(unsigned int,
3789 max_tx_rings, num_online_cpus());
3790 nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings,
3791 netif_get_num_default_rss_queues());
3792
3793 nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
3794 nn->dp.num_r_vecs = min_t(unsigned int,
3795 nn->dp.num_r_vecs, num_online_cpus());
3796
3797 nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
3798 nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
3799
3800 sema_init(&nn->bar_lock, 1);
3801
3802 spin_lock_init(&nn->reconfig_lock);
3803 spin_lock_init(&nn->link_status_lock);
3804
3805 timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
3806
3807 err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
3808 &nn->tlv_caps);
3809 if (err)
3810 goto err_free_nn;
3811
3812 err = nfp_ccm_mbox_alloc(nn);
3813 if (err)
3814 goto err_free_nn;
3815
3816 return nn;
3817
3818err_free_nn:
3819 if (nn->dp.netdev)
3820 free_netdev(nn->dp.netdev);
3821 else
3822 vfree(nn);
3823 return ERR_PTR(err);
3824}
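
/**
 * nfp_net_free() - Undo what @nfp_net_alloc() did
 * @nn:      NFP Net device to reconfigure
 */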
3830void nfp_net_free(struct nfp_net *nn)
3831{
3832 WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
3833 nfp_ccm_mbox_free(nn);
3834
3835 if (nn->dp.netdev)
3836 free_netdev(nn->dp.netdev);
3837 else
3838 vfree(nn);
3839}
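
/**
 * nfp_net_rss_key_sz() - Get current size of the RSS key
 * @nn:		NFP Net device instance
 *
 * Return: size of the RSS key for currently selected hash function.
 */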
3847unsigned int nfp_net_rss_key_sz(struct nfp_net *nn)
3848{
3849 switch (nn->rss_hfunc) {
3850 case ETH_RSS_HASH_TOP:
3851 return NFP_NET_CFG_RSS_KEY_SZ;
3852 case ETH_RSS_HASH_XOR:
3853 return 0;
3854 case ETH_RSS_HASH_CRC32:
3855 return 4;
3856 }
3857
3858 nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc);
3859 return 0;
3860}
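
/**
 * nfp_net_rss_init() - Set the initial RSS parameters
 * @nn:      NFP Net device to reconfigure
 */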
3866static void nfp_net_rss_init(struct nfp_net *nn)
3867{
3868 unsigned long func_bit, rss_cap_hfunc;
3869 u32 reg;

	/* Read the RSS function capability and select first supported func */
3872 reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP);
3873 rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg);
3874 if (!rss_cap_hfunc)
3875 rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC,
3876 NFP_NET_CFG_RSS_TOEPLITZ);
3877
3878 func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
3879 if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
3880 dev_warn(nn->dp.dev,
3881 "Bad RSS config, defaulting to Toeplitz hash\n");
3882 func_bit = ETH_RSS_HASH_TOP_BIT;
3883 }
3884 nn->rss_hfunc = 1 << func_bit;
3885
3886 netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn));
3887
3888 nfp_net_rss_init_itbl(nn);

	/* Enable IPv4/IPv6 TCP by default */
3891 nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
3892 NFP_NET_CFG_RSS_IPV6_TCP |
3893 FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) |
3894 NFP_NET_CFG_RSS_MASK;
3895}
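
/**
 * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
 * @nn:      NFP Net device to reconfigure
 */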
3901static void nfp_net_irqmod_init(struct nfp_net *nn)
3902{
3903 nn->rx_coalesce_usecs = 50;
3904 nn->rx_coalesce_max_frames = 64;
3905 nn->tx_coalesce_usecs = 50;
3906 nn->tx_coalesce_max_frames = 64;
3907}
3908
3909static void nfp_net_netdev_init(struct nfp_net *nn)
3910{
3911 struct net_device *netdev = nn->dp.netdev;
3912
3913 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
3914
3915 netdev->mtu = nn->dp.mtu;

	/* Advertise/enable offloads based on capabilities
	 *
	 * Note: netdev->features show the currently enabled features
	 * and netdev->hw_features advertises which features are
	 * supported.  By default we enable most features.
	 */
3923 if (nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
3924 netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
3925
3926 netdev->hw_features = NETIF_F_HIGHDMA;
3927 if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY) {
3928 netdev->hw_features |= NETIF_F_RXCSUM;
3929 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
3930 }
3931 if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
3932 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3933 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
3934 }
3935 if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
3936 netdev->hw_features |= NETIF_F_SG;
3937 nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
3938 }
3939 if ((nn->cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
3940 nn->cap & NFP_NET_CFG_CTRL_LSO2) {
3941 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
3942 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
3943 NFP_NET_CFG_CTRL_LSO;
3944 }
3945 if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
3946 netdev->hw_features |= NETIF_F_RXHASH;
3947 if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) {
3948 if (nn->cap & NFP_NET_CFG_CTRL_LSO)
3949 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
3950 netdev->udp_tunnel_nic_info = &nfp_udp_tunnels;
3951 nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN;
3952 }
3953 if (nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
3954 if (nn->cap & NFP_NET_CFG_CTRL_LSO)
3955 netdev->hw_features |= NETIF_F_GSO_GRE;
3956 nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE;
3957 }
3958 if (nn->cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE))
3959 netdev->hw_enc_features = netdev->hw_features;
3960
3961 netdev->vlan_features = netdev->hw_features;
3962
3963 if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
3964 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
3965 nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
3966 }
3967 if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
3968 if (nn->cap & NFP_NET_CFG_CTRL_LSO2) {
3969 nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
3970 } else {
3971 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
3972 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
3973 }
3974 }
3975 if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) {
3976 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3977 nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
3978 }
3979
3980 netdev->features = netdev->hw_features;
3981
3982 if (nfp_app_has_tc(nn->app) && nn->port)
3983 netdev->hw_features |= NETIF_F_HW_TC;

	/* Advertise but disable TSO by default. */
3986 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
3987 nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;

	/* Finalise the netdev setup */
3990 netdev->netdev_ops = &nfp_net_netdev_ops;
3991 netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);

	/* MTU range: 68 - hw-specific max */
3994 netdev->min_mtu = ETH_MIN_MTU;
3995 netdev->max_mtu = nn->max_mtu;
3996
3997 netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
3998
3999 netif_carrier_off(netdev);
4000
4001 nfp_net_set_ethtool_ops(netdev);
4002}
4003
4004static int nfp_net_read_caps(struct nfp_net *nn)
4005{
	/* Get some of the read-only fields from the BAR */
4007 nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
4008 nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);

	/* ABI 4.x and ctrl vNIC always use chained metadata, in other cases
	 * we allow use of non-chained metadata if RSS(v1) is the only
	 * advertised capability requiring metadata.
	 */
4014 nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
4015 !nn->dp.netdev ||
4016 !(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
4017 nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
4018

	/* RSS(v1) uses non-chained metadata format, except in ABI 4.x where
	 * it has the same meaning as RSSv2.
	 */
4021 if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
4022 nn->cap &= ~NFP_NET_CFG_CTRL_RSS;

	/* Determine RX packet/metadata boundary offset */
4025 if (nn->fw_ver.major >= 2) {
4026 u32 reg;
4027
4028 reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
4029 if (reg > NFP_NET_MAX_PREPEND) {
4030 nn_err(nn, "Invalid rx offset: %d\n", reg);
4031 return -EINVAL;
4032 }
4033 nn->dp.rx_offset = reg;
4034 } else {
4035 nn->dp.rx_offset = NFP_NET_RX_OFFSET;
4036 }

	/* For control vNICs mask out the capabilities app doesn't want. */
4039 if (!nn->dp.netdev)
4040 nn->cap &= nn->app->type->ctrl_cap_mask;
4041
4042 return 0;
4043}
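
/**
 * nfp_net_init() - Initialise/finalise the nfp_net structure
 * @nn:		NFP Net device structure
 *
 * Return: 0 on success or negative errno on error.
 */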
4051int nfp_net_init(struct nfp_net *nn)
4052{
4053 int err;
4054
4055 nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
4056
4057 err = nfp_net_read_caps(nn);
4058 if (err)
4059 return err;

	/* Set default MTU and Freelist buffer size */
4062 if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) {
4063 nn->dp.mtu = min(nn->app->ctrl_mtu, nn->max_mtu);
4064 } else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) {
4065 nn->dp.mtu = nn->max_mtu;
4066 } else {
4067 nn->dp.mtu = NFP_NET_DEFAULT_MTU;
4068 }
4069 nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
4070
4071 if (nfp_app_ctrl_uses_data_vnics(nn->app))
4072 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_CMSG_DATA;
4073
4074 if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
4075 nfp_net_rss_init(nn);
4076 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
4077 NFP_NET_CFG_CTRL_RSS;
4078 }

	/* Allow L2 Broadcast and Multicast through by default, if supported */
4081 if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
4082 nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;

	/* Allow IRQ moderation, if supported */
4085 if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
4086 nfp_net_irqmod_init(nn);
4087 nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
4088 }

	/* Stash the re-configuration queue away.  First odd queue in TX Bar */
4091 nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;

	/* Make sure the FW knows the netdev is supposed to be disabled here */
4094 nn_writel(nn, NFP_NET_CFG_CTRL, 0);
4095 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
4096 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
4097 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
4098 NFP_NET_CFG_UPDATE_GEN);
4099 if (err)
4100 return err;
4101
4102 if (nn->dp.netdev) {
4103 nfp_net_netdev_init(nn);
4104
4105 err = nfp_ccm_mbox_init(nn);
4106 if (err)
4107 return err;
4108
4109 err = nfp_net_tls_init(nn);
4110 if (err)
4111 goto err_clean_mbox;
4112 }
4113
4114 nfp_net_vecs_init(nn);
4115
4116 if (!nn->dp.netdev)
4117 return 0;
4118 return register_netdev(nn->dp.netdev);
4119
4120err_clean_mbox:
4121 nfp_ccm_mbox_clean(nn);
4122 return err;
4123}
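
/**
 * nfp_net_clean() - Undo what nfp_net_init() did.
 * @nn:		NFP Net device structure
 */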
4129void nfp_net_clean(struct nfp_net *nn)
4130{
4131 if (!nn->dp.netdev)
4132 return;
4133
4134 unregister_netdev(nn->dp.netdev);
4135 nfp_ccm_mbox_clean(nn);
4136 nfp_net_reconfig_wait_posted(nn);
4137}
4138