#ifndef _NFP_NET_H_
#define _NFP_NET_H_

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-hi-lo.h>

#include "nfp_net_ctrl.h"

#define nn_err(nn, fmt, args...)  netdev_err((nn)->netdev, fmt, ## args)
#define nn_warn(nn, fmt, args...) netdev_warn((nn)->netdev, fmt, ## args)
#define nn_info(nn, fmt, args...) netdev_info((nn)->netdev, fmt, ## args)
#define nn_dbg(nn, fmt, args...)  netdev_dbg((nn)->netdev, fmt, ## args)
#define nn_warn_ratelimit(nn, fmt, args...)				\
	do {								\
		if (unlikely(net_ratelimit()))				\
			netdev_warn((nn)->netdev, fmt, ## args);	\
	} while (0)

/* Max time to wait for NFP to respond on updates (in seconds) */
#define NFP_NET_POLL_TIMEOUT	5

/* Bar allocation */
#define NFP_NET_CTRL_BAR	0
#define NFP_NET_Q0_BAR		2
#define NFP_NET_Q1_BAR		4

/* Max bits in DMA address */
#define NFP_NET_MAX_DMA_BITS	40

/* Default size for MTU and freelist buffer sizes */
#define NFP_NET_DEFAULT_MTU	1500

/* Maximum number of bytes prepended to a packet */
#define NFP_NET_MAX_PREPEND	64

/* Interrupt definitions */
#define NFP_NET_NON_Q_VECTORS	2
#define NFP_NET_IRQ_LSC_IDX	0
#define NFP_NET_IRQ_EXN_IDX	1
#define NFP_NET_MIN_PORT_IRQS	(NFP_NET_NON_Q_VECTORS + 1)

/* Queue/ring definitions */
#define NFP_NET_MAX_TX_RINGS	64	/* Max. # of TX rings supported */
#define NFP_NET_MAX_RX_RINGS	64	/* Max. # of RX rings supported */
#define NFP_NET_MAX_R_VECS	(NFP_NET_MAX_TX_RINGS > NFP_NET_MAX_RX_RINGS ? \
				 NFP_NET_MAX_TX_RINGS : NFP_NET_MAX_RX_RINGS)
#define NFP_NET_MAX_IRQS	(NFP_NET_NON_Q_VECTORS + NFP_NET_MAX_R_VECS)

#define NFP_NET_MIN_TX_DESCS	256	/* Min. # of TX descs per ring */
#define NFP_NET_MIN_RX_DESCS	256	/* Min. # of RX descs per ring */
#define NFP_NET_MAX_TX_DESCS	(256 * 1024) /* Max. # of TX descs per ring */
#define NFP_NET_MAX_RX_DESCS	(256 * 1024) /* Max. # of RX descs per ring */

#define NFP_NET_TX_DESCS_DEFAULT 4096	/* Default # of TX descs */
#define NFP_NET_RX_DESCS_DEFAULT 4096	/* Default # of RX descs */

#define NFP_NET_FL_BATCH	16	/* Add freelist buffers in this batch size */

/* Offload definitions */
#define NFP_NET_N_VXLAN_PORTS	(NFP_NET_CFG_VXLAN_SZ / sizeof(__be16))

#define NFP_NET_RX_BUF_HEADROOM	(NET_SKB_PAD + NET_IP_ALIGN)
#define NFP_NET_RX_BUF_NON_DATA	(NFP_NET_RX_BUF_HEADROOM +		\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* Forward declarations */
struct nfp_cpp;
struct nfp_net;
struct nfp_net_r_vector;

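/* nfp_desc_set_dma_addr() - store a DMA address in a TX/RX descriptor.
 *
 * Descriptors carry a 40-bit buffer address (see NFP_NET_MAX_DMA_BITS)
 * split into a 32-bit low word and an 8-bit high byte; this helper
 * performs the split and the endian conversion of the low word.
 */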
#define nfp_desc_set_dma_addr(desc, dma_addr)				\
	do {								\
		__typeof(desc) __d = (desc);				\
		dma_addr_t __addr = (dma_addr);				\
									\
		__d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr));	\
		__d->dma_addr_hi = upper_32_bits(__addr) & 0xff;	\
	} while (0)

/* TX descriptor format */

#define PCIE_DESC_TX_EOP		BIT(7)
#define PCIE_DESC_TX_OFFSET_MASK	GENMASK(6, 0)
#define PCIE_DESC_TX_MSS_MASK		GENMASK(13, 0)

/* Flags in the host TX descriptor */
#define PCIE_DESC_TX_CSUM		BIT(7)
#define PCIE_DESC_TX_IP4_CSUM		BIT(6)
#define PCIE_DESC_TX_TCP_CSUM		BIT(5)
#define PCIE_DESC_TX_UDP_CSUM		BIT(4)
#define PCIE_DESC_TX_VLAN		BIT(3)
#define PCIE_DESC_TX_LSO		BIT(2)
#define PCIE_DESC_TX_ENCAP		BIT(1)
#define PCIE_DESC_TX_O_IP4_CSUM		BIT(0)

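/**
 * struct nfp_net_tx_desc - TX descriptor format
 * @dma_addr_hi: High bits of host buffer address
 * @dma_len:	 Length to DMA for this descriptor
 * @offset_eop:	 Offset in buffer where packet starts; the highest bit is
 *		 the end-of-packet (EOP) flag
 * @dma_addr_lo: Low 32 bits of host buffer address
 * @mss:	 MSS to be used for LSO
 * @l4_offset:	 For LSO, where the L4 data starts
 * @flags:	 TX flags, see @PCIE_DESC_TX_*
 * @vlan:	 VLAN tag to add if indicated
 * @data_len:	 Length of frame + metadata
 * @vals:	 Raw view of the descriptor as four 32-bit words
 */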
struct nfp_net_tx_desc {
	union {
		struct {
			u8 dma_addr_hi;
			__le16 dma_len;
			u8 offset_eop;

			__le32 dma_addr_lo;

			__le16 mss;
			u8 l4_offset;
			u8 flags;

			__le16 vlan;
			__le16 data_len;
		} __packed;
		__le32 vals[4];
	};
};

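/**
 * struct nfp_net_tx_buf - software state for a TX descriptor
 * @skb:	sk_buff associated with this buffer
 * @dma_addr:	DMA mapping address of the buffer
 * @fidx:	Fragment index (-1 for the SKB head, >= 0 for fragments)
 * @pkt_cnt:	Number of packets to be produced from the skb (valid only
 *		on the buffer holding the head)
 * @real_len:	Number of bytes to be produced from the skb (valid only
 *		on the buffer holding the head)
 */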
struct nfp_net_tx_buf {
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	short int fidx;
	u16 pkt_cnt;
	u32 real_len;
};

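/**
 * struct nfp_net_tx_ring - TX ring structure
 * @r_vec:      Back pointer to ring vector structure
 * @idx:        Ring index from Linux's perspective
 * @qcidx:      Queue Controller Peripheral (QCP) queue index for the TX queue
 * @qcp_q:      Pointer to base of the QCP TX queue
 * @cnt:        Size of the queue in number of descriptors
 * @wr_p:       TX ring write pointer (free running)
 * @rd_p:       TX ring read pointer (free running)
 * @qcp_rd_p:   Local copy of QCP TX queue read pointer
 * @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer
 *              (used for delayed doorbell kicks)
 * @txbufs:     Array of transmitted TX buffers, to free on transmit
 * @txds:       Virtual address of TX ring in host memory
 * @dma:        DMA address of the TX ring
 * @size:       Size, in bytes, of the TX ring (needed to free)
 */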
struct nfp_net_tx_ring {
	struct nfp_net_r_vector *r_vec;

	u32 idx;
	int qcidx;
	u8 __iomem *qcp_q;

	u32 cnt;
	u32 wr_p;
	u32 rd_p;
	u32 qcp_rd_p;

	u32 wr_ptr_add;

	struct nfp_net_tx_buf *txbufs;
	struct nfp_net_tx_desc *txds;

	dma_addr_t dma;
	unsigned int size;
} ____cacheline_aligned;

/* RX and freelist descriptor format */

#define PCIE_DESC_RX_DD			BIT(7)
#define PCIE_DESC_RX_META_LEN_MASK	GENMASK(6, 0)

/* Flags in the RX descriptor */
#define PCIE_DESC_RX_RSS		cpu_to_le16(BIT(15))
#define PCIE_DESC_RX_I_IP4_CSUM		cpu_to_le16(BIT(14))
#define PCIE_DESC_RX_I_IP4_CSUM_OK	cpu_to_le16(BIT(13))
#define PCIE_DESC_RX_I_TCP_CSUM		cpu_to_le16(BIT(12))
#define PCIE_DESC_RX_I_TCP_CSUM_OK	cpu_to_le16(BIT(11))
#define PCIE_DESC_RX_I_UDP_CSUM		cpu_to_le16(BIT(10))
#define PCIE_DESC_RX_I_UDP_CSUM_OK	cpu_to_le16(BIT(9))
#define PCIE_DESC_RX_SPARE		cpu_to_le16(BIT(8))
#define PCIE_DESC_RX_EOP		cpu_to_le16(BIT(7))
#define PCIE_DESC_RX_IP4_CSUM		cpu_to_le16(BIT(6))
#define PCIE_DESC_RX_IP4_CSUM_OK	cpu_to_le16(BIT(5))
#define PCIE_DESC_RX_TCP_CSUM		cpu_to_le16(BIT(4))
#define PCIE_DESC_RX_TCP_CSUM_OK	cpu_to_le16(BIT(3))
#define PCIE_DESC_RX_UDP_CSUM		cpu_to_le16(BIT(2))
#define PCIE_DESC_RX_UDP_CSUM_OK	cpu_to_le16(BIT(1))
#define PCIE_DESC_RX_VLAN		cpu_to_le16(BIT(0))

#define PCIE_DESC_RX_CSUM_ALL		(PCIE_DESC_RX_IP4_CSUM |	\
					 PCIE_DESC_RX_TCP_CSUM |	\
					 PCIE_DESC_RX_UDP_CSUM |	\
					 PCIE_DESC_RX_I_IP4_CSUM |	\
					 PCIE_DESC_RX_I_TCP_CSUM |	\
					 PCIE_DESC_RX_I_UDP_CSUM)
#define PCIE_DESC_RX_CSUM_OK_SHIFT	1
#define __PCIE_DESC_RX_CSUM_ALL		le16_to_cpu(PCIE_DESC_RX_CSUM_ALL)
#define __PCIE_DESC_RX_CSUM_ALL_OK	(__PCIE_DESC_RX_CSUM_ALL >>	\
					 PCIE_DESC_RX_CSUM_OK_SHIFT)

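/**
 * struct nfp_net_rx_desc - freelist/RX descriptor format
 * @fld:	Freelist descriptor view, written by the host to hand a
 *		buffer to the device: 40-bit buffer address split into
 *		@fld.dma_addr_hi and @fld.dma_addr_lo
 * @rxd:	RX descriptor view, written back by the device on packet
 *		reception: @rxd.data_len, prepended metadata length plus
 *		descriptor-done flag in @rxd.meta_len_dd, @rxd.flags
 *		(see @PCIE_DESC_RX_*) and the stripped @rxd.vlan tag
 * @vals:	Raw view of the descriptor as two 32-bit words
 */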
struct nfp_net_rx_desc {
	union {
		struct {
			u8 dma_addr_hi;
			__le16 reserved;
			u8 meta_len_dd;

			__le32 dma_addr_lo;
		} __packed fld;

		struct {
			__le16 data_len;
			u8 reserved;
			u8 meta_len_dd;

			__le16 flags;
			__le16 vlan;
		} __packed rxd;

		__le32 vals[2];
	};
};

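/**
 * struct nfp_net_rx_hash - RX hash info prepended to the packet data
 * @hash_type:	Type of hash the device computed (big endian)
 * @hash:	Hash value (big endian)
 */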
struct nfp_net_rx_hash {
	__be32 hash_type;
	__be32 hash;
};

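/**
 * struct nfp_net_rx_buf - software state for one freelist/RX buffer
 * @frag:	page fragment backing the buffer
 * @dma_addr:	DMA mapping address of the buffer
 */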
struct nfp_net_rx_buf {
	void *frag;
	dma_addr_t dma_addr;
};

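/**
 * struct nfp_net_rx_ring - RX ring structure
 * @r_vec:      Back pointer to ring vector structure
 * @cnt:        Size of the queue in number of descriptors
 * @wr_p:       FL/RX ring write pointer (free running)
 * @rd_p:       FL/RX ring read pointer (free running)
 * @idx:        Ring index from Linux's perspective
 * @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer
 *              (used for freelist batching)
 * @fl_qcidx:   Queue Controller Peripheral (QCP) queue index for the freelist
 * @rx_qcidx:   Queue Controller Peripheral (QCP) queue index for the RX queue
 * @qcp_fl:     Pointer to base of the QCP freelist queue
 * @qcp_rx:     Pointer to base of the QCP RX queue
 * @rxbufs:     Array of FL/RX buffers
 * @rxds:       Virtual address of FL/RX ring in host memory
 * @dma:        DMA address of the FL/RX ring
 * @size:       Size, in bytes, of the FL/RX ring (needed to free)
 * @bufsz:      Buffer allocation size for convenience of management routines
 *              (NOTE: this is in the second cache line, do not use on fast path)
 */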
struct nfp_net_rx_ring {
	struct nfp_net_r_vector *r_vec;

	u32 cnt;
	u32 wr_p;
	u32 rd_p;

	u16 idx;
	u16 wr_ptr_add;

	int fl_qcidx;
	int rx_qcidx;
	u8 __iomem *qcp_fl;
	u8 __iomem *qcp_rx;

	struct nfp_net_rx_buf *rxbufs;
	struct nfp_net_rx_desc *rxds;

	dma_addr_t dma;
	unsigned int size;
	unsigned int bufsz;
} ____cacheline_aligned;

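/**
 * struct nfp_net_r_vector - Per ring interrupt vector configuration
 * @nfp_net:        Backpointer to nfp_net structure
 * @napi:           NAPI structure for this ring vector
 * @tx_ring:        Pointer to TX ring
 * @rx_ring:        Pointer to RX ring
 * @irq_entry:      MSI-X table entry (use for talking to the device)
 * @rx_sync:        Seqlock for atomic updates of RX stats
 * @rx_pkts:        Number of received packets
 * @rx_bytes:       Number of received bytes
 * @rx_drops:       Number of packets dropped on RX due to lack of resources
 * @hw_csum_rx_ok:  Counter of packets where the HW checksum was OK
 * @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK
 * @hw_csum_rx_error:    Counter of packets with bad checksums
 * @tx_sync:        Seqlock for atomic updates of TX stats
 * @tx_pkts:        Number of transmitted packets
 * @tx_bytes:       Number of transmitted bytes
 * @hw_csum_tx:     Counter of packets with TX checksum offload requested
 * @hw_csum_tx_inner:    Counter of inner TX checksum offload requests
 * @tx_gather:      Counter of packets with gather DMA
 * @tx_lso:         Counter of LSO packets sent
 * @tx_errors:      How many TX errors were encountered
 * @tx_busy:        How often was TX busy (no space)
 * @irq_vector:     Interrupt vector number (use for talking to the OS)
 * @handler:        Interrupt handler for this ring vector
 * @name:           Name of the interrupt vector
 * @affinity_mask:  SMP affinity mask for this vector
 *
 * This structure ties RX and TX rings to interrupt vectors and a NAPI
 * context; it currently associates one RX and one TX ring with each
 * vector.
 */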
struct nfp_net_r_vector {
	struct nfp_net *nfp_net;
	struct napi_struct napi;

	struct nfp_net_tx_ring *tx_ring;
	struct nfp_net_rx_ring *rx_ring;

	u16 irq_entry;

	struct u64_stats_sync rx_sync;
	u64 rx_pkts;
	u64 rx_bytes;
	u64 rx_drops;
	u64 hw_csum_rx_ok;
	u64 hw_csum_rx_inner_ok;
	u64 hw_csum_rx_error;

	struct u64_stats_sync tx_sync;
	u64 tx_pkts;
	u64 tx_bytes;
	u64 hw_csum_tx;
	u64 hw_csum_tx_inner;
	u64 tx_gather;
	u64 tx_lso;
	u64 tx_errors;
	u64 tx_busy;

	u32 irq_vector;
	irq_handler_t handler;
	char name[IFNAMSIZ + 8];
	cpumask_t affinity_mask;
} ____cacheline_aligned;

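/* Firmware version, laid out as it is written in the 32-bit version word
 * of the control BAR; filled in by nfp_net_get_fw_version().
 */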
struct nfp_net_fw_version {
	u8 minor;
	u8 major;
	u8 class;
	u8 resv;
} __packed;

static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
				     u8 resv, u8 class, u8 major, u8 minor)
{
	return fw_ver->resv == resv &&
	       fw_ver->class == class &&
	       fw_ver->major == major &&
	       fw_ver->minor == minor;
}

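/**
 * struct nfp_net - NFP network device structure
 * @pdev:               Backpointer to PCI device
 * @netdev:             Backpointer to net_device structure
 * @is_vf:              Is the driver attached to a VF?
 * @fw_loaded:          Is the firmware loaded?
 * @ctrl:               Local copy of the control register/word
 * @fl_bufsz:           Currently configured size of the freelist buffers
 * @rx_offset:          Offset in the RX buffers where packet data starts
 * @tx_rings:           Array of pre-allocated TX ring structures
 * @rx_rings:           Array of pre-allocated RX ring structures
 * @fw_ver:             Firmware version
 * @cap:                Capabilities advertised by the firmware
 * @max_mtu:            Maximum supported MTU advertised by the firmware
 * @rss_cfg:            RSS configuration
 * @rss_key:            RSS secret key
 * @rss_itbl:           RSS indirection table
 * @max_tx_rings:       Maximum number of TX rings supported by the firmware
 * @max_rx_rings:       Maximum number of RX rings supported by the firmware
 * @num_tx_rings:       Currently configured number of TX rings
 * @num_rx_rings:       Currently configured number of RX rings
 * @stride_tx:          Queue controller TX queue spacing
 * @stride_rx:          Queue controller RX queue spacing
 * @txd_cnt:            Size of the TX ring in number of descriptors
 * @rxd_cnt:            Size of the RX ring in number of descriptors
 * @max_r_vecs:         Number of allocated interrupt vectors for RX/TX
 * @num_r_vecs:         Number of used ring vectors
 * @r_vecs:             Pre-allocated array of ring vectors
 * @irq_entries:        Pre-allocated array of MSI-X entries
 * @lsc_handler:        Handler for Link State Change interrupt
 * @lsc_name:           Name for Link State Change interrupt
 * @exn_handler:        Handler for Exception interrupt
 * @exn_name:           Name for Exception interrupt
 * @shared_handler:     Handler for shared interrupts
 * @shared_name:        Name for shared interrupt
 * @me_freq_mhz:        ME clock frequency in MHz
 * @link_up:            Is the link up?
 * @link_status_lock:   Protects @link_up and ensures atomicity with BAR reading
 * @reconfig_lock:      Protects HW reconfiguration request regs/machinery
 * @reconfig_posted:    Pending reconfig bits coming from async sources
 * @reconfig_timer_active:  Timer for reading reconfiguration results is pending
 * @reconfig_sync_present:  Some thread is performing synchronous reconfig
 * @reconfig_timer:     Timer for async reading of reconfig results
 * @rx_coalesce_usecs:      RX interrupt moderation usecs delay parameter
 * @rx_coalesce_max_frames: RX interrupt moderation frame count parameter
 * @tx_coalesce_usecs:      TX interrupt moderation usecs delay parameter
 * @tx_coalesce_max_frames: TX interrupt moderation frame count parameter
 * @vxlan_ports:        VXLAN ports for RX inner csum offload
 * @vxlan_usecnt:       Use counts for the offloaded VXLAN ports
 * @qcp_cfg:            Pointer to QCP queue used for configuration notification
 * @ctrl_bar:           Pointer to mapped control BAR
 * @tx_bar:             Pointer to mapped TX queues BAR
 * @rx_bar:             Pointer to mapped RX queues BAR
 * @debugfs_dir:        Device directory in debugfs
 * @ethtool_dump_flag:  Ethtool dump flag
 * @port_list:          Entry on device port list
 * @cpp:                Pointer to the CPP handle for this device
 */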
struct nfp_net {
	struct pci_dev *pdev;
	struct net_device *netdev;

	unsigned is_vf:1;
	unsigned fw_loaded:1;

	u32 ctrl;
	u32 fl_bufsz;

	u32 rx_offset;

	struct nfp_net_tx_ring *tx_rings;
	struct nfp_net_rx_ring *rx_rings;

	struct nfp_net_fw_version fw_ver;
	u32 cap;
	u32 max_mtu;

	u32 rss_cfg;
	u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
	u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];

	unsigned int max_tx_rings;
	unsigned int max_rx_rings;

	unsigned int num_tx_rings;
	unsigned int num_rx_rings;

	int stride_tx;
	int stride_rx;

	int txd_cnt;
	int rxd_cnt;

	unsigned int max_r_vecs;
	unsigned int num_r_vecs;
	struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS];
	struct msix_entry irq_entries[NFP_NET_MAX_IRQS];

	irq_handler_t lsc_handler;
	char lsc_name[IFNAMSIZ + 8];

	irq_handler_t exn_handler;
	char exn_name[IFNAMSIZ + 8];

	irq_handler_t shared_handler;
	char shared_name[IFNAMSIZ + 8];

	u32 me_freq_mhz;

	bool link_up;
	spinlock_t link_status_lock;

	spinlock_t reconfig_lock;
	u32 reconfig_posted;
	bool reconfig_timer_active;
	bool reconfig_sync_present;
	struct timer_list reconfig_timer;

	u32 rx_coalesce_usecs;
	u32 rx_coalesce_max_frames;
	u32 tx_coalesce_usecs;
	u32 tx_coalesce_max_frames;

	__be16 vxlan_ports[NFP_NET_N_VXLAN_PORTS];
	u8 vxlan_usecnt[NFP_NET_N_VXLAN_PORTS];

	u8 __iomem *qcp_cfg;

	u8 __iomem *ctrl_bar;
	u8 __iomem *tx_bar;
	u8 __iomem *rx_bar;

	struct dentry *debugfs_dir;
	u32 ethtool_dump_flag;

	struct list_head port_list;

	struct nfp_cpp *cpp;
};

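/**
 * struct nfp_net_ring_set - set of rings (TX or RX) to be reconfigured
 * @n_rings:	Number of rings
 * @mtu:	Device MTU to use with this ring set
 * @dcnt:	Number of descriptors per ring
 * @rings:	Array of ring structures
 */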
struct nfp_net_ring_set {
	unsigned int n_rings;
	unsigned int mtu;
	unsigned int dcnt;
	void *rings;
};

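/* Functions to read/write from/to the control BAR.
 * They perform any endian conversion necessary.
 *
 * Illustrative usage sketch (NFP_NET_CFG_VERSION comes from nfp_net_ctrl.h):
 *
 *	u32 ver = nn_readl(nn, NFP_NET_CFG_VERSION);
 */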
static inline u8 nn_readb(struct nfp_net *nn, int off)
{
	return readb(nn->ctrl_bar + off);
}

static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
{
	writeb(val, nn->ctrl_bar + off);
}

static inline u16 nn_readw(struct nfp_net *nn, int off)
{
	return readw(nn->ctrl_bar + off);
}

static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
{
	writew(val, nn->ctrl_bar + off);
}

static inline u32 nn_readl(struct nfp_net *nn, int off)
{
	return readl(nn->ctrl_bar + off);
}

static inline void nn_writel(struct nfp_net *nn, int off, u32 val)
{
	writel(val, nn->ctrl_bar + off);
}

static inline u64 nn_readq(struct nfp_net *nn, int off)
{
	return readq(nn->ctrl_bar + off);
}

static inline void nn_writeq(struct nfp_net *nn, int off, u64 val)
{
	writeq(val, nn->ctrl_bar + off);
}

/* Flush posted PCI writes by reading something without side effects */
static inline void nn_pci_flush(struct nfp_net *nn)
{
	nn_readl(nn, NFP_NET_CFG_VERSION);
}

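/* Queue Controller Peripheral (QCP) access.
 *
 * Some of the BARs of the NFP are mapped to portions of the Queue
 * Controller Peripheral address space on the NFP.  Each QCP queue has a
 * read and a write pointer; the definitions and helpers below add to
 * and read back those pointers through the per-queue register window
 * (NFP_QCP_QUEUE_ADDR_SZ bytes per queue).
 */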
#define NFP_QCP_QUEUE_ADDR_SZ			0x800
#define NFP_QCP_QUEUE_OFF(_x)			((_x) * NFP_QCP_QUEUE_ADDR_SZ)
#define NFP_QCP_QUEUE_ADD_RPTR			0x0000
#define NFP_QCP_QUEUE_ADD_WPTR			0x0004
#define NFP_QCP_QUEUE_STS_LO			0x0008
#define NFP_QCP_QUEUE_STS_LO_READPTR_mask	0x3ffff
#define NFP_QCP_QUEUE_STS_HI			0x000c
#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask	0x3ffff

/* The offset of a QCP queue in the PCIe Target */
#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))

/* nfp_qcp_ptr - Read or Write Pointer of a queue */
enum nfp_qcp_ptr {
	NFP_QCP_READ_PTR = 0,
	NFP_QCP_WRITE_PTR
};

/* Maximum value added to a queue pointer in a single write; the helpers
 * below split larger updates into chunks of this size (presumably a
 * hardware limit on single-write additions).
 */
#define NFP_QCP_MAX_ADD	0x3f

static inline void _nfp_qcp_ptr_add(u8 __iomem *q,
				    enum nfp_qcp_ptr ptr, u32 val)
{
	u32 off;

	if (ptr == NFP_QCP_READ_PTR)
		off = NFP_QCP_QUEUE_ADD_RPTR;
	else
		off = NFP_QCP_QUEUE_ADD_WPTR;

	while (val > NFP_QCP_MAX_ADD) {
		writel(NFP_QCP_MAX_ADD, q + off);
		val -= NFP_QCP_MAX_ADD;
	}

	writel(val, q + off);
}

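/**
 * nfp_qcp_rd_ptr_add() - Add the value to the read pointer of a queue
 * @q:   Base address for queue structure
 * @val: Value to add to the queue pointer
 *
 * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
 */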
static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
{
	_nfp_qcp_ptr_add(q, NFP_QCP_READ_PTR, val);
}

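/**
 * nfp_qcp_wr_ptr_add() - Add the value to the write pointer of a queue
 * @q:   Base address for queue structure
 * @val: Value to add to the queue pointer
 *
 * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
 */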
static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
{
	_nfp_qcp_ptr_add(q, NFP_QCP_WRITE_PTR, val);
}

static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
{
	u32 off;
	u32 val;

	if (ptr == NFP_QCP_READ_PTR)
		off = NFP_QCP_QUEUE_STS_LO;
	else
		off = NFP_QCP_QUEUE_STS_HI;

	val = readl(q + off);

	if (ptr == NFP_QCP_READ_PTR)
		return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
	else
		return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
}

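/**
 * nfp_qcp_rd_ptr_read() - Read the current read pointer value of a queue
 * @q:  Base address for queue structure
 *
 * Return: Value read.
 */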
static inline u32 nfp_qcp_rd_ptr_read(u8 __iomem *q)
{
	return _nfp_qcp_read(q, NFP_QCP_READ_PTR);
}

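/**
 * nfp_qcp_wr_ptr_read() - Read the current write pointer value of a queue
 * @q:  Base address for queue structure
 *
 * Return: Value read.
 */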
static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
{
	return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
}

/* Globals */
extern const char nfp_driver_version[];

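/* Prototypes for routines implemented elsewhere in the driver
 * (primarily nfp_net_common.c).
 */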
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
			    void __iomem *ctrl_bar);

struct nfp_net *
nfp_net_netdev_alloc(struct pci_dev *pdev,
		     unsigned int max_tx_rings, unsigned int max_rx_rings);
void nfp_net_netdev_free(struct nfp_net *nn);
int nfp_net_netdev_init(struct net_device *netdev);
void nfp_net_netdev_clean(struct net_device *netdev);
void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn);
int nfp_net_reconfig(struct nfp_net *nn, u32 update);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);

unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
		   unsigned int min_irqs, unsigned int want_irqs);
void nfp_net_irqs_disable(struct pci_dev *pdev);
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
		    unsigned int n);
int
nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_ring_set *rx,
		      struct nfp_net_ring_set *tx);

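/* Debugfs support.  The stubs below compile the hooks away when
 * CONFIG_NFP_DEBUG is not enabled.
 */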
#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void);
struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev);
void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id);
void nfp_net_debugfs_dir_clean(struct dentry **dir);
#else
static inline void nfp_net_debugfs_create(void)
{
}

static inline void nfp_net_debugfs_destroy(void)
{
}

static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
{
	return NULL;
}

static inline void
nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id)
{
}

static inline void nfp_net_debugfs_dir_clean(struct dentry **dir)
{
}
#endif /* CONFIG_NFP_DEBUG */

#endif /* _NFP_NET_H_ */