1
2
3
4
5
6
7
8
9
10
11
12#ifndef _NFP_NET_H_
13#define _NFP_NET_H_
14
15#include <linux/interrupt.h>
16#include <linux/list.h>
17#include <linux/netdevice.h>
18#include <linux/pci.h>
19#include <linux/io-64-nonatomic-hi-lo.h>
20#include <net/xdp.h>
21
22#include "nfp_net_ctrl.h"
23
/* nn_pr() - Print a driver message at level @lvl.  Routes through the
 * vNIC's netdev when one exists, otherwise through the parent struct
 * device with a "ctrl: " prefix (netdev-less control vNICs).
 * Statement expression so @nn is evaluated exactly once.
 */
#define nn_pr(nn, lvl, fmt, args...)					\
	({								\
		struct nfp_net *__nn = (nn);				\
									\
		if (__nn->dp.netdev)					\
			netdev_printk(lvl, __nn->dp.netdev, fmt, ## args); \
		else							\
			dev_printk(lvl, __nn->dp.dev, "ctrl: " fmt, ## args); \
	})
33
34#define nn_err(nn, fmt, args...) nn_pr(nn, KERN_ERR, fmt, ## args)
35#define nn_warn(nn, fmt, args...) nn_pr(nn, KERN_WARNING, fmt, ## args)
36#define nn_info(nn, fmt, args...) nn_pr(nn, KERN_INFO, fmt, ## args)
37#define nn_dbg(nn, fmt, args...) nn_pr(nn, KERN_DEBUG, fmt, ## args)
38
/* nn_dp_warn() - Rate-limited warning for datapath code paths.  Uses the
 * netdev when present, the parent device otherwise.  @dp is evaluated
 * exactly once; the whole message is dropped when net_ratelimit() says so.
 */
#define nn_dp_warn(dp, fmt, args...)					\
	({								\
		struct nfp_net_dp *__dp = (dp);				\
									\
		if (unlikely(net_ratelimit())) {			\
			if (__dp->netdev)				\
				netdev_warn(__dp->netdev, fmt, ## args); \
			else						\
				dev_warn(__dp->dev, fmt, ## args);	\
		}							\
	})
50
51
52#define NFP_NET_POLL_TIMEOUT 5
53
54
55#define NFP_NET_STAT_POLL_IVL msecs_to_jiffies(100)
56
57
58#define NFP_NET_CTRL_BAR 0
59#define NFP_NET_Q0_BAR 2
60#define NFP_NET_Q1_BAR 4
61
62
63#define NFP_NET_MAX_DMA_BITS 40
64
65
66#define NFP_NET_DEFAULT_MTU 1500
67
68
69#define NFP_NET_MAX_PREPEND 64
70
71
72#define NFP_NET_NON_Q_VECTORS 2
73#define NFP_NET_IRQ_LSC_IDX 0
74#define NFP_NET_IRQ_EXN_IDX 1
75#define NFP_NET_MIN_VNIC_IRQS (NFP_NET_NON_Q_VECTORS + 1)
76
77
78#define NFP_NET_MAX_TX_RINGS 64
79#define NFP_NET_MAX_RX_RINGS 64
80#define NFP_NET_MAX_R_VECS (NFP_NET_MAX_TX_RINGS > NFP_NET_MAX_RX_RINGS ? \
81 NFP_NET_MAX_TX_RINGS : NFP_NET_MAX_RX_RINGS)
82#define NFP_NET_MAX_IRQS (NFP_NET_NON_Q_VECTORS + NFP_NET_MAX_R_VECS)
83
84#define NFP_NET_MIN_TX_DESCS 256
85#define NFP_NET_MIN_RX_DESCS 256
86#define NFP_NET_MAX_TX_DESCS (256 * 1024)
87#define NFP_NET_MAX_RX_DESCS (256 * 1024)
88
89#define NFP_NET_TX_DESCS_DEFAULT 4096
90#define NFP_NET_RX_DESCS_DEFAULT 4096
91
92#define NFP_NET_FL_BATCH 16
93#define NFP_NET_XDP_MAX_COMPLETE 2048
94
95
96#define NFP_NET_N_VXLAN_PORTS (NFP_NET_CFG_VXLAN_SZ / sizeof(__be16))
97
98#define NFP_NET_RX_BUF_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
99#define NFP_NET_RX_BUF_NON_DATA (NFP_NET_RX_BUF_HEADROOM + \
100 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
101
102
103struct nfp_cpp;
104struct nfp_eth_table_port;
105struct nfp_net;
106struct nfp_net_r_vector;
107struct nfp_port;
108
109
110#define D_IDX(ring, idx) ((idx) & ((ring)->cnt - 1))
111
112
/* nfp_desc_set_dma_addr() - Fill the split DMA address fields of a
 * descriptor: low 32 bits little-endian into @dma_addr_lo and the next
 * 8 bits into @dma_addr_hi (device addressing is at most
 * NFP_NET_MAX_DMA_BITS == 40 bits).  Arguments are captured once, so the
 * macro is safe against multiple evaluation.
 */
#define nfp_desc_set_dma_addr(desc, dma_addr)				\
	do {								\
		__typeof(desc) __d = (desc);				\
		dma_addr_t __addr = (dma_addr);				\
									\
		__d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr));	\
		__d->dma_addr_hi = upper_32_bits(__addr) & 0xff;	\
	} while (0)
121
122
123
124#define PCIE_DESC_TX_EOP BIT(7)
125#define PCIE_DESC_TX_OFFSET_MASK GENMASK(6, 0)
126#define PCIE_DESC_TX_MSS_MASK GENMASK(13, 0)
127
128
129#define PCIE_DESC_TX_CSUM BIT(7)
130#define PCIE_DESC_TX_IP4_CSUM BIT(6)
131#define PCIE_DESC_TX_TCP_CSUM BIT(5)
132#define PCIE_DESC_TX_UDP_CSUM BIT(4)
133#define PCIE_DESC_TX_VLAN BIT(3)
134#define PCIE_DESC_TX_LSO BIT(2)
135#define PCIE_DESC_TX_ENCAP BIT(1)
136#define PCIE_DESC_TX_O_IP4_CSUM BIT(0)
137
/* TX descriptor as consumed by the device; all multi-byte fields are
 * little-endian.  The vals/vals8 views allow whole-descriptor copies.
 */
struct nfp_net_tx_desc {
	union {
		struct {
			u8 dma_addr_hi; /* High bits of host buf address */
			__le16 dma_len;	/* Length to DMA for this desc */
			u8 offset_eop;	/* Offset in buf where pkt starts;
					 * highest bit is EOP flag
					 * (see PCIE_DESC_TX_EOP/_OFFSET_MASK)
					 */
			__le32 dma_addr_lo; /* Low 32bit of host buf addr */

			__le16 mss;	/* MSS to be used for LSO */
			u8 lso_hdrlen;	/* LSO, TCP payload offset */
			u8 flags;	/* TX Flags, see @PCIE_DESC_TX_* */
			union {
				struct {
					u8 l3_offset; /* L3 header offset */
					u8 l4_offset; /* L4 header offset */
				};
				__le16 vlan; /* VLAN tag to add if indicated */
			};
			__le16 data_len; /* Length of frame + meta data */
		} __packed;
		__le32 vals[4];
		__le64 vals8[2];
	};
};
164
165
166
167
168
169
170
171
172
173
174
175
176
/**
 * struct nfp_net_tx_buf - software TX buffer descriptor
 * @skb:	normal ring, sk_buff associated with this buffer
 * @frag:	XDP ring, page frag associated with this buffer
 * @dma_addr:	DMA mapping address of the buffer
 * @fidx:	Fragment index (-1 for the head and [0..nr_frags-1] for frags)
 * @pkt_cnt:	Number of packets to be produced out of the skb associated
 *		with this buffer (valid only on the head's buffer)
 * @real_len:	Number of bytes which to be produced out of the skb (valid only
 *		on the head's buffer)
 */
struct nfp_net_tx_buf {
	union {
		struct sk_buff *skb;
		void *frag;
	};
	dma_addr_t dma_addr;
	short int fidx;
	u16 pkt_cnt;
	u32 real_len;
};
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
/**
 * struct nfp_net_tx_ring - TX ring structure
 * @r_vec:      Back pointer to ring vector structure
 * @idx:        Ring index from Linux's perspective
 * @qcidx:      Queue Controller Peripheral (QCP) queue index for the TX queue
 * @qcp_q:      Pointer to base of the QCP TX queue
 * @cnt:        Size of the queue in number of descriptors
 * @wr_p:       TX ring write pointer (free running)
 * @rd_p:       TX ring read pointer (free running)
 * @qcp_rd_p:   Local copy of QCP TX queue read pointer
 * @wr_ptr_add:	Accumulated number of buffers to add to QCP write pointer
 *		(used for delayed kick of the QCP)
 * @txbufs:     Array of transmitted TX buffers, to free on transmit
 * @txds:       Virtual address of TX ring in host memory
 * @dma:        DMA address of the TX ring
 * @size:       Size, in bytes, of the TX ring (needed to free)
 * @is_xdp:	Is this a XDP TX ring?
 */
struct nfp_net_tx_ring {
	struct nfp_net_r_vector *r_vec;

	u32 idx;
	int qcidx;
	u8 __iomem *qcp_q;

	u32 cnt;
	u32 wr_p;
	u32 rd_p;
	u32 qcp_rd_p;

	u32 wr_ptr_add;

	struct nfp_net_tx_buf *txbufs;
	struct nfp_net_tx_desc *txds;

	dma_addr_t dma;
	size_t size;
	bool is_xdp;
} ____cacheline_aligned;
227
228
229
/* RX and freelist descriptor format */

#define PCIE_DESC_RX_DD			BIT(7)
#define PCIE_DESC_RX_META_LEN_MASK	GENMASK(6, 0)

/* Flags in the RX descriptor (stored little-endian in the descriptor) */
#define PCIE_DESC_RX_RSS		cpu_to_le16(BIT(15))
#define PCIE_DESC_RX_I_IP4_CSUM		cpu_to_le16(BIT(14))
#define PCIE_DESC_RX_I_IP4_CSUM_OK	cpu_to_le16(BIT(13))
#define PCIE_DESC_RX_I_TCP_CSUM		cpu_to_le16(BIT(12))
#define PCIE_DESC_RX_I_TCP_CSUM_OK	cpu_to_le16(BIT(11))
#define PCIE_DESC_RX_I_UDP_CSUM		cpu_to_le16(BIT(10))
#define PCIE_DESC_RX_I_UDP_CSUM_OK	cpu_to_le16(BIT(9))
#define PCIE_DESC_RX_BPF		cpu_to_le16(BIT(8))
#define PCIE_DESC_RX_EOP		cpu_to_le16(BIT(7))
#define PCIE_DESC_RX_IP4_CSUM		cpu_to_le16(BIT(6))
#define PCIE_DESC_RX_IP4_CSUM_OK	cpu_to_le16(BIT(5))
#define PCIE_DESC_RX_TCP_CSUM		cpu_to_le16(BIT(4))
#define PCIE_DESC_RX_TCP_CSUM_OK	cpu_to_le16(BIT(3))
#define PCIE_DESC_RX_UDP_CSUM		cpu_to_le16(BIT(2))
#define PCIE_DESC_RX_UDP_CSUM_OK	cpu_to_le16(BIT(1))
#define PCIE_DESC_RX_VLAN		cpu_to_le16(BIT(0))

/* All "csum present" bits; each _OK bit sits one position below its
 * corresponding "present" bit, hence the shift-by-one below.
 */
#define PCIE_DESC_RX_CSUM_ALL	(PCIE_DESC_RX_IP4_CSUM |		\
				 PCIE_DESC_RX_TCP_CSUM |		\
				 PCIE_DESC_RX_UDP_CSUM |		\
				 PCIE_DESC_RX_I_IP4_CSUM |		\
				 PCIE_DESC_RX_I_TCP_CSUM |		\
				 PCIE_DESC_RX_I_UDP_CSUM)
#define PCIE_DESC_RX_CSUM_OK_SHIFT	1
#define __PCIE_DESC_RX_CSUM_ALL		le16_to_cpu(PCIE_DESC_RX_CSUM_ALL)
#define __PCIE_DESC_RX_CSUM_ALL_OK	(__PCIE_DESC_RX_CSUM_ALL >>	\
					 PCIE_DESC_RX_CSUM_OK_SHIFT)
261
/* RX/freelist descriptor.  @fld is the host-written freelist view,
 * @rxd the device-written receive view; @vals allows raw access.
 */
struct nfp_net_rx_desc {
	union {
		struct {
			u8 dma_addr_hi;	/* High bits of the buf address */
			__le16 reserved; /* Must be zero */
			u8 meta_len_dd; /* Must be zero */

			__le32 dma_addr_lo; /* Low bits of the buffer address */
		} __packed fld;

		struct {
			__le16 data_len; /* Length of the frame + meta data */
			u8 reserved;
			u8 meta_len_dd;	/* Length of meta data prepended +
					 * descriptor done flag
					 * (PCIE_DESC_RX_DD/_META_LEN_MASK)
					 */

			__le16 flags;	/* RX flags. See @PCIE_DESC_RX_* */
			__le16 vlan;	/* Received VLAN tag */
		} __packed rxd;

		__le32 vals[2];
	};
};
286
287#define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
288
/**
 * struct nfp_meta_parsed - Record metadata parsed from packet
 * @hash_type:	RSS hash type
 * @csum_type:	Checksum type
 * @hash:	RSS hash value
 * @mark:	Packet mark
 * @portid:	Port id value
 * @csum:	Checksum of packet
 */
struct nfp_meta_parsed {
	u8 hash_type;
	u8 csum_type;
	u32 hash;
	u32 mark;
	u32 portid;
	__wsum csum;
};
297
/* RSS hash prepend metadata as laid out by the device (big-endian). */
struct nfp_net_rx_hash {
	__be32 hash_type;
	__be32 hash;
};
302
303
304
305
306
307
/**
 * struct nfp_net_rx_buf - software RX buffer descriptor
 * @frag:	page fragment buffer
 * @dma_addr:	DMA mapping address of the buffer
 */
struct nfp_net_rx_buf {
	void *frag;
	dma_addr_t dma_addr;
};
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
/**
 * struct nfp_net_rx_ring - RX ring structure
 * @r_vec:      Back pointer to ring vector structure
 * @cnt:        Size of the queue in number of descriptors
 * @wr_p:       FL/RX ring write pointer (free running)
 * @rd_p:       FL/RX ring read pointer (free running)
 * @idx:        Ring index from Linux's perspective
 * @fl_qcidx:   Queue Controller Peripheral (QCP) queue index for the freelist
 * @qcp_fl:     Pointer to base of the QCP freelist queue
 * @rxbufs:     Array of transmitted FL/RX buffers
 * @rxds:       Virtual address of FL/RX ring in host memory
 * @xdp_rxq:    RX-ring info avail for XDP
 * @dma:        DMA address of the FL/RX ring
 * @size:       Size, in bytes, of the FL/RX ring (needed to free)
 */
struct nfp_net_rx_ring {
	struct nfp_net_r_vector *r_vec;

	u32 cnt;
	u32 wr_p;
	u32 rd_p;

	u32 idx;

	int fl_qcidx;
	u8 __iomem *qcp_fl;

	struct nfp_net_rx_buf *rxbufs;
	struct nfp_net_rx_desc *rxds;

	struct xdp_rxq_info xdp_rxq;

	dma_addr_t dma;
	size_t size;
} ____cacheline_aligned;
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
/**
 * struct nfp_net_r_vector - Per ring interrupt vector configuration
 * @nfp_net:        Backpointer to nfp_net structure
 * @napi:           NAPI structure for this ring vec (data vNICs)
 * @tasklet:        ctrl vNIC, tasklet for servicing the r_vec
 * @queue:          ctrl vNIC, send queue
 * @lock:           ctrl vNIC, r_vec lock protects @queue
 * @tx_ring:        Pointer to TX ring
 * @rx_ring:        Pointer to RX ring
 * @xdp_ring:	    Pointer to an extra TX ring for XDP
 * @irq_entry:      MSI-X table entry (use for talking to the device)
 * @rx_sync:	    Seqlock for atomic updates of RX stats
 * @rx_pkts:        Number of received packets
 * @rx_bytes:	    Number of received bytes
 * @rx_drops:	    Number of packets dropped on RX
 * @hw_csum_rx_ok:  Counter of packets where the HW checksum was OK
 * @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK
 * @hw_csum_rx_complete: Counter of packets with CHECKSUM_COMPLETE reported
 * @hw_csum_rx_error:	 Counter of packets with bad checksums
 * @rx_replace_buf_alloc_fail:	Counter of RX buffer allocation failures
 * @tx_sync:	    Seqlock for atomic updates of TX stats
 * @tx_pkts:	    Number of Transmitted packets
 * @tx_bytes:	    Number of Transmitted bytes
 * @hw_csum_tx:	    Counter of packets with TX checksum offload requested
 * @hw_csum_tx_inner:	 Counter of inner TX checksum offload requests
 * @tx_gather:	    Counter of packets with Gather DMA
 * @tx_lso:	    Counter of LSO packets sent
 * @tx_errors:	    How many TX errors were encountered
 * @tx_busy:        How often was TX busy (no space)?
 * @irq_vector:     Interrupt vector number (use for talking to the OS)
 * @handler:        Interrupt handler for this ring vector
 * @name:           Name of the interrupt vector
 * @affinity_mask:  SMP affinity mask for this vector
 *
 * This structure ties RX and TX rings to interrupt vectors and a NAPI
 * context (or, for control vNICs, a tasklet + queue protected by @lock).
 */
struct nfp_net_r_vector {
	struct nfp_net *nfp_net;
	union {
		struct napi_struct napi;
		struct {
			struct tasklet_struct tasklet;
			struct sk_buff_head queue;
			struct spinlock lock;
		};
	};

	struct nfp_net_tx_ring *tx_ring;
	struct nfp_net_rx_ring *rx_ring;

	u16 irq_entry;

	struct u64_stats_sync rx_sync;
	u64 rx_pkts;
	u64 rx_bytes;
	u64 rx_drops;
	u64 hw_csum_rx_ok;
	u64 hw_csum_rx_inner_ok;
	u64 hw_csum_rx_complete;

	struct nfp_net_tx_ring *xdp_ring;

	struct u64_stats_sync tx_sync;
	u64 tx_pkts;
	u64 tx_bytes;
	u64 hw_csum_tx;
	u64 hw_csum_tx_inner;
	u64 tx_gather;
	u64 tx_lso;

	u64 hw_csum_rx_error;
	u64 rx_replace_buf_alloc_fail;
	u64 tx_errors;
	u64 tx_busy;

	u32 irq_vector;
	irq_handler_t handler;
	char name[IFNAMSIZ + 8];
	cpumask_t affinity_mask;
} ____cacheline_aligned;
432
433
/* Firmware version as it is written in the 32bit value in the BAR;
 * note the minor/major byte order.
 */
struct nfp_net_fw_version {
	u8 minor;
	u8 major;
	u8 class;
	u8 resv;
} __packed;
440
441static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
442 u8 resv, u8 class, u8 major, u8 minor)
443{
444 return fw_ver->resv == resv &&
445 fw_ver->class == class &&
446 fw_ver->major == major &&
447 fw_ver->minor == minor;
448}
449
/* Packet and byte counters, kept together so they update as a pair. */
struct nfp_stat_pair {
	u64 pkts;
	u64 bytes;
};
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
/**
 * struct nfp_net_dp - NFP network device datapath data structure
 * @dev:		Backpointer to struct device
 * @netdev:		Backpointer to net_device structure (NULL for ctrl vNICs)
 * @is_vf:		Is the driver attached to a VF?
 * @chained_metadata_format:  Firmware will use new metadata format
 * @rx_dma_dir:		Mapping direction for RX buffers
 * @rx_offset:		Offset in the RX buffers where packet data starts
 * @rx_dma_off:		Offset at which DMA packets (for XDP headroom)
 * @ctrl:		Local copy of the control register/word
 * @fl_bufsz:		Currently configured size of the freelist buffers
 * @xdp_prog:		Installed XDP BPF program
 * @tx_rings:		Array of pre-allocated TX ring structures
 * @rx_rings:		Array of pre-allocated RX ring structures
 * @ctrl_bar:		Pointer to mapped control BAR
 * @txd_cnt:		Size of the TX ring in number of descriptors
 * @rxd_cnt:		Size of the RX ring in number of descriptors
 * @num_r_vecs:		Number of used ring vectors
 * @num_tx_rings:	Currently configured number of TX rings
 * @num_stack_tx_rings:	Number of TX rings used by the stack (not XDP)
 * @num_rx_rings:	Currently configured number of RX rings
 * @mtu:		Device MTU
 */
struct nfp_net_dp {
	struct device *dev;
	struct net_device *netdev;

	u8 is_vf:1;
	u8 chained_metadata_format:1;

	u8 rx_dma_dir;
	u8 rx_offset;

	u32 rx_dma_off;

	u32 ctrl;
	u32 fl_bufsz;

	struct bpf_prog *xdp_prog;

	struct nfp_net_tx_ring *tx_rings;
	struct nfp_net_rx_ring *rx_rings;

	u8 __iomem *ctrl_bar;

	/* Cold data follows */

	unsigned int txd_cnt;
	unsigned int rxd_cnt;

	unsigned int num_r_vecs;

	unsigned int num_tx_rings;
	unsigned int num_stack_tx_rings;
	unsigned int num_rx_rings;

	unsigned int mtu;
};
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
/**
 * struct nfp_net - NFP network device structure
 * @dp:			Datapath structure
 * @fw_ver:		Firmware version
 * @id:			vNIC id within the PF (0 for VFs)
 * @cap:		Capabilities advertised by the Firmware
 * @max_mtu:		Maximum support MTU advertised by the Firmware
 * @rss_hfunc:		RSS selected hash function
 * @rss_cfg:		RSS configuration
 * @rss_key:		RSS secret key
 * @rss_itbl:		RSS indirection table
 * @xdp:		Information about the driver XDP program
 * @xdp_hw:		Information about the HW XDP program
 * @max_tx_rings:	Maximum number of TX rings supported by the Firmware
 * @max_rx_rings:	Maximum number of RX rings supported by the Firmware
 * @stride_tx:		Queue controller TX queue spacing
 * @stride_rx:		Queue controller RX queue spacing
 * @max_r_vecs:		Number of allocated interrupt vectors for RX/TX
 * @r_vecs:		Pre-allocated array of ring vectors
 * @irq_entries:	Pre-allocated array of MSI-X entries
 * @lsc_handler:	Handler for Link State Change interrupt
 * @lsc_name:		Name for Link State Change interrupt
 * @exn_handler:	Handler for Exception interrupt
 * @exn_name:		Name for Exception interrupt
 * @shared_handler:	Handler for shared interrupts
 * @shared_name:	Name for shared interrupt
 * @me_freq_mhz:	ME clock_freq (MHz)
 * @link_up:		Is the link up?
 * @link_status_lock:	Protects @link_up and ensures atomicity with BAR reading
 * @reconfig_lock:	Protects @reconfig_posted, @reconfig_timer_active,
 *			@reconfig_sync_present and HW reconfiguration request
 *			regs/machinery from async requests
 * @reconfig_posted:	Pending reconfig bits coming from async sources
 * @reconfig_timer_active:  Timer for reading reconfiguration results is pending
 * @reconfig_sync_present:  Some thread is performing synchronous reconfig
 * @reconfig_timer:	Timer for async reading of reconfig results
 * @reconfig_in_progress_update:	Update FW is processing now (debug only)
 * @rx_coalesce_usecs:	    RX interrupt moderation usecs delay parameter
 * @rx_coalesce_max_frames: RX interrupt moderation frame count parameter
 * @tx_coalesce_usecs:	    TX interrupt moderation usecs delay parameter
 * @tx_coalesce_max_frames: TX interrupt moderation frame count parameter
 * @vxlan_ports:	VXLAN ports for RX inner csum offload communicated to HW
 * @vxlan_usecnt:	Use counts for the @vxlan_ports slots
 * @qcp_cfg:		Pointer to QCP queue used for configuration notification
 * @tx_bar:		Pointer to mapped TX queues
 * @rx_bar:		Pointer to mapped FL/RX queues
 * @tlv_caps:		Parsed TLV capabilities
 * @debugfs_dir:	Device directory in debugfs
 * @vnic_list:		Entry on device vNIC list
 * @pdev:		Backpointer to PCI device
 * @app:		APP handle if available
 * @vnic_no_name:	For non-data vNICs make sure they don't have a name
 * @port:		Pointer to nfp_port structure if vNIC is a port
 * @app_priv:		APP private data for this vNIC
 */
struct nfp_net {
	struct nfp_net_dp dp;

	struct nfp_net_fw_version fw_ver;

	u32 id;

	u32 cap;
	u32 max_mtu;

	u8 rss_hfunc;
	u32 rss_cfg;
	u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
	u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];

	struct xdp_attachment_info xdp;
	struct xdp_attachment_info xdp_hw;

	unsigned int max_tx_rings;
	unsigned int max_rx_rings;

	int stride_tx;
	int stride_rx;

	unsigned int max_r_vecs;
	struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS];
	struct msix_entry irq_entries[NFP_NET_MAX_IRQS];

	irq_handler_t lsc_handler;
	char lsc_name[IFNAMSIZ + 8];

	irq_handler_t exn_handler;
	char exn_name[IFNAMSIZ + 8];

	irq_handler_t shared_handler;
	char shared_name[IFNAMSIZ + 8];

	u32 me_freq_mhz;

	bool link_up;
	spinlock_t link_status_lock;

	spinlock_t reconfig_lock;
	u32 reconfig_posted;
	bool reconfig_timer_active;
	bool reconfig_sync_present;
	struct timer_list reconfig_timer;
	u32 reconfig_in_progress_update;

	u32 rx_coalesce_usecs;
	u32 rx_coalesce_max_frames;
	u32 tx_coalesce_usecs;
	u32 tx_coalesce_max_frames;

	__be16 vxlan_ports[NFP_NET_N_VXLAN_PORTS];
	u8 vxlan_usecnt[NFP_NET_N_VXLAN_PORTS];

	u8 __iomem *qcp_cfg;

	u8 __iomem *tx_bar;
	u8 __iomem *rx_bar;

	struct nfp_net_tlv_caps tlv_caps;

	struct dentry *debugfs_dir;

	struct list_head vnic_list;

	struct pci_dev *pdev;
	struct nfp_app *app;

	bool vnic_no_name;

	struct nfp_port *port;

	void *app_priv;
};
646
647
648
649
650static inline u16 nn_readb(struct nfp_net *nn, int off)
651{
652 return readb(nn->dp.ctrl_bar + off);
653}
654
655static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
656{
657 writeb(val, nn->dp.ctrl_bar + off);
658}
659
660static inline u16 nn_readw(struct nfp_net *nn, int off)
661{
662 return readw(nn->dp.ctrl_bar + off);
663}
664
665static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
666{
667 writew(val, nn->dp.ctrl_bar + off);
668}
669
670static inline u32 nn_readl(struct nfp_net *nn, int off)
671{
672 return readl(nn->dp.ctrl_bar + off);
673}
674
675static inline void nn_writel(struct nfp_net *nn, int off, u32 val)
676{
677 writel(val, nn->dp.ctrl_bar + off);
678}
679
680static inline u64 nn_readq(struct nfp_net *nn, int off)
681{
682 return readq(nn->dp.ctrl_bar + off);
683}
684
685static inline void nn_writeq(struct nfp_net *nn, int off, u64 val)
686{
687 writeq(val, nn->dp.ctrl_bar + off);
688}
689
690
/* Flush posted PCI writes by reading something that will cause a read
 * over PCIe (any BAR register works; the version register is harmless).
 */
static inline void nn_pci_flush(struct nfp_net *nn)
{
	nn_readl(nn, NFP_NET_CFG_VERSION);
}
695
696
697
698
699
700
701
702
703
704
/* Queue Controller Peripheral (QCP) access definitions.
 *
 * Some BARs of the NFP map portions of the QCP address space; a QCP
 * queue is the interface for indicating queue progress to the device.
 */
#define NFP_QCP_QUEUE_ADDR_SZ			0x800
#define NFP_QCP_QUEUE_AREA_SZ			0x80000
#define NFP_QCP_QUEUE_OFF(_x)		((_x) * NFP_QCP_QUEUE_ADDR_SZ)
#define NFP_QCP_QUEUE_ADD_RPTR			0x0000
#define NFP_QCP_QUEUE_ADD_WPTR			0x0004
#define NFP_QCP_QUEUE_STS_LO			0x0008
#define NFP_QCP_QUEUE_STS_LO_READPTR_mask	0x3ffff
#define NFP_QCP_QUEUE_STS_HI			0x000c
#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask	0x3ffff

/* The offset of a QCP queue in the PCIe Target */
#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))

/* nfp_qcp_ptr - which pointer of a queue to operate on */
enum nfp_qcp_ptr {
	NFP_QCP_READ_PTR = 0,
	NFP_QCP_WRITE_PTR
};

/* Maximum value which may be added to a queue pointer in one write;
 * larger additions must be split into multiple writes
 * (see _nfp_qcp_ptr_add()).
 */
#define NFP_QCP_MAX_ADD			0x3f
729
730static inline void _nfp_qcp_ptr_add(u8 __iomem *q,
731 enum nfp_qcp_ptr ptr, u32 val)
732{
733 u32 off;
734
735 if (ptr == NFP_QCP_READ_PTR)
736 off = NFP_QCP_QUEUE_ADD_RPTR;
737 else
738 off = NFP_QCP_QUEUE_ADD_WPTR;
739
740 while (val > NFP_QCP_MAX_ADD) {
741 writel(NFP_QCP_MAX_ADD, q + off);
742 val -= NFP_QCP_MAX_ADD;
743 }
744
745 writel(val, q + off);
746}
747
748
749
750
751
752
753
754
755
/**
 * nfp_qcp_rd_ptr_add() - Add the value to the read pointer of a queue
 * @q:   Base address for queue structure
 * @val: Value to add to the queue pointer
 *
 * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
 */
static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
{
	_nfp_qcp_ptr_add(q, NFP_QCP_READ_PTR, val);
}
760
761
762
763
764
765
766
767
768
/**
 * nfp_qcp_wr_ptr_add() - Add the value to the write pointer of a queue
 * @q:   Base address for queue structure
 * @val: Value to add to the queue pointer
 *
 * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
 */
static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
{
	_nfp_qcp_ptr_add(q, NFP_QCP_WRITE_PTR, val);
}
773
774static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
775{
776 u32 off;
777 u32 val;
778
779 if (ptr == NFP_QCP_READ_PTR)
780 off = NFP_QCP_QUEUE_STS_LO;
781 else
782 off = NFP_QCP_QUEUE_STS_HI;
783
784 val = readl(q + off);
785
786 if (ptr == NFP_QCP_READ_PTR)
787 return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
788 else
789 return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
790}
791
792
793
794
795
796
797
/**
 * nfp_qcp_rd_ptr_read() - Read the current read pointer value of a queue
 * @q:  Base address for queue structure
 *
 * Return: Value read.
 */
static inline u32 nfp_qcp_rd_ptr_read(u8 __iomem *q)
{
	return _nfp_qcp_read(q, NFP_QCP_READ_PTR);
}
802
803
804
805
806
807
808
/**
 * nfp_qcp_wr_ptr_read() - Read the current write pointer value of a queue
 * @q:  Base address for queue structure
 *
 * Return: Value read.
 */
static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
{
	return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
}
813
814static inline bool nfp_net_is_data_vnic(struct nfp_net *nn)
815{
816 WARN_ON_ONCE(!nn->dp.netdev && nn->port);
817 return !!nn->dp.netdev;
818}
819
820static inline bool nfp_net_running(struct nfp_net *nn)
821{
822 return nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE;
823}
824
825static inline const char *nfp_net_name(struct nfp_net *nn)
826{
827 return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
828}
829
/* Take the ctrl vNIC lock (ring vector 0's @lock), BH-safe; pairs with
 * nfp_ctrl_unlock().
 */
static inline void nfp_ctrl_lock(struct nfp_net *nn)
	__acquires(&nn->r_vecs[0].lock)
{
	spin_lock_bh(&nn->r_vecs[0].lock);
}
835
/* Release the ctrl vNIC lock taken by nfp_ctrl_lock(). */
static inline void nfp_ctrl_unlock(struct nfp_net *nn)
	__releases(&nn->r_vecs[0].lock)
{
	spin_unlock_bh(&nn->r_vecs[0].lock);
}
841
842
843extern const char nfp_driver_version[];
844
845extern const struct net_device_ops nfp_net_netdev_ops;
846
/* Check whether @netdev is an nfp_net device by comparing its ops
 * pointer against this driver's netdev ops.
 */
static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev)
{
	return netdev->netdev_ops == &nfp_net_netdev_ops;
}
851
852
853void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
854 void __iomem *ctrl_bar);
855
856struct nfp_net *
857nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
858 unsigned int max_tx_rings, unsigned int max_rx_rings);
859void nfp_net_free(struct nfp_net *nn);
860
861int nfp_net_init(struct nfp_net *nn);
862void nfp_net_clean(struct nfp_net *nn);
863
864int nfp_ctrl_open(struct nfp_net *nn);
865void nfp_ctrl_close(struct nfp_net *nn);
866
867void nfp_net_set_ethtool_ops(struct net_device *netdev);
868void nfp_net_info(struct nfp_net *nn);
869int nfp_net_reconfig(struct nfp_net *nn, u32 update);
870unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
871void nfp_net_rss_write_itbl(struct nfp_net *nn);
872void nfp_net_rss_write_key(struct nfp_net *nn);
873void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
874int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd);
875
876unsigned int
877nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
878 unsigned int min_irqs, unsigned int want_irqs);
879void nfp_net_irqs_disable(struct pci_dev *pdev);
880void
881nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
882 unsigned int n);
883
884struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
885int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
886 struct netlink_ext_ack *extack);
887
#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void);
struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev);
void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir);
void nfp_net_debugfs_dir_clean(struct dentry **dir);
#else
/* No-op stubs so callers need no #ifdefs when debugfs support is
 * compiled out.
 */
static inline void nfp_net_debugfs_create(void)
{
}

static inline void nfp_net_debugfs_destroy(void)
{
}

static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
{
	return NULL;
}

static inline void
nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir)
{
}

static inline void nfp_net_debugfs_dir_clean(struct dentry **dir)
{
}
#endif
917
918#endif
919