1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42#ifndef _NFP_NET_H_
43#define _NFP_NET_H_
44
45#include <linux/interrupt.h>
46#include <linux/list.h>
47#include <linux/netdevice.h>
48#include <linux/pci.h>
49#include <linux/io-64-nonatomic-hi-lo.h>
50#include <net/xdp.h>
51
52#include "nfp_net_ctrl.h"
53
/* Print helpers: route messages through the netdev when the vNIC has one,
 * otherwise through the parent struct device with a "ctrl: " prefix
 * (control vNICs have no netdev).
 */
#define nn_pr(nn, lvl, fmt, args...) \
 ({ \
 struct nfp_net *__nn = (nn); \
 \
 if (__nn->dp.netdev) \
 netdev_printk(lvl, __nn->dp.netdev, fmt, ## args); \
 else \
 dev_printk(lvl, __nn->dp.dev, "ctrl: " fmt, ## args); \
 })

#define nn_err(nn, fmt, args...) nn_pr(nn, KERN_ERR, fmt, ## args)
#define nn_warn(nn, fmt, args...) nn_pr(nn, KERN_WARNING, fmt, ## args)
#define nn_info(nn, fmt, args...) nn_pr(nn, KERN_INFO, fmt, ## args)
#define nn_dbg(nn, fmt, args...) nn_pr(nn, KERN_DEBUG, fmt, ## args)

/* Rate-limited warning for datapath code; like nn_pr() it picks the
 * netdev when present and falls back to the struct device otherwise.
 */
#define nn_dp_warn(dp, fmt, args...) \
 ({ \
 struct nfp_net_dp *__dp = (dp); \
 \
 if (unlikely(net_ratelimit())) { \
 if (__dp->netdev) \
 netdev_warn(__dp->netdev, fmt, ## args); \
 else \
 dev_warn(__dp->dev, fmt, ## args); \
 } \
 })
80
81
/* Max time to wait for NFP to respond on updates (in seconds) */
#define NFP_NET_POLL_TIMEOUT 5

/* Interval for reading offloaded filter stats */
#define NFP_NET_STAT_POLL_IVL msecs_to_jiffies(100)

/* PCI BAR assignments */
#define NFP_NET_CTRL_BAR 0
#define NFP_NET_Q0_BAR 2
#define NFP_NET_Q1_BAR 4

/* Max bits in a DMA address the device can use */
#define NFP_NET_MAX_DMA_BITS 40

/* Default MTU */
#define NFP_NET_DEFAULT_MTU 1500

/* Maximum number of bytes prepended to a packet */
#define NFP_NET_MAX_PREPEND 64

/* Interrupt definitions: two non-queue vectors (link state change at
 * index 0, exceptions at index 1) plus at least one ring vector.
 */
#define NFP_NET_NON_Q_VECTORS 2
#define NFP_NET_IRQ_LSC_IDX 0
#define NFP_NET_IRQ_EXN_IDX 1
#define NFP_NET_MIN_VNIC_IRQS (NFP_NET_NON_Q_VECTORS + 1)

/* Queue/Ring definitions */
#define NFP_NET_MAX_TX_RINGS 64 /* Max. # of Tx rings per device */
#define NFP_NET_MAX_RX_RINGS 64 /* Max. # of Rx rings per device */
#define NFP_NET_MAX_R_VECS (NFP_NET_MAX_TX_RINGS > NFP_NET_MAX_RX_RINGS ? \
 NFP_NET_MAX_TX_RINGS : NFP_NET_MAX_RX_RINGS)
#define NFP_NET_MAX_IRQS (NFP_NET_NON_Q_VECTORS + NFP_NET_MAX_R_VECS)

#define NFP_NET_MIN_TX_DESCS 256 /* Min. # of Tx descs per ring */
#define NFP_NET_MIN_RX_DESCS 256 /* Min. # of Rx descs per ring */
#define NFP_NET_MAX_TX_DESCS (256 * 1024) /* Max. # of Tx descs per ring */
#define NFP_NET_MAX_RX_DESCS (256 * 1024) /* Max. # of Rx descs per ring */

#define NFP_NET_TX_DESCS_DEFAULT 4096 /* Default # of Tx descs per ring */
#define NFP_NET_RX_DESCS_DEFAULT 4096 /* Default # of Rx descs per ring */

#define NFP_NET_FL_BATCH 16 /* Add freelist buffers in this batch size */
#define NFP_NET_XDP_MAX_COMPLETE 2048 /* XDP bufs to reclaim in NAPI poll */

/* Offload definitions */
#define NFP_NET_N_VXLAN_PORTS (NFP_NET_CFG_VXLAN_SZ / sizeof(__be16))

/* Per-buffer overhead: headroom before the data plus shared info after it */
#define NFP_NET_RX_BUF_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
#define NFP_NET_RX_BUF_NON_DATA (NFP_NET_RX_BUF_HEADROOM + \
 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
131
132
/* Forward declarations */
struct nfp_cpp;
struct nfp_eth_table_port;
struct nfp_net;
struct nfp_net_r_vector;
struct nfp_port;

/* Convert a ring index to a descriptor slot; relies on the ring count
 * being a power of two so the AND implements the wrap-around modulo.
 */
#define D_IDX(ring, idx) ((idx) & ((ring)->cnt - 1))

/* Write a 40-bit DMA address into a descriptor: low 32 bits into
 * @dma_addr_lo (little-endian), top 8 bits into @dma_addr_hi.
 */
#define nfp_desc_set_dma_addr(desc, dma_addr) \
 do { \
 __typeof(desc) __d = (desc); \
 dma_addr_t __addr = (dma_addr); \
 \
 __d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr)); \
 __d->dma_addr_hi = upper_32_bits(__addr) & 0xff; \
 } while (0)
151
152
153
/* TX descriptor format */

#define PCIE_DESC_TX_EOP BIT(7)
#define PCIE_DESC_TX_OFFSET_MASK GENMASK(6, 0)
#define PCIE_DESC_TX_MSS_MASK GENMASK(13, 0)

/* Flags in the host TX descriptor */
#define PCIE_DESC_TX_CSUM BIT(7)
#define PCIE_DESC_TX_IP4_CSUM BIT(6)
#define PCIE_DESC_TX_TCP_CSUM BIT(5)
#define PCIE_DESC_TX_UDP_CSUM BIT(4)
#define PCIE_DESC_TX_VLAN BIT(3)
#define PCIE_DESC_TX_LSO BIT(2)
#define PCIE_DESC_TX_ENCAP BIT(1)
#define PCIE_DESC_TX_O_IP4_CSUM BIT(0)

struct nfp_net_tx_desc {
 union {
 struct {
 u8 dma_addr_hi; /* High bits of host buf address */
 __le16 dma_len; /* Length to DMA for this desc */
 u8 offset_eop; /* Offset in buf where pkt starts +
 * highest bit is eop flag.
 */
 __le32 dma_addr_lo; /* Low 32bit of host buf addr */

 __le16 mss; /* MSS to be used for LSO */
 u8 lso_hdrlen; /* LSO, TCP payload offset */
 u8 flags; /* TX Flags, see @PCIE_DESC_TX_* */
 union {
 struct {
 u8 l3_offset; /* L3 header offset */
 u8 l4_offset; /* L4 header offset */
 };
 __le16 vlan; /* VLAN tag to add if indicated */
 };
 __le16 data_len; /* Length of frame + meta data */
 } __packed;
 __le32 vals[4];
 };
};
193
194
195
196
197
198
199
200
201
202
203
204
205
/**
 * struct nfp_net_tx_buf - software TX buffer descriptor
 * @skb:	normal ring, sk_buff associated with this buffer
 * @frag:	XDP ring, page frag associated with this buffer
 * @dma_addr:	DMA mapping address of the buffer
 * @fidx:	Fragment index (-1 for the head and [0..frags-1] for frags)
 * @pkt_cnt:	Number of packets to be produced out of the skb associated
 *		with this buffer (valid only on the head's buffer)
 * @real_len:	Number of bytes which to be produced out of the skb (valid only
 *		on the head's buffer)
 */
struct nfp_net_tx_buf {
 union {
 struct sk_buff *skb;
 void *frag;
 };
 dma_addr_t dma_addr;
 short int fidx;
 u16 pkt_cnt;
 u32 real_len;
};
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
/**
 * struct nfp_net_tx_ring - TX ring structure
 * @r_vec:      Back pointer to ring vector structure
 * @idx:        Ring index from Linux's perspective
 * @qcidx:      Queue Controller Peripheral (QCP) queue index for the TX queue
 * @qcp_q:      Pointer to base of the QCP TX queue
 * @cnt:        Size of the queue in number of descriptors
 * @wr_p:       TX ring write pointer (free running)
 * @rd_p:       TX ring read pointer (free running)
 * @qcp_rd_p:   Local copy of QCP TX queue read pointer
 * @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer
 *              (used for TX batching)
 * @txbufs:     Array of transmitted TX buffers, to free on transmit
 * @txds:       Virtual address of TX ring in host memory
 * @dma:        DMA address of the TX ring
 * @size:       Size, in bytes, of the TX ring (needed to free)
 * @is_xdp:     Is this a XDP TX ring?
 */
struct nfp_net_tx_ring {
 struct nfp_net_r_vector *r_vec;

 u32 idx;
 int qcidx;
 u8 __iomem *qcp_q;

 u32 cnt;
 u32 wr_p;
 u32 rd_p;
 u32 qcp_rd_p;

 u32 wr_ptr_add;

 struct nfp_net_tx_buf *txbufs;
 struct nfp_net_tx_desc *txds;

 dma_addr_t dma;
 unsigned int size;
 bool is_xdp;
} ____cacheline_aligned;
256
257
258
/* RX and freelist descriptor format */

#define PCIE_DESC_RX_DD BIT(7)
#define PCIE_DESC_RX_META_LEN_MASK GENMASK(6, 0)

/* Flags in the RX descriptor */
#define PCIE_DESC_RX_RSS cpu_to_le16(BIT(15))
#define PCIE_DESC_RX_I_IP4_CSUM cpu_to_le16(BIT(14))
#define PCIE_DESC_RX_I_IP4_CSUM_OK cpu_to_le16(BIT(13))
#define PCIE_DESC_RX_I_TCP_CSUM cpu_to_le16(BIT(12))
#define PCIE_DESC_RX_I_TCP_CSUM_OK cpu_to_le16(BIT(11))
#define PCIE_DESC_RX_I_UDP_CSUM cpu_to_le16(BIT(10))
#define PCIE_DESC_RX_I_UDP_CSUM_OK cpu_to_le16(BIT(9))
#define PCIE_DESC_RX_BPF cpu_to_le16(BIT(8))
#define PCIE_DESC_RX_EOP cpu_to_le16(BIT(7))
#define PCIE_DESC_RX_IP4_CSUM cpu_to_le16(BIT(6))
#define PCIE_DESC_RX_IP4_CSUM_OK cpu_to_le16(BIT(5))
#define PCIE_DESC_RX_TCP_CSUM cpu_to_le16(BIT(4))
#define PCIE_DESC_RX_TCP_CSUM_OK cpu_to_le16(BIT(3))
#define PCIE_DESC_RX_UDP_CSUM cpu_to_le16(BIT(2))
#define PCIE_DESC_RX_UDP_CSUM_OK cpu_to_le16(BIT(1))
#define PCIE_DESC_RX_VLAN cpu_to_le16(BIT(0))

/* All checksum-present flags; the _OK flags sit one bit below their
 * corresponding _CSUM flags, hence the shift by 1 below.
 */
#define PCIE_DESC_RX_CSUM_ALL (PCIE_DESC_RX_IP4_CSUM | \
 PCIE_DESC_RX_TCP_CSUM | \
 PCIE_DESC_RX_UDP_CSUM | \
 PCIE_DESC_RX_I_IP4_CSUM | \
 PCIE_DESC_RX_I_TCP_CSUM | \
 PCIE_DESC_RX_I_UDP_CSUM)
#define PCIE_DESC_RX_CSUM_OK_SHIFT 1
#define __PCIE_DESC_RX_CSUM_ALL le16_to_cpu(PCIE_DESC_RX_CSUM_ALL)
#define __PCIE_DESC_RX_CSUM_ALL_OK (__PCIE_DESC_RX_CSUM_ALL >> \
 PCIE_DESC_RX_CSUM_OK_SHIFT)

struct nfp_net_rx_desc {
 union {
 /* Freelist descriptor - written by the host */
 struct {
 u8 dma_addr_hi; /* High bits of the buf address */
 __le16 reserved; /* Must be zero */
 u8 meta_len_dd; /* Must be zero */

 __le32 dma_addr_lo; /* Low bits of the buffer address */
 } __packed fld;

 /* RX descriptor - written back by the device */
 struct {
 __le16 data_len; /* Length of the frame + meta data */
 u8 reserved;
 u8 meta_len_dd; /* Length of meta data prepended +
 * descriptor done flag.
 */

 __le16 flags; /* RX flags. See @PCIE_DESC_RX_* */
 __le16 vlan; /* Offloaded VLAN tag */
 } __packed rxd;

 __le32 vals[2];
 };
};
315
#define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)

/* Metadata parsed from the prepend of an RX frame */
struct nfp_meta_parsed {
 u8 hash_type;	/* type of @hash */
 u8 csum_type;	/* type of @csum */
 u32 hash;	/* RSS hash value */
 u32 mark;	/* packet mark */
 u32 portid;	/* port id */
 __wsum csum;	/* checksum reported by the device */
};

/* RX hash metadata as prepended by the device (big-endian on the wire) */
struct nfp_net_rx_hash {
 __be32 hash_type;
 __be32 hash;
};

/**
 * struct nfp_net_rx_buf - software RX buffer descriptor
 * @frag:     page fragment buffer
 * @dma_addr: DMA mapping address of the buffer
 */
struct nfp_net_rx_buf {
 void *frag;
 dma_addr_t dma_addr;
};
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
/**
 * struct nfp_net_rx_ring - RX ring structure
 * @r_vec:      Back pointer to ring vector structure
 * @cnt:        Size of the queue in number of descriptors
 * @wr_p:       FL/RX ring write pointer (free running)
 * @rd_p:       FL/RX ring read pointer (free running)
 * @idx:        Ring index from Linux's perspective
 * @fl_qcidx:   Queue Controller Peripheral (QCP) queue index for the freelist
 * @size:       Size, in bytes, of the FL/RX ring (needed to free)
 * @qcp_fl:     Pointer to base of the QCP freelist queue
 * @rxbufs:     Array of software RX buffers (frag + DMA addr)
 * @rxds:       Virtual address of FL/RX ring in host memory
 * @dma:        DMA address of the FL/RX ring
 * @xdp_rxq:    RX queue info for XDP
 */
struct nfp_net_rx_ring {
 struct nfp_net_r_vector *r_vec;

 u32 cnt;
 u32 wr_p;
 u32 rd_p;

 u32 idx;

 int fl_qcidx;
 unsigned int size;
 u8 __iomem *qcp_fl;

 struct nfp_net_rx_buf *rxbufs;
 struct nfp_net_rx_desc *rxds;

 dma_addr_t dma;
 struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned;
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
/**
 * struct nfp_net_r_vector - Per ring interrupt vector configuration
 * @nfp_net:        Backpointer to nfp_net structure
 * @napi:           NAPI structure for this ring vec
 * @tasklet:        ctrl vNIC, tasklet for servicing the r_vec
 * @queue:          ctrl vNIC, send queue
 * @lock:           ctrl vNIC, r_vec lock protects @queue
 * @tx_ring:        Pointer to TX ring
 * @rx_ring:        Pointer to RX ring
 * @irq_entry:      MSI-X table entry (use for talking to the device)
 * @rx_sync:        Seqlock for atomic updates of RX stats
 * @rx_pkts:        Number of received packets
 * @rx_bytes:       Number of received bytes
 * @rx_drops:       Number of packets dropped on RX
 * @hw_csum_rx_ok:  Counter of packets where the HW checksum was OK
 * @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK
 * @hw_csum_rx_complete: Counter of packets with CHECKSUM_COMPLETE reported
 * @xdp_ring:       Pointer to an extra TX ring for XDP
 * @tx_sync:        Seqlock for atomic updates of TX stats
 * @tx_pkts:        Number of Transmitted packets
 * @tx_bytes:       Number of Transmitted bytes
 * @hw_csum_tx:     Counter of packets with TX checksum offload requested
 * @hw_csum_tx_inner: Counter of inner TX checksum offload requests
 * @tx_gather:      Counter of packets with Gather DMA
 * @tx_lso:         Counter of LSO packets sent
 * @hw_csum_rx_error: Counter of packets with bad checksums
 * @rx_replace_buf_alloc_fail: Counter of RX buffer allocation failures
 * @tx_errors:      How many TX errors were encountered
 * @tx_busy:        How often was TX busy (no space)?
 * @irq_vector:     Interrupt vector number (use for talking to the OS)
 * @handler:        Interrupt handler for this ring vector
 * @name:           Name of the MSI-X interrupt vector
 * @affinity_mask:  SMP affinity mask for this vector
 *
 * The @napi member is only used on data vNICs; control vNICs use the
 * @tasklet/@queue/@lock trio in the same union instead.
 */
struct nfp_net_r_vector {
 struct nfp_net *nfp_net;
 union {
 struct napi_struct napi;
 struct {
 struct tasklet_struct tasklet;
 struct sk_buff_head queue;
 struct spinlock lock;
 };
 };

 struct nfp_net_tx_ring *tx_ring;
 struct nfp_net_rx_ring *rx_ring;

 u16 irq_entry;

 struct u64_stats_sync rx_sync;
 u64 rx_pkts;
 u64 rx_bytes;
 u64 rx_drops;
 u64 hw_csum_rx_ok;
 u64 hw_csum_rx_inner_ok;
 u64 hw_csum_rx_complete;

 struct nfp_net_tx_ring *xdp_ring;

 struct u64_stats_sync tx_sync;
 u64 tx_pkts;
 u64 tx_bytes;
 u64 hw_csum_tx;
 u64 hw_csum_tx_inner;
 u64 tx_gather;
 u64 tx_lso;

 u64 hw_csum_rx_error;
 u64 rx_replace_buf_alloc_fail;
 u64 tx_errors;
 u64 tx_busy;

 u32 irq_vector;
 irq_handler_t handler;
 char name[IFNAMSIZ + 8];
 cpumask_t affinity_mask;
} ____cacheline_aligned;
460
461
/* Firmware version as laid out in the 32-bit version value in the BAR */
struct nfp_net_fw_version {
 u8 minor;
 u8 major;
 u8 class;
 u8 resv;
} __packed;
468
469static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
470 u8 resv, u8 class, u8 major, u8 minor)
471{
472 return fw_ver->resv == resv &&
473 fw_ver->class == class &&
474 fw_ver->major == major &&
475 fw_ver->minor == minor;
476}
477
/* A packet counter and a byte counter kept together */
struct nfp_stat_pair {
 u64 pkts;
 u64 bytes;
};
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
/**
 * struct nfp_net_dp - NFP network device datapath data structure
 * @dev:		Backpointer to struct device
 * @netdev:		Backpointer to net_device structure
 * @is_vf:		Is the driver attached to a VF?
 * @bpf_offload_xdp:	Offloaded BPF program is XDP
 * @chained_metadata_format:  Firmware uses the chained metadata format
 * @rx_dma_dir:		Mapping direction for RX buffers
 * @rx_offset:		Offset in the RX buffers where packet data starts
 * @rx_dma_off:		Offset at which DMA packets (for XDP headroom)
 * @ctrl:		Local copy of the control register/word
 * @fl_bufsz:		Currently configured size of the freelist buffers
 * @xdp_prog:		Installed XDP program
 * @tx_rings:		Array of pre-allocated TX ring structures
 * @rx_rings:		Array of pre-allocated RX ring structures
 * @ctrl_bar:		Pointer to mapped control BAR
 *
 * Fields which follow are not part of the hot path, they are used for
 * reconfiguring the device:
 * @txd_cnt:		Size of the TX ring in number of descriptors
 * @rxd_cnt:		Size of the RX ring in number of descriptors
 * @num_r_vecs:		Number of used ring vectors
 * @num_tx_rings:	Currently configured number of TX rings
 * @num_stack_tx_rings:	Number of TX rings used by the stack (not XDP)
 * @num_rx_rings:	Currently configured number of RX rings
 * @mtu:		Device MTU
 */
struct nfp_net_dp {
 struct device *dev;
 struct net_device *netdev;

 u8 is_vf:1;
 u8 bpf_offload_xdp:1;
 u8 chained_metadata_format:1;

 u8 rx_dma_dir;
 u8 rx_offset;

 u32 rx_dma_off;

 u32 ctrl;
 u32 fl_bufsz;

 struct bpf_prog *xdp_prog;

 struct nfp_net_tx_ring *tx_rings;
 struct nfp_net_rx_ring *rx_rings;

 u8 __iomem *ctrl_bar;

 /* Fields below are not part of the fast path */

 unsigned int txd_cnt;
 unsigned int rxd_cnt;

 unsigned int num_r_vecs;

 unsigned int num_tx_rings;
 unsigned int num_stack_tx_rings;
 unsigned int num_rx_rings;

 unsigned int mtu;
};
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
/**
 * struct nfp_net - NFP network device structure
 * @dp:			Datapath structure
 * @fw_ver:		Firmware version
 * @cap:		Capabilities advertised by the Firmware
 * @max_mtu:		Maximum supported MTU advertised by the Firmware
 * @rss_hfunc:		RSS selected hash function
 * @rss_cfg:		RSS configuration
 * @rss_key:		RSS secret key
 * @rss_itbl:		RSS indirection table
 * @xdp_flags:		Flags with which the XDP prog was loaded
 * @xdp_prog:		XDP prog (for ctrl path, both DRV and HW modes)
 * @max_tx_rings:	Maximum number of TX rings supported by the Firmware
 * @max_rx_rings:	Maximum number of RX rings supported by the Firmware
 * @stride_tx:		Queue controller TX queue spacing
 * @stride_rx:		Queue controller RX queue spacing
 * @max_r_vecs:		Number of allocated interrupt vectors for RX/TX
 * @r_vecs:		Pre-allocated array of ring vectors
 * @irq_entries:	Pre-allocated array of MSI-X entries
 * @lsc_handler:	Handler for Link State Change interrupt
 * @lsc_name:		Name for Link State Change interrupt
 * @exn_handler:	Handler for Exception interrupt
 * @exn_name:		Name for Exception interrupt
 * @shared_handler:	Handler for shared interrupts
 * @shared_name:	Name for shared interrupt
 * @me_freq_mhz:	ME clock frequency (MHz)
 * @link_up:		Is the link up?
 * @link_status_lock:	Protects @link_up
 * @reconfig_lock:	Protects HW reconfiguration request regs/machinery
 * @reconfig_posted:	Pending reconfig bits coming from async sources
 * @reconfig_timer_active:  Timer for reading reconfig results is pending
 * @reconfig_sync_present:  Some thread is performing synchronous reconfig
 * @reconfig_timer:	Timer for async reading of reconfig results
 * @rx_coalesce_usecs:	    RX interrupt moderation usecs delay parameter
 * @rx_coalesce_max_frames: RX interrupt moderation frame count parameter
 * @tx_coalesce_usecs:	    TX interrupt moderation usecs delay parameter
 * @tx_coalesce_max_frames: TX interrupt moderation frame count parameter
 * @vxlan_ports:	VXLAN ports communicated to the device
 * @vxlan_usecnt:	Use counts for the @vxlan_ports entries
 * @qcp_cfg:		Pointer to QCP queue used for configuration notification
 * @tx_bar:		Pointer to mapped TX queues
 * @rx_bar:		Pointer to mapped FL/RX queues
 * @tlv_caps:		Parsed TLV capabilities
 * @debugfs_dir:	Device directory in debugfs
 * @vnic_list:		Entry on device vNIC list
 * @pdev:		Backpointer to PCI device
 * @app:		APP handle if available
 * @port:		Pointer to nfp_port structure if vNIC is a port
 * @app_priv:		APP private data for this vNIC
 */
struct nfp_net {
 struct nfp_net_dp dp;

 struct nfp_net_fw_version fw_ver;

 u32 cap;
 u32 max_mtu;

 u8 rss_hfunc;
 u32 rss_cfg;
 u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
 u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];

 u32 xdp_flags;
 struct bpf_prog *xdp_prog;

 unsigned int max_tx_rings;
 unsigned int max_rx_rings;

 int stride_tx;
 int stride_rx;

 unsigned int max_r_vecs;
 struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS];
 struct msix_entry irq_entries[NFP_NET_MAX_IRQS];

 irq_handler_t lsc_handler;
 char lsc_name[IFNAMSIZ + 8];

 irq_handler_t exn_handler;
 char exn_name[IFNAMSIZ + 8];

 irq_handler_t shared_handler;
 char shared_name[IFNAMSIZ + 8];

 u32 me_freq_mhz;

 bool link_up;
 spinlock_t link_status_lock;

 spinlock_t reconfig_lock;
 u32 reconfig_posted;
 bool reconfig_timer_active;
 bool reconfig_sync_present;
 struct timer_list reconfig_timer;

 u32 rx_coalesce_usecs;
 u32 rx_coalesce_max_frames;
 u32 tx_coalesce_usecs;
 u32 tx_coalesce_max_frames;

 __be16 vxlan_ports[NFP_NET_N_VXLAN_PORTS];
 u8 vxlan_usecnt[NFP_NET_N_VXLAN_PORTS];

 u8 __iomem *qcp_cfg;

 u8 __iomem *tx_bar;
 u8 __iomem *rx_bar;

 struct nfp_net_tlv_caps tlv_caps;

 struct dentry *debugfs_dir;

 struct list_head vnic_list;

 struct pci_dev *pdev;
 struct nfp_app *app;

 struct nfp_port *port;

 void *app_priv;
};
667
668
669
670
671static inline u16 nn_readb(struct nfp_net *nn, int off)
672{
673 return readb(nn->dp.ctrl_bar + off);
674}
675
/* Functions to read/write from/to the control BAR at various widths.
 * The underlying read*()/write*() accessors perform any endian
 * conversion necessary.
 */

static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
{
 writeb(val, nn->dp.ctrl_bar + off);
}

static inline u16 nn_readw(struct nfp_net *nn, int off)
{
 return readw(nn->dp.ctrl_bar + off);
}

static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
{
 writew(val, nn->dp.ctrl_bar + off);
}

static inline u32 nn_readl(struct nfp_net *nn, int off)
{
 return readl(nn->dp.ctrl_bar + off);
}

static inline void nn_writel(struct nfp_net *nn, int off, u32 val)
{
 writel(val, nn->dp.ctrl_bar + off);
}

static inline u64 nn_readq(struct nfp_net *nn, int off)
{
 return readq(nn->dp.ctrl_bar + off);
}

static inline void nn_writeq(struct nfp_net *nn, int off, u64 val)
{
 writeq(val, nn->dp.ctrl_bar + off);
}
710
711
/* Flush posted PCI writes by reading a register without side effects */
static inline void nn_pci_flush(struct nfp_net *nn)
{
 nn_readl(nn, NFP_NET_CFG_VERSION);
}
716
717
718
719
720
721
722
723
724
725
/* Queue Controller Peripheral (QCP) access definitions.
 * A QCP queue is accessed via its memory-mapped base address plus the
 * fixed register offsets below; pointer values are masked to 18 bits.
 */
#define NFP_QCP_QUEUE_ADDR_SZ 0x800
#define NFP_QCP_QUEUE_AREA_SZ 0x80000
#define NFP_QCP_QUEUE_OFF(_x) ((_x) * NFP_QCP_QUEUE_ADDR_SZ)
#define NFP_QCP_QUEUE_ADD_RPTR 0x0000
#define NFP_QCP_QUEUE_ADD_WPTR 0x0004
#define NFP_QCP_QUEUE_STS_LO 0x0008
#define NFP_QCP_QUEUE_STS_LO_READPTR_mask 0x3ffff
#define NFP_QCP_QUEUE_STS_HI 0x000c
#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask 0x3ffff

/* The offset of a QCP queue in the PCIe Target */
#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))

/* nfp_qcp_ptr - which pointer of a queue to operate on */
enum nfp_qcp_ptr {
 NFP_QCP_READ_PTR = 0,
 NFP_QCP_WRITE_PTR
};
744
745
746
747
748
749#define NFP_QCP_MAX_ADD 0x3f
750
751static inline void _nfp_qcp_ptr_add(u8 __iomem *q,
752 enum nfp_qcp_ptr ptr, u32 val)
753{
754 u32 off;
755
756 if (ptr == NFP_QCP_READ_PTR)
757 off = NFP_QCP_QUEUE_ADD_RPTR;
758 else
759 off = NFP_QCP_QUEUE_ADD_WPTR;
760
761 while (val > NFP_QCP_MAX_ADD) {
762 writel(NFP_QCP_MAX_ADD, q + off);
763 val -= NFP_QCP_MAX_ADD;
764 }
765
766 writel(val, q + off);
767}
768
769
770
771
772
773
774
775
776
/**
 * nfp_qcp_rd_ptr_add() - Add the value to the read pointer of a queue
 * @q:   Base address for queue structure
 * @val: Value to add to the queue read pointer
 */
static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
{
 _nfp_qcp_ptr_add(q, NFP_QCP_READ_PTR, val);
}

/**
 * nfp_qcp_wr_ptr_add() - Add the value to the write pointer of a queue
 * @q:   Base address for queue structure
 * @val: Value to add to the queue write pointer
 */
static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
{
 _nfp_qcp_ptr_add(q, NFP_QCP_WRITE_PTR, val);
}
794
795static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
796{
797 u32 off;
798 u32 val;
799
800 if (ptr == NFP_QCP_READ_PTR)
801 off = NFP_QCP_QUEUE_STS_LO;
802 else
803 off = NFP_QCP_QUEUE_STS_HI;
804
805 val = readl(q + off);
806
807 if (ptr == NFP_QCP_READ_PTR)
808 return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
809 else
810 return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
811}
812
813
814
815
816
817
818
/**
 * nfp_qcp_rd_ptr_read() - Read the current read pointer value of a queue
 * @q:  Base address for queue structure
 *
 * Return: value read
 */
static inline u32 nfp_qcp_rd_ptr_read(u8 __iomem *q)
{
 return _nfp_qcp_read(q, NFP_QCP_READ_PTR);
}

/**
 * nfp_qcp_wr_ptr_read() - Read the current write pointer value of a queue
 * @q:  Base address for queue structure
 *
 * Return: value read
 */
static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
{
 return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
}
834
/* Data vNICs have a netdev, control vNICs do not - and a vNIC without a
 * netdev must not have a port either, hence the WARN.
 */
static inline bool nfp_net_is_data_vnic(struct nfp_net *nn)
{
 WARN_ON_ONCE(!nn->dp.netdev && nn->port);
 return !!nn->dp.netdev;
}
840
/* Device is enabled iff the ENABLE bit is set in the cached ctrl word */
static inline bool nfp_net_running(struct nfp_net *nn)
{
 return nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE;
}
845
/* Name to use in log messages: netdev name for data vNICs, "ctrl" otherwise */
static inline const char *nfp_net_name(struct nfp_net *nn)
{
 return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
}
850
/* Take/release the first ring vector's lock (BH-safe); used to serialize
 * control vNIC operations.  The sparse annotations document the lock
 * acquired/released for static analysis.
 */
static inline void nfp_ctrl_lock(struct nfp_net *nn)
 __acquires(&nn->r_vecs[0].lock)
{
 spin_lock_bh(&nn->r_vecs[0].lock);
}

static inline void nfp_ctrl_unlock(struct nfp_net *nn)
 __releases(&nn->r_vecs[0].lock)
{
 spin_unlock_bh(&nn->r_vecs[0].lock);
}
862
863
/* Globals */
extern const char nfp_driver_version[];

extern const struct net_device_ops nfp_net_netdev_ops;

/* A net_device belongs to this driver iff it uses our netdev ops */
static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev)
{
 return netdev->netdev_ops == &nfp_net_netdev_ops;
}
872
873
/* Prototypes */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
 void __iomem *ctrl_bar);

/* vNIC lifetime: alloc -> init -> ... -> clean -> free */
struct nfp_net *
nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
 unsigned int max_tx_rings, unsigned int max_rx_rings);
void nfp_net_free(struct nfp_net *nn);

int nfp_net_init(struct nfp_net *nn);
void nfp_net_clean(struct nfp_net *nn);

int nfp_ctrl_open(struct nfp_net *nn);
void nfp_ctrl_close(struct nfp_net *nn);

void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn);
int nfp_net_reconfig(struct nfp_net *nn, u32 update);
unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);

/* MSI-X interrupt management */
unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
 unsigned int min_irqs, unsigned int want_irqs);
void nfp_net_irqs_disable(struct pci_dev *pdev);
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
 unsigned int n);

/* Datapath reconfiguration */
struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
 struct netlink_ext_ack *extack);
907
/* Debugfs helpers - compiled out to no-op stubs without CONFIG_NFP_DEBUG */
#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void);
struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev);
void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id);
void nfp_net_debugfs_dir_clean(struct dentry **dir);
#else
static inline void nfp_net_debugfs_create(void)
{
}

static inline void nfp_net_debugfs_destroy(void)
{
}

static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
{
 return NULL;
}

static inline void
nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id)
{
}

static inline void nfp_net_debugfs_dir_clean(struct dentry **dir)
{
}
#endif
937
938#endif
939