linux/drivers/net/ethernet/netronome/nfp/nfp_net.h
/*
 * Copyright (C) 2015-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided below.
 * You have the option to license this software under the complete terms of
 * either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net.h
 * Declarations for Netronome network device driver.
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#ifndef _NFP_NET_H_
#define _NFP_NET_H_

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-hi-lo.h>

#include "nfp_net_ctrl.h"

#define nn_err(nn, fmt, args...)  netdev_err((nn)->netdev, fmt, ## args)
#define nn_warn(nn, fmt, args...) netdev_warn((nn)->netdev, fmt, ## args)
#define nn_info(nn, fmt, args...) netdev_info((nn)->netdev, fmt, ## args)
#define nn_dbg(nn, fmt, args...)  netdev_dbg((nn)->netdev, fmt, ## args)
#define nn_warn_ratelimit(nn, fmt, args...)                             \
        do {                                                            \
                if (unlikely(net_ratelimit()))                          \
                        netdev_warn((nn)->netdev, fmt, ## args);        \
        } while (0)

/* Max time to wait for NFP to respond on updates (in seconds) */
#define NFP_NET_POLL_TIMEOUT    5

/* Bar allocation */
#define NFP_NET_CTRL_BAR        0
#define NFP_NET_Q0_BAR          2
#define NFP_NET_Q1_BAR          4       /* OBSOLETE */

/* Max bits in DMA address */
#define NFP_NET_MAX_DMA_BITS    40

/* Default MTU; the freelist buffer size is derived from it */
#define NFP_NET_DEFAULT_MTU             1500

/* Maximum number of bytes prepended to a packet */
#define NFP_NET_MAX_PREPEND             64

/* Interrupt definitions */
#define NFP_NET_NON_Q_VECTORS           2
#define NFP_NET_IRQ_LSC_IDX             0
#define NFP_NET_IRQ_EXN_IDX             1
#define NFP_NET_MIN_PORT_IRQS           (NFP_NET_NON_Q_VECTORS + 1)

/* Queue/Ring definitions */
#define NFP_NET_MAX_TX_RINGS    64      /* Max. # of Tx rings per device */
#define NFP_NET_MAX_RX_RINGS    64      /* Max. # of Rx rings per device */
#define NFP_NET_MAX_R_VECS      (NFP_NET_MAX_TX_RINGS > NFP_NET_MAX_RX_RINGS ? \
                                 NFP_NET_MAX_TX_RINGS : NFP_NET_MAX_RX_RINGS)
#define NFP_NET_MAX_IRQS        (NFP_NET_NON_Q_VECTORS + NFP_NET_MAX_R_VECS)

#define NFP_NET_MIN_TX_DESCS    256     /* Min. # of Tx descs per ring */
#define NFP_NET_MIN_RX_DESCS    256     /* Min. # of Rx descs per ring */
#define NFP_NET_MAX_TX_DESCS    (256 * 1024) /* Max. # of Tx descs per ring */
#define NFP_NET_MAX_RX_DESCS    (256 * 1024) /* Max. # of Rx descs per ring */

#define NFP_NET_TX_DESCS_DEFAULT 4096   /* Default # of Tx descs per ring */
#define NFP_NET_RX_DESCS_DEFAULT 4096   /* Default # of Rx descs per ring */

#define NFP_NET_FL_BATCH        16      /* Add freelist buffers in this batch size */

/* Offload definitions */
#define NFP_NET_N_VXLAN_PORTS   (NFP_NET_CFG_VXLAN_SZ / sizeof(__be16))

#define NFP_NET_RX_BUF_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
#define NFP_NET_RX_BUF_NON_DATA (NFP_NET_RX_BUF_HEADROOM +              \
                                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* Forward declarations */
struct nfp_cpp;
struct nfp_net;
struct nfp_net_r_vector;

/* Convenience macro for writing dma address into RX/TX descriptors */
#define nfp_desc_set_dma_addr(desc, dma_addr)                           \
        do {                                                            \
                __typeof(desc) __d = (desc);                            \
                dma_addr_t __addr = (dma_addr);                         \
                                                                        \
                __d->dma_addr_lo = cpu_to_le32(lower_32_bits(__addr));  \
                __d->dma_addr_hi = upper_32_bits(__addr) & 0xff;        \
        } while (0)

/* TX descriptor format */

#define PCIE_DESC_TX_EOP                BIT(7)
#define PCIE_DESC_TX_OFFSET_MASK        GENMASK(6, 0)
#define PCIE_DESC_TX_MSS_MASK           GENMASK(13, 0)

/* Flags in the host TX descriptor */
#define PCIE_DESC_TX_CSUM               BIT(7)
#define PCIE_DESC_TX_IP4_CSUM           BIT(6)
#define PCIE_DESC_TX_TCP_CSUM           BIT(5)
#define PCIE_DESC_TX_UDP_CSUM           BIT(4)
#define PCIE_DESC_TX_VLAN               BIT(3)
#define PCIE_DESC_TX_LSO                BIT(2)
#define PCIE_DESC_TX_ENCAP              BIT(1)
#define PCIE_DESC_TX_O_IP4_CSUM BIT(0)

struct nfp_net_tx_desc {
        union {
                struct {
                        u8 dma_addr_hi; /* High bits of host buf address */
                        __le16 dma_len; /* Length to DMA for this desc */
                        u8 offset_eop;  /* Offset in buf where pkt starts +
                                         * highest bit is eop flag.
                                         */
                        __le32 dma_addr_lo; /* Low 32bit of host buf addr */

                        __le16 mss;     /* MSS to be used for LSO */
                        u8 l4_offset;   /* LSO, where the L4 data starts */
                        u8 flags;       /* TX Flags, see @PCIE_DESC_TX_* */

                        __le16 vlan;    /* VLAN tag to add if indicated */
                        __le16 data_len; /* Length of frame + meta data */
                } __packed;
                __le32 vals[4];
        };
};
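
/* Example (illustrative sketch only, not used by the driver): filling a TX
 * descriptor for a simple single-fragment frame with no offloads.  The
 * helper name and parameters are hypothetical; the field usage follows the
 * definitions above.
 */
static inline void
nfp_net_tx_desc_fill_example(struct nfp_net_tx_desc *txd, dma_addr_t dma_addr,
                             unsigned int dma_len, unsigned int pkt_offset)
{
        /* Split the host buffer address into the lo/hi descriptor fields */
        nfp_desc_set_dma_addr(txd, dma_addr);
        txd->dma_len = cpu_to_le16(dma_len);
        /* Offset of packet data within the buffer, plus EOP for the last
         * (here: only) fragment of the frame.
         */
        txd->offset_eop = (pkt_offset & PCIE_DESC_TX_OFFSET_MASK) |
                          PCIE_DESC_TX_EOP;
        txd->mss = 0;           /* no LSO */
        txd->l4_offset = 0;
        txd->flags = 0;         /* no checksum offload requested */
        txd->vlan = 0;
        txd->data_len = cpu_to_le16(dma_len);
}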

/**
 * struct nfp_net_tx_buf - software TX buffer descriptor
 * @skb:        sk_buff associated with this buffer
 * @dma_addr:   DMA mapping address of the buffer
 * @fidx:       Fragment index (-1 for the head and [0..nr_frags-1] for frags)
 * @pkt_cnt:    Number of packets to be produced out of the skb associated
 *              with this buffer (valid only on the head's buffer).
 *              Will be 1 for all non-TSO packets.
 * @real_len:   Number of bytes to be produced out of the skb (valid only
 *              on the head's buffer). Equal to skb->len for non-TSO packets.
 */
struct nfp_net_tx_buf {
        struct sk_buff *skb;
        dma_addr_t dma_addr;
        short int fidx;
        u16 pkt_cnt;
        u32 real_len;
};

/**
 * struct nfp_net_tx_ring - TX ring structure
 * @r_vec:      Back pointer to ring vector structure
 * @idx:        Ring index from Linux's perspective
 * @qcidx:      Queue Controller Peripheral (QCP) queue index for the TX queue
 * @qcp_q:      Pointer to base of the QCP TX queue
 * @cnt:        Size of the queue in number of descriptors
 * @wr_p:       TX ring write pointer (free running)
 * @rd_p:       TX ring read pointer (free running)
 * @qcp_rd_p:   Local copy of QCP TX queue read pointer
 * @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer
 *              (used for .xmit_more delayed kick)
 * @txbufs:     Array of software TX buffers, freed once transmission completes
 * @txds:       Virtual address of TX ring in host memory
 * @dma:        DMA address of the TX ring
 * @size:       Size, in bytes, of the TX ring (needed to free)
 */
struct nfp_net_tx_ring {
        struct nfp_net_r_vector *r_vec;

        u32 idx;
        int qcidx;
        u8 __iomem *qcp_q;

        u32 cnt;
        u32 wr_p;
        u32 rd_p;
        u32 qcp_rd_p;

        u32 wr_ptr_add;

        struct nfp_net_tx_buf *txbufs;
        struct nfp_net_tx_desc *txds;

        dma_addr_t dma;
        unsigned int size;
} ____cacheline_aligned;

/* RX and freelist descriptor format */

#define PCIE_DESC_RX_DD                 BIT(7)
#define PCIE_DESC_RX_META_LEN_MASK      GENMASK(6, 0)

/* Flags in the RX descriptor */
#define PCIE_DESC_RX_RSS                cpu_to_le16(BIT(15))
#define PCIE_DESC_RX_I_IP4_CSUM         cpu_to_le16(BIT(14))
#define PCIE_DESC_RX_I_IP4_CSUM_OK      cpu_to_le16(BIT(13))
#define PCIE_DESC_RX_I_TCP_CSUM         cpu_to_le16(BIT(12))
#define PCIE_DESC_RX_I_TCP_CSUM_OK      cpu_to_le16(BIT(11))
#define PCIE_DESC_RX_I_UDP_CSUM         cpu_to_le16(BIT(10))
#define PCIE_DESC_RX_I_UDP_CSUM_OK      cpu_to_le16(BIT(9))
#define PCIE_DESC_RX_SPARE              cpu_to_le16(BIT(8))
#define PCIE_DESC_RX_EOP                cpu_to_le16(BIT(7))
#define PCIE_DESC_RX_IP4_CSUM           cpu_to_le16(BIT(6))
#define PCIE_DESC_RX_IP4_CSUM_OK        cpu_to_le16(BIT(5))
#define PCIE_DESC_RX_TCP_CSUM           cpu_to_le16(BIT(4))
#define PCIE_DESC_RX_TCP_CSUM_OK        cpu_to_le16(BIT(3))
#define PCIE_DESC_RX_UDP_CSUM           cpu_to_le16(BIT(2))
#define PCIE_DESC_RX_UDP_CSUM_OK        cpu_to_le16(BIT(1))
#define PCIE_DESC_RX_VLAN               cpu_to_le16(BIT(0))

#define PCIE_DESC_RX_CSUM_ALL           (PCIE_DESC_RX_IP4_CSUM |        \
                                         PCIE_DESC_RX_TCP_CSUM |        \
                                         PCIE_DESC_RX_UDP_CSUM |        \
                                         PCIE_DESC_RX_I_IP4_CSUM |      \
                                         PCIE_DESC_RX_I_TCP_CSUM |      \
                                         PCIE_DESC_RX_I_UDP_CSUM)
#define PCIE_DESC_RX_CSUM_OK_SHIFT      1
#define __PCIE_DESC_RX_CSUM_ALL         le16_to_cpu(PCIE_DESC_RX_CSUM_ALL)
#define __PCIE_DESC_RX_CSUM_ALL_OK      (__PCIE_DESC_RX_CSUM_ALL >>     \
                                         PCIE_DESC_RX_CSUM_OK_SHIFT)

struct nfp_net_rx_desc {
        union {
                struct {
                        u8 dma_addr_hi; /* High bits of the buf address */
                        __le16 reserved; /* Must be zero */
                        u8 meta_len_dd; /* Must be zero */

                        __le32 dma_addr_lo; /* Low bits of the buffer address */
                } __packed fld;

                struct {
                        __le16 data_len; /* Length of the frame + meta data */
                        u8 reserved;
                        u8 meta_len_dd; /* Length of meta data prepended +
                                         * descriptor done flag.
                                         */

                        __le16 flags;   /* RX flags. See @PCIE_DESC_RX_* */
                        __le16 vlan;    /* VLAN if stripped */
                } __packed rxd;

                __le32 vals[2];
        };
};
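
/* Example (illustrative sketch only, these helpers are hypothetical): the
 * meta_len_dd byte of a returned RX descriptor carries the "descriptor done"
 * flag in its top bit and the length of any prepended metadata in the low
 * seven bits.
 */
static inline bool nfp_net_rx_desc_done_example(const struct nfp_net_rx_desc *rxd)
{
        return rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD;
}

static inline unsigned int
nfp_net_rx_meta_len_example(const struct nfp_net_rx_desc *rxd)
{
        return rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
}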

struct nfp_net_rx_hash {
        __be32 hash_type;
        __be32 hash;
};

/**
 * struct nfp_net_rx_buf - software RX buffer descriptor
 * @frag:       page fragment buffer
 * @dma_addr:   DMA mapping address of the buffer
 */
struct nfp_net_rx_buf {
        void *frag;
        dma_addr_t dma_addr;
};

/**
 * struct nfp_net_rx_ring - RX ring structure
 * @r_vec:      Back pointer to ring vector structure
 * @cnt:        Size of the queue in number of descriptors
 * @wr_p:       FL/RX ring write pointer (free running)
 * @rd_p:       FL/RX ring read pointer (free running)
 * @idx:        Ring index from Linux's perspective
 * @fl_qcidx:   Queue Controller Peripheral (QCP) queue index for the freelist
 * @rx_qcidx:   Queue Controller Peripheral (QCP) queue index for the RX queue
 * @qcp_fl:     Pointer to base of the QCP freelist queue
 * @qcp_rx:     Pointer to base of the QCP RX queue
 * @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer
 *              (used for free list batching)
 * @rxbufs:     Array of software FL/RX buffers (one per ring entry)
 * @rxds:       Virtual address of FL/RX ring in host memory
 * @dma:        DMA address of the FL/RX ring
 * @size:       Size, in bytes, of the FL/RX ring (needed to free)
 * @bufsz:      Buffer allocation size for convenience of management routines
 *              (NOTE: this is in second cache line, do not use on fast path!)
 */
struct nfp_net_rx_ring {
        struct nfp_net_r_vector *r_vec;

        u32 cnt;
        u32 wr_p;
        u32 rd_p;

        u16 idx;
        u16 wr_ptr_add;

        int fl_qcidx;
        int rx_qcidx;
        u8 __iomem *qcp_fl;
        u8 __iomem *qcp_rx;

        struct nfp_net_rx_buf *rxbufs;
        struct nfp_net_rx_desc *rxds;

        dma_addr_t dma;
        unsigned int size;
        unsigned int bufsz;
} ____cacheline_aligned;

/**
 * struct nfp_net_r_vector - Per ring interrupt vector configuration
 * @nfp_net:        Backpointer to nfp_net structure
 * @napi:           NAPI structure for this ring vec
 * @tx_ring:        Pointer to TX ring
 * @rx_ring:        Pointer to RX ring
 * @irq_entry:      MSI-X table entry (use for talking to the device)
 * @rx_sync:        Seqlock for atomic updates of RX stats
 * @rx_pkts:        Number of received packets
 * @rx_bytes:       Number of received bytes
 * @rx_drops:       Number of packets dropped on RX due to lack of resources
 * @hw_csum_rx_ok:  Counter of packets where the HW checksum was OK
 * @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK
 * @hw_csum_rx_error:    Counter of packets with bad checksums
 * @tx_sync:        Seqlock for atomic updates of TX stats
 * @tx_pkts:        Number of Transmitted packets
 * @tx_bytes:       Number of Transmitted bytes
 * @hw_csum_tx:     Counter of packets with TX checksum offload requested
 * @hw_csum_tx_inner:    Counter of inner TX checksum offload requests
 * @tx_gather:      Counter of packets with Gather DMA
 * @tx_lso:         Counter of LSO packets sent
 * @tx_errors:      How many TX errors were encountered
 * @tx_busy:        How often was TX busy (no space)?
 * @irq_vector:     Interrupt vector number (use for talking to the OS)
 * @handler:        Interrupt handler for this ring vector
 * @name:           Name of the interrupt vector
 * @affinity_mask:  SMP affinity mask for this vector
 *
 * This structure ties RX and TX rings to interrupt vectors and a NAPI
 * context. This currently only supports one RX and TX ring per
 * interrupt vector but might be extended in the future to allow
 * association of multiple rings per vector.
 */
struct nfp_net_r_vector {
        struct nfp_net *nfp_net;
        struct napi_struct napi;

        struct nfp_net_tx_ring *tx_ring;
        struct nfp_net_rx_ring *rx_ring;

        u16 irq_entry;

        struct u64_stats_sync rx_sync;
        u64 rx_pkts;
        u64 rx_bytes;
        u64 rx_drops;
        u64 hw_csum_rx_ok;
        u64 hw_csum_rx_inner_ok;
        u64 hw_csum_rx_error;

        struct u64_stats_sync tx_sync;
        u64 tx_pkts;
        u64 tx_bytes;
        u64 hw_csum_tx;
        u64 hw_csum_tx_inner;
        u64 tx_gather;
        u64 tx_lso;
        u64 tx_errors;
        u64 tx_busy;

        u32 irq_vector;
        irq_handler_t handler;
        char name[IFNAMSIZ + 8];
        cpumask_t affinity_mask;
} ____cacheline_aligned;

/* Firmware version as it is written in the 32bit value in the BAR */
struct nfp_net_fw_version {
        u8 minor;
        u8 major;
        u8 class;
        u8 resv;
} __packed;

static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
                                     u8 resv, u8 class, u8 major, u8 minor)
{
        return fw_ver->resv == resv &&
               fw_ver->class == class &&
               fw_ver->major == major &&
               fw_ver->minor == minor;
}

/**
 * struct nfp_net - NFP network device structure
 * @pdev:               Backpointer to PCI device
 * @netdev:             Backpointer to net_device structure
 * @is_vf:              Is the driver attached to a VF?
 * @fw_loaded:          Is the firmware loaded?
 * @ctrl:               Local copy of the control register/word.
 * @fl_bufsz:           Currently configured size of the freelist buffers
 * @rx_offset:          Offset in the RX buffers where packet data starts
 * @fw_ver:             Firmware version
 * @cap:                Capabilities advertised by the Firmware
 * @max_mtu:            Maximum supported MTU advertised by the Firmware
 * @rss_cfg:            RSS configuration
 * @rss_key:            RSS secret key
 * @rss_itbl:           RSS indirection table
 * @max_tx_rings:       Maximum number of TX rings supported by the Firmware
 * @max_rx_rings:       Maximum number of RX rings supported by the Firmware
 * @num_tx_rings:       Currently configured number of TX rings
 * @num_rx_rings:       Currently configured number of RX rings
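 * @stride_tx:          Spacing between QCP queues of consecutive TX rings
 * @stride_rx:          Spacing between QCP queues of consecutive RX rings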
 * @txd_cnt:            Size of the TX ring in number of descriptors
 * @rxd_cnt:            Size of the RX ring in number of descriptors
 * @tx_rings:           Array of pre-allocated TX ring structures
 * @rx_rings:           Array of pre-allocated RX ring structures
 * @max_r_vecs:         Number of allocated interrupt vectors for RX/TX
 * @num_r_vecs:         Number of used ring vectors
 * @r_vecs:             Pre-allocated array of ring vectors
 * @irq_entries:        Pre-allocated array of MSI-X entries
 * @lsc_handler:        Handler for Link State Change interrupt
 * @lsc_name:           Name for Link State Change interrupt
 * @exn_handler:        Handler for Exception interrupt
 * @exn_name:           Name for Exception interrupt
 * @shared_handler:     Handler for shared interrupts
 * @shared_name:        Name for shared interrupt
 * @me_freq_mhz:        ME clock_freq (MHz)
 * @reconfig_lock:      Protects HW reconfiguration request regs/machinery
 * @reconfig_posted:    Pending reconfig bits coming from async sources
 * @reconfig_timer_active:  Timer for reading reconfiguration results is pending
 * @reconfig_sync_present:  Some thread is performing synchronous reconfig
 * @reconfig_timer:     Timer for async reading of reconfig results
 * @link_up:            Is the link up?
 * @link_status_lock:   Protects @link_up and ensures atomicity with BAR reading
 * @rx_coalesce_usecs:      RX interrupt moderation usecs delay parameter
 * @rx_coalesce_max_frames: RX interrupt moderation frame count parameter
 * @tx_coalesce_usecs:      TX interrupt moderation usecs delay parameter
 * @tx_coalesce_max_frames: TX interrupt moderation frame count parameter
 * @vxlan_ports:        VXLAN ports for RX inner csum offload communicated to HW
 * @vxlan_usecnt:       IPv4/IPv6 VXLAN port use counts
 * @qcp_cfg:            Pointer to QCP queue used for configuration notification
 * @ctrl_bar:           Pointer to mapped control BAR
 * @tx_bar:             Pointer to mapped TX queues
 * @rx_bar:             Pointer to mapped FL/RX queues
 * @debugfs_dir:        Device directory in debugfs
 * @ethtool_dump_flag:  Ethtool dump flag
 * @port_list:          Entry on device port list
 * @cpp:                CPP device handle if available
 */
struct nfp_net {
        struct pci_dev *pdev;
        struct net_device *netdev;

        unsigned is_vf:1;
        unsigned fw_loaded:1;

        u32 ctrl;
        u32 fl_bufsz;

        u32 rx_offset;

        struct nfp_net_tx_ring *tx_rings;
        struct nfp_net_rx_ring *rx_rings;

        struct nfp_net_fw_version fw_ver;
        u32 cap;
        u32 max_mtu;

        u32 rss_cfg;
        u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
        u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];

        unsigned int max_tx_rings;
        unsigned int max_rx_rings;

        unsigned int num_tx_rings;
        unsigned int num_rx_rings;

        int stride_tx;
        int stride_rx;

        int txd_cnt;
        int rxd_cnt;

        unsigned int max_r_vecs;
        unsigned int num_r_vecs;
        struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS];
        struct msix_entry irq_entries[NFP_NET_MAX_IRQS];

        irq_handler_t lsc_handler;
        char lsc_name[IFNAMSIZ + 8];

        irq_handler_t exn_handler;
        char exn_name[IFNAMSIZ + 8];

        irq_handler_t shared_handler;
        char shared_name[IFNAMSIZ + 8];

        u32 me_freq_mhz;

        bool link_up;
        spinlock_t link_status_lock;

        spinlock_t reconfig_lock;
        u32 reconfig_posted;
        bool reconfig_timer_active;
        bool reconfig_sync_present;
        struct timer_list reconfig_timer;

        u32 rx_coalesce_usecs;
        u32 rx_coalesce_max_frames;
        u32 tx_coalesce_usecs;
        u32 tx_coalesce_max_frames;

        __be16 vxlan_ports[NFP_NET_N_VXLAN_PORTS];
        u8 vxlan_usecnt[NFP_NET_N_VXLAN_PORTS];

        u8 __iomem *qcp_cfg;

        u8 __iomem *ctrl_bar;
        u8 __iomem *tx_bar;
        u8 __iomem *rx_bar;

        struct dentry *debugfs_dir;
        u32 ethtool_dump_flag;

        struct list_head port_list;

        struct nfp_cpp *cpp;
};

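/**
 * struct nfp_net_ring_set - set of rings and their configuration parameters
 * @n_rings:    Number of rings in the set
 * @mtu:        MTU the rings are configured for
 * @dcnt:       Number of descriptors per ring
 * @rings:      Rings themselves (array of TX or RX ring structures)
 */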
struct nfp_net_ring_set {
        unsigned int n_rings;
        unsigned int mtu;
        unsigned int dcnt;
        void *rings;
};

/* Functions to read/write from/to a BAR
 * Perform any endian conversion necessary.
 */
static inline u8 nn_readb(struct nfp_net *nn, int off)
{
        return readb(nn->ctrl_bar + off);
}

static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
{
        writeb(val, nn->ctrl_bar + off);
}

static inline u16 nn_readw(struct nfp_net *nn, int off)
{
        return readw(nn->ctrl_bar + off);
}

static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
{
        writew(val, nn->ctrl_bar + off);
}

static inline u32 nn_readl(struct nfp_net *nn, int off)
{
        return readl(nn->ctrl_bar + off);
}

static inline void nn_writel(struct nfp_net *nn, int off, u32 val)
{
        writel(val, nn->ctrl_bar + off);
}

static inline u64 nn_readq(struct nfp_net *nn, int off)
{
        return readq(nn->ctrl_bar + off);
}

static inline void nn_writeq(struct nfp_net *nn, int off, u64 val)
{
        writeq(val, nn->ctrl_bar + off);
}

/* Flush posted PCI writes by reading something without side effects */
static inline void nn_pci_flush(struct nfp_net *nn)
{
        nn_readl(nn, NFP_NET_CFG_VERSION);
}
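
/* Example (illustrative sketch only, hypothetical helper): the accessors
 * above are how the driver talks to the control BAR, e.g. reading the
 * capabilities word (NFP_NET_CFG_CAP from nfp_net_ctrl.h) and testing a
 * feature bit advertised by the firmware.
 */
static inline bool nfp_net_has_cap_example(struct nfp_net *nn, u32 cap_bit)
{
        return (nn_readl(nn, NFP_NET_CFG_CAP) & cap_bit) != 0;
}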

/* Queue Controller Peripheral access functions and definitions.
 *
 * Some of the BARs of the NFP are mapped to portions of the Queue
 * Controller Peripheral (QCP) address space on the NFP.  A QCP queue
 * has a read and a write pointer (as well as a size and flags,
 * indicating overflow etc).  The QCP offers a number of different
 * operations on queue pointers, but here we only provide functions to
 * either add to a pointer or to read the pointer value.
 */
#define NFP_QCP_QUEUE_ADDR_SZ                   0x800
#define NFP_QCP_QUEUE_OFF(_x)                   ((_x) * NFP_QCP_QUEUE_ADDR_SZ)
#define NFP_QCP_QUEUE_ADD_RPTR                  0x0000
#define NFP_QCP_QUEUE_ADD_WPTR                  0x0004
#define NFP_QCP_QUEUE_STS_LO                    0x0008
#define NFP_QCP_QUEUE_STS_LO_READPTR_mask       0x3ffff
#define NFP_QCP_QUEUE_STS_HI                    0x000c
#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask      0x3ffff

/* The offset of the QCP queues in the PCIe Target */
#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))

/* nfp_qcp_ptr - Read or Write Pointer of a queue */
enum nfp_qcp_ptr {
        NFP_QCP_READ_PTR = 0,
        NFP_QCP_WRITE_PTR
};

/* There appears to be an *undocumented* upper limit on the value which
 * one can add to a queue and that value is either 0x3f or 0x7f.  We
 * go with 0x3f as a conservative measure.
 */
#define NFP_QCP_MAX_ADD                         0x3f

static inline void _nfp_qcp_ptr_add(u8 __iomem *q,
                                    enum nfp_qcp_ptr ptr, u32 val)
{
        u32 off;

        if (ptr == NFP_QCP_READ_PTR)
                off = NFP_QCP_QUEUE_ADD_RPTR;
        else
                off = NFP_QCP_QUEUE_ADD_WPTR;

        while (val > NFP_QCP_MAX_ADD) {
                writel(NFP_QCP_MAX_ADD, q + off);
                val -= NFP_QCP_MAX_ADD;
        }

        writel(val, q + off);
}

/**
 * nfp_qcp_rd_ptr_add() - Add the value to the read pointer of a queue
 *
 * @q:   Base address for queue structure
 * @val: Value to add to the queue pointer
 *
 * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
 */
static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
{
        _nfp_qcp_ptr_add(q, NFP_QCP_READ_PTR, val);
}

/**
 * nfp_qcp_wr_ptr_add() - Add the value to the write pointer of a queue
 *
 * @q:   Base address for queue structure
 * @val: Value to add to the queue pointer
 *
 * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
 */
static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
{
        _nfp_qcp_ptr_add(q, NFP_QCP_WRITE_PTR, val);
}

static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
{
        u32 off;
        u32 val;

        if (ptr == NFP_QCP_READ_PTR)
                off = NFP_QCP_QUEUE_STS_LO;
        else
                off = NFP_QCP_QUEUE_STS_HI;

        val = readl(q + off);

        if (ptr == NFP_QCP_READ_PTR)
                return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
        else
                return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
}

/**
 * nfp_qcp_rd_ptr_read() - Read the current read pointer value for a queue
 * @q:  Base address for queue structure
 *
 * Return: Value read.
 */
static inline u32 nfp_qcp_rd_ptr_read(u8 __iomem *q)
{
        return _nfp_qcp_read(q, NFP_QCP_READ_PTR);
}

/**
 * nfp_qcp_wr_ptr_read() - Read the current write pointer value for a queue
 * @q:  Base address for queue structure
 *
 * Return: Value read.
 */
static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
{
        return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
}
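
/* Example (illustrative sketch only, hypothetical helper): the usual QCP
 * pattern is to accumulate the number of newly posted descriptors in
 * @wr_ptr_add and kick the device once, letting _nfp_qcp_ptr_add() split
 * the update if it exceeds NFP_QCP_MAX_ADD.
 */
static inline void nfp_net_tx_ring_kick_example(struct nfp_net_tx_ring *tx_ring)
{
        if (tx_ring->wr_ptr_add) {
                nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
                tx_ring->wr_ptr_add = 0;
        }
}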

/* Globals */
extern const char nfp_driver_version[];

/* Prototypes */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
                            void __iomem *ctrl_bar);

struct nfp_net *
nfp_net_netdev_alloc(struct pci_dev *pdev,
                     unsigned int max_tx_rings, unsigned int max_rx_rings);
void nfp_net_netdev_free(struct nfp_net *nn);
int nfp_net_netdev_init(struct net_device *netdev);
void nfp_net_netdev_clean(struct net_device *netdev);
void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn);
int nfp_net_reconfig(struct nfp_net *nn, u32 update);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);

unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
                   unsigned int min_irqs, unsigned int want_irqs);
void nfp_net_irqs_disable(struct pci_dev *pdev);
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
                    unsigned int n);
int
nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_ring_set *rx,
                      struct nfp_net_ring_set *tx);

#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void);
struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev);
void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id);
void nfp_net_debugfs_dir_clean(struct dentry **dir);
#else
static inline void nfp_net_debugfs_create(void)
{
}

static inline void nfp_net_debugfs_destroy(void)
{
}

static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
{
        return NULL;
}

static inline void
nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id)
{
}

static inline void nfp_net_debugfs_dir_clean(struct dentry **dir)
{
}
#endif /* CONFIG_NFP_DEBUG */

#endif /* _NFP_NET_H_ */