linux/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This file should not be included directly.  Include t4vf_common.h instead.
 */

#ifndef __CXGB4VF_ADAPTER_H__
#define __CXGB4VF_ADAPTER_H__

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>

#include "../cxgb4/t4_hw.h"

/*
 * Constants of the implementation.
 */
enum {
        MAX_NPORTS      = 1,            /* max # of "ports" */
        MAX_PORT_QSETS  = 8,            /* max # of Queue Sets / "port" */
        MAX_ETH_QSETS   = MAX_NPORTS*MAX_PORT_QSETS,

        /*
         * MSI-X interrupt index usage.
         */
        MSIX_FW         = 0,            /* MSI-X index for firmware Q */
        MSIX_IQFLINT    = 1,            /* MSI-X index base for Ingress Qs */
        MSIX_EXTRAS     = 1,
        MSIX_ENTRIES    = MAX_ETH_QSETS + MSIX_EXTRAS,

        /*
         * The maximum number of Ingress and Egress Queues is determined by
         * the maximum number of "Queue Sets" which we support plus any
         * ancillary queues.  Each "Queue Set" requires one Ingress Queue
         * for RX Packet Ingress Event notifications and two Egress Queues for
         * a Free List and an Ethernet TX list.
         */
        INGQ_EXTRAS     = 2,            /* firmware event queue and */
                                        /*   forwarded interrupts */
        MAX_INGQ        = MAX_ETH_QSETS+INGQ_EXTRAS,
        MAX_EGRQ        = MAX_ETH_QSETS*2,
};
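
/*
 * Editorial sketch (not part of the original header): under the MSI-X
 * layout above, vector MSIX_FW services the firmware event queue and the
 * Ingress Queue of Queue Set "qs" is assumed to use vector MSIX_IQFLINT +
 * qs.  The helper name below is hypothetical and only illustrates that
 * arithmetic.
 */
static inline unsigned int t4vf_qset_msix_index(unsigned int qs)
{
        /* Queue Set 0 -> vector 1, ..., Queue Set N-1 -> vector N */
        return MSIX_IQFLINT + qs;
}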

/*
 * Forward structure definition references.
 */
struct adapter;
struct sge_eth_rxq;
struct sge_rspq;

/*
 * Per-"port" information.  This is really per-Virtual Interface information
 * but the use of the "port" nomenclature makes it easier to go back and forth
 * between the PF and VF drivers ...
 */
struct port_info {
        struct adapter *adapter;        /* our adapter */
        u32 vlan_id;                    /* vlan id for VST */
        u16 viid;                       /* virtual interface ID */
        int xact_addr_filt;             /* index of our MAC address filter */
        u16 rss_size;                   /* size of VI's RSS table slice */
        u8 pidx;                        /* index into adapter port[] */
        s8 mdio_addr;                   /* MDIO address of the port's PHY */
        u8 port_type;                   /* firmware port type */
        u8 mod_type;                    /* firmware module type */
        u8 port_id;                     /* physical port ID */
        u8 nqsets;                      /* # of "Queue Sets" */
        u8 first_qset;                  /* index of first "Queue Set" */
        struct link_config link_cfg;    /* physical port configuration */
};

/*
 * Scatter Gather Engine resources for the "adapter".  Our ingress and egress
 * queues are organized into "Queue Sets" with one ingress and one egress
 * queue per Queue Set.  These Queue Sets are apportionable between the "ports"
 * (Virtual Interfaces).  One extra ingress queue is used to receive
 * asynchronous messages from the firmware.  Note that the "Queue IDs" that we
 * use here are really "Relative Queue IDs" which are returned as part of the
 * firmware command to allocate queues.  These queue IDs are relative to the
 * absolute Queue ID base of the section of the Queue ID space allocated to
 * the PF/VF.
 */

/*
 * SGE free-list queue state.
 */
struct rx_sw_desc;
struct sge_fl {
        unsigned int avail;             /* # of available RX buffers */
        unsigned int pend_cred;         /* new buffers since last FL DB ring */
        unsigned int cidx;              /* consumer index */
        unsigned int pidx;              /* producer index */
        unsigned long alloc_failed;     /* # of buffer allocation failures */
        unsigned long large_alloc_failed; /* # of large page alloc failures */
        unsigned long starving;         /* # of times FL was found starving */

        /*
         * Write-once/infrequently fields.
         * -------------------------------
         */

        unsigned int cntxt_id;          /* SGE relative QID for the free list */
        unsigned int abs_id;            /* SGE absolute QID for the free list */
        unsigned int size;              /* capacity of free list */
        struct rx_sw_desc *sdesc;       /* address of SW RX descriptor ring */
        __be64 *desc;                   /* address of HW RX descriptor ring */
        dma_addr_t addr;                /* PCI bus address of hardware ring */
        void __iomem *bar2_addr;        /* address of BAR2 Queue registers */
        unsigned int bar2_qid;          /* Queue ID for BAR2 Queue registers */
};

/*
 * An ingress packet gather list.
 */
struct pkt_gl {
        struct page_frag frags[MAX_SKB_FRAGS];
        void *va;                       /* virtual address of first byte */
        unsigned int nfrags;            /* # of fragments */
        unsigned int tot_len;           /* total length of fragments */
};

typedef int (*rspq_handler_t)(struct sge_rspq *, const __be64 *,
                              const struct pkt_gl *);

/*
 * State for an SGE Response Queue.
 */
struct sge_rspq {
        struct napi_struct napi;        /* NAPI scheduling control */
        const __be64 *cur_desc;         /* current descriptor in queue */
        unsigned int cidx;              /* consumer index */
        u8 gen;                         /* current generation bit */
        u8 next_intr_params;            /* holdoff params for next interrupt */
        int offset;                     /* offset into current FL buffer */

        unsigned int unhandled_irqs;    /* bogus interrupts */

        /*
         * Write-once/infrequently fields.
         * -------------------------------
         */

        u8 intr_params;                 /* interrupt holdoff parameters */
        u8 pktcnt_idx;                  /* interrupt packet threshold */
        u8 idx;                         /* queue index within its group */
        u16 cntxt_id;                   /* SGE rel QID for the response Q */
        u16 abs_id;                     /* SGE abs QID for the response Q */
        __be64 *desc;                   /* address of hardware response ring */
        dma_addr_t phys_addr;           /* PCI bus address of ring */
        void __iomem *bar2_addr;        /* address of BAR2 Queue registers */
        unsigned int bar2_qid;          /* Queue ID for BAR2 Queue registers */
        unsigned int iqe_len;           /* entry size */
        unsigned int size;              /* capacity of response Q */
        struct adapter *adapter;        /* our adapter */
        struct net_device *netdev;      /* associated net device */
        rspq_handler_t handler;         /* the handler for this response Q */
};

/*
 * Ethernet queue statistics
 */
struct sge_eth_stats {
        unsigned long pkts;             /* # of ethernet packets */
        unsigned long lro_pkts;         /* # of LRO super packets */
        unsigned long lro_merged;       /* # of wire packets merged by LRO */
        unsigned long rx_cso;           /* # of Rx checksum offloads */
        unsigned long vlan_ex;          /* # of Rx VLAN extractions */
        unsigned long rx_drops;         /* # of packets dropped due to no mem */
};

/*
 * State for an Ethernet Receive Queue.
 */
struct sge_eth_rxq {
        struct sge_rspq rspq;           /* Response Queue */
        struct sge_fl fl;               /* Free List */
        struct sge_eth_stats stats;     /* receive statistics */
};

/*
 * SGE Transmit Queue state.  This contains all of the resources associated
 * with the hardware status of a TX Queue which is a circular ring of hardware
 * TX Descriptors.  For convenience, it also contains a pointer to a parallel
 * "Software Descriptor" array but we don't know anything about it here other
 * than its type name.
 */
struct tx_desc {
        /*
         * Egress Queues are measured in units of SGE_EQ_IDXSIZE by the
         * hardware: Sizes, Producer and Consumer indices, etc.
         */
        __be64 flit[SGE_EQ_IDXSIZE/sizeof(__be64)];
};
struct tx_sw_desc;
struct sge_txq {
        unsigned int in_use;            /* # of in-use TX descriptors */
        unsigned int size;              /* # of descriptors */
        unsigned int cidx;              /* SW consumer index */
        unsigned int pidx;              /* producer index */
        unsigned long stops;            /* # of times queue has been stopped */
        unsigned long restarts;         /* # of queue restarts */

        /*
         * Write-once/infrequently fields.
         * -------------------------------
         */

        unsigned int cntxt_id;          /* SGE relative QID for the TX Q */
        unsigned int abs_id;            /* SGE absolute QID for the TX Q */
        struct tx_desc *desc;           /* address of HW TX descriptor ring */
        struct tx_sw_desc *sdesc;       /* address of SW TX descriptor ring */
        struct sge_qstat *stat;         /* queue status entry */
        dma_addr_t phys_addr;           /* PCI bus address of hardware ring */
        void __iomem *bar2_addr;        /* address of BAR2 Queue registers */
        unsigned int bar2_qid;          /* Queue ID for BAR2 Queue registers */
};

/*
 * State for an Ethernet Transmit Queue.
 */
struct sge_eth_txq {
        struct sge_txq q;               /* SGE TX Queue */
        struct netdev_queue *txq;       /* associated netdev TX queue */
        unsigned long tso;              /* # of TSO requests */
        unsigned long tx_cso;           /* # of TX checksum offloads */
        unsigned long vlan_ins;         /* # of TX VLAN insertions */
        unsigned long mapping_err;      /* # of I/O MMU packet mapping errors */
};

/*
 * The complete set of Scatter/Gather Engine resources.
 */
struct sge {
        /*
         * Our "Queue Sets" ...
         */
        struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
        struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];

        /*
         * Extra ingress queues for asynchronous firmware events and
         * forwarded interrupts (when in MSI mode).
         */
        struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;

        struct sge_rspq intrq ____cacheline_aligned_in_smp;
        spinlock_t intrq_lock;

        /*
         * State for managing "starving Free Lists" -- Free Lists which have
         * fallen below a certain threshold of buffers available to the
         * hardware and attempts to refill them up to that threshold have
         * failed.  We have a regular "slow tick" timer process which will
         * make periodic attempts to refill these starving Free Lists ...
         */
        DECLARE_BITMAP(starving_fl, MAX_EGRQ);
        struct timer_list rx_timer;

        /*
         * State for cleaning up completed TX descriptors.
         */
        struct timer_list tx_timer;

        /*
         * Write-once/infrequently fields.
         * -------------------------------
         */

        u16 max_ethqsets;               /* # of available Ethernet queue sets */
        u16 ethqsets;                   /* # of active Ethernet queue sets */
        u16 ethtxq_rover;               /* Tx queue to clean up next */
        u16 timer_val[SGE_NTIMERS];     /* interrupt holdoff timer array */
        u8 counter_val[SGE_NCOUNTERS];  /* interrupt RX threshold array */

        /* Decoded Adapter Parameters.
         */
        u32 fl_pg_order;                /* large page allocation size */
        u32 stat_len;                   /* length of status page at ring end */
        u32 pktshift;                   /* padding between CPL & packet data */
        u32 fl_align;                   /* response queue message alignment */
        u32 fl_starve_thres;            /* Free List starvation threshold */

        /*
         * Reverse maps from Absolute Queue IDs to associated queue pointers.
         * The Absolute Queue IDs are in a compact range which starts at a
         * [potentially large] Base Queue ID.  We perform the reverse map by
         * first converting the Absolute Queue ID into a Relative Queue ID by
         * subtracting off the Base Queue ID and then using that Relative
         * Queue ID to index a table which holds the pointer to the
         * corresponding software queue structure.
         */
        unsigned int egr_base;                  /* first Absolute Egress QID */
        unsigned int ingr_base;                 /* first Absolute Ingress QID */
        void *egr_map[MAX_EGRQ];                /* Egress Queue reverse map */
        struct sge_rspq *ingr_map[MAX_INGQ];    /* Ingress Queue reverse map */
};

/*
 * Utility macros to convert Absolute Queue IDs into Relative Queue indices
 * for Egress and Ingress Queues.  The EQ_MAP() and IQ_MAP() macros, which
 * provide pointers to Egress and Ingress Queues respectively, can be used
 * as both L-values and R-values.
 */
#define EQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->egr_base))
#define IQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->ingr_base))

#define EQ_MAP(s, abs_id) ((s)->egr_map[EQ_IDX(s, abs_id)])
#define IQ_MAP(s, abs_id) ((s)->ingr_map[IQ_IDX(s, abs_id)])
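
/*
 * Editorial sketch (not part of the original header): a typical reverse-map
 * lookup takes an Absolute Ingress Queue ID reported by the hardware and
 * recovers the software Response Queue state.  The helper name below is
 * hypothetical; driver code would use IQ_MAP()/EQ_MAP() directly.
 */
static inline struct sge_rspq *t4vf_iq_lookup(struct sge *s,
                                              unsigned int abs_id)
{
        /* IQ_IDX() subtracts the Base Queue ID, IQ_MAP() indexes the table */
        return IQ_MAP(s, abs_id);
}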

/*
 * Macro to iterate across Queue Sets ("rxq" is a historic misnomer).
 */
#define for_each_ethrxq(sge, iter) \
        for (iter = 0; iter < (sge)->ethqsets; iter++)
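
/*
 * Editorial sketch (not part of the original header): for_each_ethrxq()
 * iterates over the active Queue Sets, e.g. to total up per-queue receive
 * statistics.  The helper name below is hypothetical.
 */
static inline unsigned long t4vf_count_rx_pkts(struct sge *s)
{
        unsigned long pkts = 0;
        int qs;

        for_each_ethrxq(s, qs)
                pkts += s->ethrxq[qs].stats.pkts;
        return pkts;
}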

struct hash_mac_addr {
        struct list_head list;
        u8 addr[ETH_ALEN];
        unsigned int iface_mac;
};

struct mbox_list {
        struct list_head list;
};

/*
 * Per-"adapter" (Virtual Function) information.
 */
struct adapter {
        /* PCI resources */
        void __iomem *regs;
        void __iomem *bar2;
        struct pci_dev *pdev;
        struct device *pdev_dev;

        /* "adapter" resources */
        unsigned long registered_device_map;
        unsigned long open_device_map;
        unsigned long flags;
        struct adapter_params params;

        /* queue and interrupt resources */
        struct {
                unsigned short vec;
                char desc[22];
        } msix_info[MSIX_ENTRIES];
        struct sge sge;

        /* Linux network device resources */
        struct net_device *port[MAX_NPORTS];
        const char *name;
        unsigned int msg_enable;

        /* debugfs resources */
        struct dentry *debugfs_root;

        /* various locks */
        spinlock_t stats_lock;

        /* lock for mailbox cmd list */
        spinlock_t mbox_lock;
        struct mbox_list mlist;

        /* support for mailbox command/reply logging */
#define T4VF_OS_LOG_MBOX_CMDS 256
        struct mbox_cmd_log *mbox_log;

        /* list of MAC addresses in MPS Hash */
        struct list_head mac_hlist;
};

enum { /* adapter flags */
        CXGB4VF_FULL_INIT_DONE                  = (1UL << 0),
        CXGB4VF_USING_MSI                       = (1UL << 1),
        CXGB4VF_USING_MSIX                      = (1UL << 2),
        CXGB4VF_QUEUES_BOUND                    = (1UL << 3),
        CXGB4VF_ROOT_NO_RELAXED_ORDERING        = (1UL << 4),
        CXGB4VF_FW_OK                           = (1UL << 5),
};

/*
 * The following register read/write routine definitions are required by
 * the common code.
 */

/**
 * t4_read_reg - read a HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 32-bit value of the given HW register.
 */
static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
{
        return readl(adapter->regs + reg_addr);
}

/**
 * t4_write_reg - write a HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 32-bit value into the given HW register.
 */
static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
{
        writel(val, adapter->regs + reg_addr);
}

#ifndef readq
static inline u64 readq(const volatile void __iomem *addr)
{
        return readl(addr) + ((u64)readl(addr + 4) << 32);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
        writel(val, addr);
        writel(val >> 32, addr + 4);
}
#endif
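
/*
 * Editorial note: the readq()/writeq() fallbacks above are only compiled on
 * platforms which don't provide native 64-bit MMIO accessors; the two
 * 32-bit accesses are not atomic with respect to the hardware.
 */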

/**
 * t4_read_reg64 - read a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 64-bit value of the given HW register.
 */
static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
{
        return readq(adapter->regs + reg_addr);
}

/**
 * t4_write_reg64 - write a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 64-bit value into the given HW register.
 */
static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
                                  u64 val)
{
        writeq(val, adapter->regs + reg_addr);
}

/**
 * port_name - return the string name of a port
 * @adapter: the adapter
 * @pidx: the port index
 *
 * Return the string name of the selected port.
 */
static inline const char *port_name(struct adapter *adapter, int pidx)
{
        return adapter->port[pidx]->name;
}

/**
 * t4_os_set_hw_addr - store a port's MAC address in SW
 * @adapter: the adapter
 * @pidx: the port index
 * @hw_addr: the Ethernet address
 *
 * Store the Ethernet address of the given port in SW.  Called by the common
 * code when it retrieves a port's Ethernet address from EEPROM.
 */
static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
                                     u8 hw_addr[])
{
        memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN);
}

/**
 * netdev2pinfo - return the port_info structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct port_info associated with a net_device
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
        return netdev_priv(dev);
}

/**
 * adap2pinfo - return the port_info of a port
 * @adapter: the adapter
 * @pidx: the port index
 *
 * Return the port_info structure for the port with the given index.
 */
static inline struct port_info *adap2pinfo(struct adapter *adapter, int pidx)
{
        return netdev_priv(adapter->port[pidx]);
}

/**
 * netdev2adap - return the adapter structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct adapter associated with a net_device
 */
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
        return netdev2pinfo(dev)->adapter;
}

/*
 * OS "Callback" function declarations.  These are functions that the OS code
 * is "contracted" to provide for the common code.
 */
void t4vf_os_link_changed(struct adapter *, int, int);
void t4vf_os_portmod_changed(struct adapter *, int);

/*
 * SGE function prototype declarations.
 */
int t4vf_sge_alloc_rxq(struct adapter *, struct sge_rspq *, bool,
                       struct net_device *, int,
                       struct sge_fl *, rspq_handler_t);
int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *,
                           struct net_device *, struct netdev_queue *,
                           unsigned int);
void t4vf_free_sge_resources(struct adapter *);

netdev_tx_t t4vf_eth_xmit(struct sk_buff *, struct net_device *);
int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *,
                       const struct pkt_gl *);

irq_handler_t t4vf_intr_handler(struct adapter *);
irqreturn_t t4vf_sge_intr_msix(int, void *);

int t4vf_sge_init(struct adapter *);
void t4vf_sge_start(struct adapter *);
void t4vf_sge_stop(struct adapter *);

#endif /* __CXGB4VF_ADAPTER_H__ */