linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __CXGB4_H__
#define __CXGB4_H__

#include "t4_hw.h"

#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include "cxgb4_uld.h"
  51#include "t4_hw.h"

#define FW_VERSION_MAJOR 1
#define FW_VERSION_MINOR 4
#define FW_VERSION_MICRO 0

#define FW_VERSION_MAJOR_T5 0
#define FW_VERSION_MINOR_T5 0
#define FW_VERSION_MICRO_T5 0

#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)

enum {
        MAX_NPORTS = 4,     /* max # of ports */
        SERNUM_LEN = 24,    /* Serial # length */
        EC_LEN     = 16,    /* E/C length */
        ID_LEN     = 16,    /* ID length */
};

enum {
        MEM_EDC0,
        MEM_EDC1,
        MEM_MC,
        MEM_MC0 = MEM_MC,
        MEM_MC1
};

enum {
        MEMWIN0_APERTURE = 2048,
        MEMWIN0_BASE     = 0x1b800,
        MEMWIN1_APERTURE = 32768,
        MEMWIN1_BASE     = 0x28000,
        MEMWIN1_BASE_T5  = 0x52000,
        MEMWIN2_APERTURE = 65536,
        MEMWIN2_BASE     = 0x30000,
        MEMWIN2_BASE_T5  = 0x54000,
};

enum dev_master {
        MASTER_CANT,
        MASTER_MAY,
        MASTER_MUST
};

enum dev_state {
        DEV_STATE_UNINIT,
        DEV_STATE_INIT,
        DEV_STATE_ERR
};

enum {
        PAUSE_RX      = 1 << 0,
        PAUSE_TX      = 1 << 1,
        PAUSE_AUTONEG = 1 << 2
};

struct port_stats {
        u64 tx_octets;            /* total # of octets in good frames */
        u64 tx_frames;            /* all good frames */
        u64 tx_bcast_frames;      /* all broadcast frames */
        u64 tx_mcast_frames;      /* all multicast frames */
        u64 tx_ucast_frames;      /* all unicast frames */
        u64 tx_error_frames;      /* all error frames */

        u64 tx_frames_64;         /* # of Tx frames in a particular range */
        u64 tx_frames_65_127;
        u64 tx_frames_128_255;
        u64 tx_frames_256_511;
        u64 tx_frames_512_1023;
        u64 tx_frames_1024_1518;
        u64 tx_frames_1519_max;

        u64 tx_drop;              /* # of dropped Tx frames */
        u64 tx_pause;             /* # of transmitted pause frames */
        u64 tx_ppp0;              /* # of transmitted PPP prio 0 frames */
        u64 tx_ppp1;              /* # of transmitted PPP prio 1 frames */
        u64 tx_ppp2;              /* # of transmitted PPP prio 2 frames */
        u64 tx_ppp3;              /* # of transmitted PPP prio 3 frames */
        u64 tx_ppp4;              /* # of transmitted PPP prio 4 frames */
        u64 tx_ppp5;              /* # of transmitted PPP prio 5 frames */
        u64 tx_ppp6;              /* # of transmitted PPP prio 6 frames */
        u64 tx_ppp7;              /* # of transmitted PPP prio 7 frames */

        u64 rx_octets;            /* total # of octets in good frames */
        u64 rx_frames;            /* all good frames */
        u64 rx_bcast_frames;      /* all broadcast frames */
        u64 rx_mcast_frames;      /* all multicast frames */
        u64 rx_ucast_frames;      /* all unicast frames */
        u64 rx_too_long;          /* # of frames exceeding MTU */
        u64 rx_jabber;            /* # of jabber frames */
        u64 rx_fcs_err;           /* # of received frames with bad FCS */
        u64 rx_len_err;           /* # of received frames with length error */
        u64 rx_symbol_err;        /* symbol errors */
        u64 rx_runt;              /* # of short frames */

        u64 rx_frames_64;         /* # of Rx frames in a particular range */
        u64 rx_frames_65_127;
        u64 rx_frames_128_255;
        u64 rx_frames_256_511;
        u64 rx_frames_512_1023;
        u64 rx_frames_1024_1518;
        u64 rx_frames_1519_max;

        u64 rx_pause;             /* # of received pause frames */
        u64 rx_ppp0;              /* # of received PPP prio 0 frames */
        u64 rx_ppp1;              /* # of received PPP prio 1 frames */
        u64 rx_ppp2;              /* # of received PPP prio 2 frames */
        u64 rx_ppp3;              /* # of received PPP prio 3 frames */
        u64 rx_ppp4;              /* # of received PPP prio 4 frames */
        u64 rx_ppp5;              /* # of received PPP prio 5 frames */
        u64 rx_ppp6;              /* # of received PPP prio 6 frames */
        u64 rx_ppp7;              /* # of received PPP prio 7 frames */

        u64 rx_ovflow0;           /* drops due to buffer-group 0 overflows */
        u64 rx_ovflow1;           /* drops due to buffer-group 1 overflows */
        u64 rx_ovflow2;           /* drops due to buffer-group 2 overflows */
        u64 rx_ovflow3;           /* drops due to buffer-group 3 overflows */
        u64 rx_trunc0;            /* buffer-group 0 truncated packets */
        u64 rx_trunc1;            /* buffer-group 1 truncated packets */
        u64 rx_trunc2;            /* buffer-group 2 truncated packets */
        u64 rx_trunc3;            /* buffer-group 3 truncated packets */
};

struct lb_port_stats {
        u64 octets;
        u64 frames;
        u64 bcast_frames;
        u64 mcast_frames;
        u64 ucast_frames;
        u64 error_frames;

        u64 frames_64;
        u64 frames_65_127;
        u64 frames_128_255;
        u64 frames_256_511;
        u64 frames_512_1023;
        u64 frames_1024_1518;
        u64 frames_1519_max;

        u64 drop;

        u64 ovflow0;
        u64 ovflow1;
        u64 ovflow2;
        u64 ovflow3;
        u64 trunc0;
        u64 trunc1;
        u64 trunc2;
        u64 trunc3;
};

struct tp_tcp_stats {
        u32 tcpOutRsts;
        u64 tcpInSegs;
        u64 tcpOutSegs;
        u64 tcpRetransSegs;
};

struct tp_err_stats {
        u32 macInErrs[4];
        u32 hdrInErrs[4];
        u32 tcpInErrs[4];
        u32 tnlCongDrops[4];
        u32 ofldChanDrops[4];
        u32 tnlTxDrops[4];
        u32 ofldVlanDrops[4];
        u32 tcp6InErrs[4];
        u32 ofldNoNeigh;
        u32 ofldCongDefer;
};

struct tp_params {
        unsigned int ntxchan;        /* # of Tx channels */
        unsigned int tre;            /* log2 of core clocks per TP tick */
        unsigned short tx_modq_map;  /* TX modulation scheduler queue to */
                                     /* channel map */

        uint32_t dack_re;            /* DACK timer resolution */
        unsigned short tx_modq[NCHAN];  /* channel to modulation queue map */
};

struct vpd_params {
        unsigned int cclk;
        u8 ec[EC_LEN + 1];
        u8 sn[SERNUM_LEN + 1];
        u8 id[ID_LEN + 1];
};

struct pci_params {
        unsigned char speed;
        unsigned char width;
};

struct adapter_params {
        struct tp_params  tp;
        struct vpd_params vpd;
        struct pci_params pci;

        unsigned int sf_size;             /* serial flash size in bytes */
        unsigned int sf_nsec;             /* # of flash sectors */
        unsigned int sf_fw_start;         /* start of FW image in flash */

        unsigned int fw_vers;
        unsigned int tp_vers;
        u8 api_vers[7];

        unsigned short mtus[NMTUS];
        unsigned short a_wnd[NCCTRL_WIN];
        unsigned short b_wnd[NCCTRL_WIN];

        unsigned char nports;             /* # of ethernet ports */
        unsigned char portvec;
        unsigned char rev;                /* chip revision */
        unsigned char offload;

        unsigned char bypass;

        unsigned int ofldq_wr_cred;
};

struct trace_params {
        u32 data[TRACE_LEN / 4];
        u32 mask[TRACE_LEN / 4];
        unsigned short snap_len;
        unsigned short min_len;
        unsigned char skip_ofst;
        unsigned char skip_len;
        unsigned char invert;
        unsigned char port;
};

struct link_config {
        unsigned short supported;        /* link capabilities */
        unsigned short advertising;      /* advertised capabilities */
        unsigned short requested_speed;  /* speed user has requested */
        unsigned short speed;            /* actual link speed */
        unsigned char  requested_fc;     /* flow control user has requested */
        unsigned char  fc;               /* actual link flow control */
        unsigned char  autoneg;          /* autonegotiating? */
        unsigned char  link_ok;          /* link up? */
};

#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
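
/* FW_LEN16() converts a firmware command structure's size into the 16-byte
 * units that firmware length fields are expressed in; FW_CMD_LEN16() itself
 * comes from the firmware API header.  A worked example (illustrative only):
 * for a 32-byte command struct c,
 *
 *      FW_LEN16(c) == FW_CMD_LEN16(sizeof(c) / 16) == FW_CMD_LEN16(2)
 *
 * i.e. the command length is reported to the firmware as two 16-byte units.
 */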

enum {
        MAX_ETH_QSETS = 32,           /* # of Ethernet Tx/Rx queue sets */
        MAX_OFLD_QSETS = 16,          /* # of offload Tx/Rx queue sets */
        MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
        MAX_RDMA_QUEUES = NCHAN,      /* # of streaming RDMA Rx queues */
};

enum {
        MAX_EGRQ = 128,         /* max # of egress queues, including FLs */
        MAX_INGQ = 64           /* max # of interrupt-capable ingress queues */
};

struct adapter;
struct sge_rspq;

struct port_info {
        struct adapter *adapter;
        u16    viid;
        s16    xact_addr_filt;        /* index of exact MAC address filter */
        u16    rss_size;              /* size of VI's RSS table slice */
        s8     mdio_addr;
        u8     port_type;
        u8     mod_type;
        u8     port_id;
        u8     tx_chan;
        u8     lport;                 /* associated offload logical port */
        u8     nqsets;                /* # of qsets */
        u8     first_qset;            /* index of first qset */
        u8     rss_mode;
        struct link_config link_cfg;
        u16   *rss;
};

struct dentry;
struct work_struct;

enum {                                 /* adapter flags */
        FULL_INIT_DONE     = (1 << 0),
        USING_MSI          = (1 << 1),
        USING_MSIX         = (1 << 2),
        FW_OK              = (1 << 4),
        RSS_TNLALLLOOKUP   = (1 << 5),
        USING_SOFT_PARAMS  = (1 << 6),
        MASTER_PF          = (1 << 7),
        FW_OFLD_CONN       = (1 << 9),
};

struct rx_sw_desc;

struct sge_fl {                     /* SGE free-buffer queue state */
        unsigned int avail;         /* # of available Rx buffers */
        unsigned int pend_cred;     /* new buffers since last FL DB ring */
        unsigned int cidx;          /* consumer index */
        unsigned int pidx;          /* producer index */
        unsigned long alloc_failed; /* # of times buffer allocation failed */
        unsigned long large_alloc_failed;
        unsigned long starving;
        /* RO fields */
        unsigned int cntxt_id;      /* SGE context id for the free list */
        unsigned int size;          /* capacity of free list */
        struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
        __be64 *desc;               /* address of HW Rx descriptor ring */
        dma_addr_t addr;            /* bus address of HW ring start */
};

/* A packet gather list */
struct pkt_gl {
        struct page_frag frags[MAX_SKB_FRAGS];
        void *va;                         /* virtual address of first byte */
        unsigned int nfrags;              /* # of fragments */
        unsigned int tot_len;             /* total length of fragments */
};

typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
                              const struct pkt_gl *gl);

struct sge_rspq {                   /* state for an SGE response queue */
        struct napi_struct napi;
        const __be64 *cur_desc;     /* current descriptor in queue */
        unsigned int cidx;          /* consumer index */
        u8 gen;                     /* current generation bit */
        u8 intr_params;             /* interrupt holdoff parameters */
        u8 next_intr_params;        /* holdoff params for next interrupt */
        u8 pktcnt_idx;              /* interrupt packet threshold */
        u8 uld;                     /* ULD handling this queue */
        u8 idx;                     /* queue index within its group */
        int offset;                 /* offset into current Rx buffer */
        u16 cntxt_id;               /* SGE context id for the response q */
        u16 abs_id;                 /* absolute SGE id for the response q */
        __be64 *desc;               /* address of HW response ring */
        dma_addr_t phys_addr;       /* physical address of the ring */
        unsigned int iqe_len;       /* entry size */
        unsigned int size;          /* capacity of response queue */
        struct adapter *adap;
        struct net_device *netdev;  /* associated net device */
        rspq_handler_t handler;
};

struct sge_eth_stats {              /* Ethernet queue statistics */
        unsigned long pkts;         /* # of ethernet packets */
        unsigned long lro_pkts;     /* # of LRO super packets */
        unsigned long lro_merged;   /* # of wire packets merged by LRO */
        unsigned long rx_cso;       /* # of Rx checksum offloads */
        unsigned long vlan_ex;      /* # of Rx VLAN extractions */
        unsigned long rx_drops;     /* # of packets dropped due to no mem */
};

struct sge_eth_rxq {                /* SW Ethernet Rx queue */
        struct sge_rspq rspq;
        struct sge_fl fl;
        struct sge_eth_stats stats;
} ____cacheline_aligned_in_smp;

struct sge_ofld_stats {             /* offload queue statistics */
        unsigned long pkts;         /* # of packets */
        unsigned long imm;          /* # of immediate-data packets */
        unsigned long an;           /* # of asynchronous notifications */
        unsigned long nomem;        /* # of responses deferred due to no mem */
};

struct sge_ofld_rxq {               /* SW offload Rx queue */
        struct sge_rspq rspq;
        struct sge_fl fl;
        struct sge_ofld_stats stats;
} ____cacheline_aligned_in_smp;

struct tx_desc {
        __be64 flit[8];
};

struct tx_sw_desc;

struct sge_txq {
        unsigned int  in_use;       /* # of in-use Tx descriptors */
        unsigned int  size;         /* # of descriptors */
        unsigned int  cidx;         /* SW consumer index */
        unsigned int  pidx;         /* producer index */
        unsigned long stops;        /* # of times q has been stopped */
        unsigned long restarts;     /* # of queue restarts */
        unsigned int  cntxt_id;     /* SGE context id for the Tx q */
        struct tx_desc *desc;       /* address of HW Tx descriptor ring */
        struct tx_sw_desc *sdesc;   /* address of SW Tx descriptor ring */
        struct sge_qstat *stat;     /* queue status entry */
        dma_addr_t    phys_addr;    /* physical address of the ring */
        spinlock_t db_lock;
        int db_disabled;
        unsigned short db_pidx;
        u64 udb;
};

struct sge_eth_txq {                /* state for an SGE Ethernet Tx queue */
        struct sge_txq q;
        struct netdev_queue *txq;   /* associated netdev TX queue */
        unsigned long tso;          /* # of TSO requests */
        unsigned long tx_cso;       /* # of Tx checksum offloads */
        unsigned long vlan_ins;     /* # of Tx VLAN insertions */
        unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ofld_txq {               /* state for an SGE offload Tx queue */
        struct sge_txq q;
        struct adapter *adap;
        struct sk_buff_head sendq;  /* list of backpressured packets */
        struct tasklet_struct qresume_tsk; /* restarts the queue */
        u8 full;                    /* the Tx ring is full */
        unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ctrl_txq {               /* state for an SGE control Tx queue */
        struct sge_txq q;
        struct adapter *adap;
        struct sk_buff_head sendq;  /* list of backpressured packets */
        struct tasklet_struct qresume_tsk; /* restarts the queue */
        u8 full;                    /* the Tx ring is full */
} ____cacheline_aligned_in_smp;

struct sge {
        struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
        struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
        struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];

        struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
        struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
        struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
        struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;

        struct sge_rspq intrq ____cacheline_aligned_in_smp;
        spinlock_t intrq_lock;

        u16 max_ethqsets;           /* # of available Ethernet queue sets */
        u16 ethqsets;               /* # of active Ethernet queue sets */
        u16 ethtxq_rover;           /* Tx queue to clean up next */
        u16 ofldqsets;              /* # of active offload queue sets */
        u16 rdmaqs;                 /* # of available RDMA Rx queues */
        u16 ofld_rxq[MAX_OFLD_QSETS];
        u16 rdma_rxq[NCHAN];
        u16 timer_val[SGE_NTIMERS];
        u8 counter_val[SGE_NCOUNTERS];
        u32 fl_pg_order;            /* large page allocation size */
        u32 stat_len;               /* length of status page at ring end */
        u32 pktshift;               /* padding between CPL & packet data */
        u32 fl_align;               /* response queue message alignment */
        u32 fl_starve_thres;        /* Free List starvation threshold */
        unsigned int starve_thres;
        u8 idma_state[2];
        unsigned int egr_start;
        unsigned int ingr_start;
        void *egr_map[MAX_EGRQ];    /* qid->queue egress queue map */
        struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
        DECLARE_BITMAP(starving_fl, MAX_EGRQ);
        DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
        struct timer_list rx_timer; /* refills starving FLs */
        struct timer_list tx_timer; /* checks Tx queues */
};

#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)

struct l2t_data;

#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)

#define CHELSIO_T4              0x4
#define CHELSIO_T5              0x5

enum chip_type {
        T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0),
        T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
        T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
        T4_FIRST_REV    = T4_A1,
        T4_LAST_REV     = T4_A3,

        T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
        T5_FIRST_REV    = T5_A1,
        T5_LAST_REV     = T5_A1,
};
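
/* Worked example of the encoding above: T4_A2 is
 * CHELSIO_CHIP_CODE(CHELSIO_T4, 1) == (0x4 << 4) | 1 == 0x41, so
 * CHELSIO_CHIP_VERSION(T4_A2) recovers 0x4 and CHELSIO_CHIP_RELEASE(T4_A2)
 * recovers 1.  The is_t4()/is_t5() helpers below rely on this encoding:
 * every T4 code (0x40-0x42) is numerically below every T5 code (0x50).
 */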

#ifdef CONFIG_PCI_IOV

/* T4 supports SRIOV on PF0-3 and T5 on PF0-7.  However, the Serial
 * Configuration initialization for T5 only has SR-IOV functionality enabled
 * on PF0-3 in order to simplify everything.
 */
#define NUM_OF_PF_WITH_SRIOV 4

#endif

struct adapter {
        void __iomem *regs;
        void __iomem *bar2;
        struct pci_dev *pdev;
        struct device *pdev_dev;
        unsigned int mbox;
        unsigned int fn;
        unsigned int flags;
        enum chip_type chip;

        int msg_enable;

        struct adapter_params params;
        struct cxgb4_virt_res vres;
        unsigned int swintr;

        unsigned int wol;

        struct {
                unsigned short vec;
                char desc[IFNAMSIZ + 10];
        } msix_info[MAX_INGQ + 1];

        struct sge sge;

        struct net_device *port[MAX_NPORTS];
        u8 chan_map[NCHAN];                   /* channel -> port map */

        u32 filter_mode;
        unsigned int l2t_start;
        unsigned int l2t_end;
        struct l2t_data *l2t;
        void *uld_handle[CXGB4_ULD_MAX];
        struct list_head list_node;

        struct tid_info tids;
        void **tid_release_head;
        spinlock_t tid_release_lock;
        struct work_struct tid_release_task;
        struct work_struct db_full_task;
        struct work_struct db_drop_task;
        bool tid_release_task_busy;

        struct dentry *debugfs_root;

        spinlock_t stats_lock;
};

/* Defined bit width of user definable filter tuples
 */
#define ETHTYPE_BITWIDTH 16
#define FRAG_BITWIDTH 1
#define MACIDX_BITWIDTH 9
#define FCOE_BITWIDTH 1
#define IPORT_BITWIDTH 3
#define MATCHTYPE_BITWIDTH 3
#define PROTO_BITWIDTH 8
#define TOS_BITWIDTH 8
#define PF_BITWIDTH 8
#define VF_BITWIDTH 8
#define IVLAN_BITWIDTH 16
#define OVLAN_BITWIDTH 16

/* Filter matching rules.  These consist of a set of ingress packet field
 * (value, mask) tuples.  The associated ingress packet field matches the
 * tuple when ((field & mask) == value).  (Thus a wildcard "don't care" field
 * rule can be constructed by specifying a tuple of (0, 0).)  A filter rule
 * matches an ingress packet when all of the individual field matching rules
 * are true.
 *
 * Partial field masks are always valid, however, while it may be easy to
 * understand their meanings for some fields (e.g. IP address to match a
 * subnet), for others making sensible partial masks is less intuitive (e.g.
 * MPS match type) ...
 *
 * Most of the following data structures are modeled on T4 capabilities.
 * Drivers for earlier chips use the subsets which make sense for those chips.
 * We really need to come up with a hardware-independent mechanism to
 * represent hardware filter capabilities ...
 */
struct ch_filter_tuple {
        /* Compressed header matching field rules.  The TP_VLAN_PRI_MAP
         * register selects which of these fields will participate in the
         * filter match rules -- up to a maximum of 36 bits.  Because
         * TP_VLAN_PRI_MAP is a global register, all filters must use the same
         * set of fields.
         */
        uint32_t ethtype:ETHTYPE_BITWIDTH;      /* Ethernet type */
        uint32_t frag:FRAG_BITWIDTH;            /* IP fragmentation header */
        uint32_t ivlan_vld:1;                   /* inner VLAN valid */
        uint32_t ovlan_vld:1;                   /* outer VLAN valid */
        uint32_t pfvf_vld:1;                    /* PF/VF valid */
        uint32_t macidx:MACIDX_BITWIDTH;        /* exact match MAC index */
        uint32_t fcoe:FCOE_BITWIDTH;            /* FCoE packet */
        uint32_t iport:IPORT_BITWIDTH;          /* ingress port */
        uint32_t matchtype:MATCHTYPE_BITWIDTH;  /* MPS match type */
        uint32_t proto:PROTO_BITWIDTH;          /* protocol type */
        uint32_t tos:TOS_BITWIDTH;              /* TOS/Traffic Type */
        uint32_t pf:PF_BITWIDTH;                /* PCI-E PF ID */
        uint32_t vf:VF_BITWIDTH;                /* PCI-E VF ID */
        uint32_t ivlan:IVLAN_BITWIDTH;          /* inner VLAN */
        uint32_t ovlan:OVLAN_BITWIDTH;          /* outer VLAN */

        /* Uncompressed header matching field rules.  These are always
         * available for field rules.
         */
        uint8_t lip[16];        /* local IP address (IPv4 in [3:0]) */
        uint8_t fip[16];        /* foreign IP address (IPv4 in [3:0]) */
        uint16_t lport;         /* local port */
        uint16_t fport;         /* foreign port */
};

/* A filter ioctl command.
 */
struct ch_filter_specification {
        /* Administrative fields for filter.
         */
        uint32_t hitcnts:1;     /* count filter hits in TCB */
        uint32_t prio:1;        /* filter has priority over active/server */

        /* Fundamental filter typing.  This is the one element of filter
         * matching that doesn't exist as a (value, mask) tuple.
         */
        uint32_t type:1;        /* 0 => IPv4, 1 => IPv6 */

        /* Packet dispatch information.  Ingress packets which match the
         * filter rules will be dropped, passed to the host or switched back
         * out as egress packets.
         */
        uint32_t action:2;      /* drop, pass, switch */

        uint32_t rpttid:1;      /* report TID in RSS hash field */

        uint32_t dirsteer:1;    /* 0 => RSS, 1 => steer to iq */
        uint32_t iq:10;         /* ingress queue */

        uint32_t maskhash:1;    /* dirsteer=0: store RSS hash in TCB */
        uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
                                /*             1 => TCB contains IQ ID */

        /* Switch proxy/rewrite fields.  An ingress packet which matches a
         * filter with "switch" set will be looped back out as an egress
         * packet -- potentially with some Ethernet header rewriting.
         */
        uint32_t eport:2;       /* egress port to switch packet out */
        uint32_t newdmac:1;     /* rewrite destination MAC address */
        uint32_t newsmac:1;     /* rewrite source MAC address */
        uint32_t newvlan:2;     /* rewrite VLAN Tag */
        uint8_t dmac[ETH_ALEN]; /* new destination MAC address */
        uint8_t smac[ETH_ALEN]; /* new source MAC address */
        uint16_t vlan;          /* VLAN Tag to insert */

        /* Filter rule value/mask pairs.
         */
        struct ch_filter_tuple val;
        struct ch_filter_tuple mask;
};

enum {
        FILTER_PASS = 0,        /* default */
        FILTER_DROP,
        FILTER_SWITCH
};

enum {
        VLAN_NOCHANGE = 0,      /* default */
        VLAN_REMOVE,
        VLAN_INSERT,
        VLAN_REWRITE
};
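
/* A minimal, purely illustrative sketch of how the structures above compose
 * into a filter rule (the field names are real, the values hypothetical):
 * an IPv4 filter that counts hits and switches matching TCP packets out of
 * egress port 1 with a rewritten VLAN tag might be built as
 *
 *      struct ch_filter_specification fs = { 0 };
 *
 *      fs.type = 0;                        (IPv4)
 *      fs.hitcnts = 1;                     (count hits in the TCB)
 *      fs.action = FILTER_SWITCH;          (loop back out as egress)
 *      fs.eport = 1;                       (egress port 1)
 *      fs.newvlan = VLAN_REWRITE;          (rewrite the VLAN tag)
 *      fs.vlan = 100;                      (new VLAN ID)
 *      fs.val.proto = IPPROTO_TCP;         (match protocol 6 ...)
 *      fs.mask.proto = 0xff;               (... exactly)
 *
 * A (0, 0) value/mask pair leaves a field as a wildcard, and any compressed
 * field used this way must be enabled in TP_VLAN_PRI_MAP.
 */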

static inline int is_t5(enum chip_type chip)
{
        return (chip >= T5_FIRST_REV && chip <= T5_LAST_REV);
}

static inline int is_t4(enum chip_type chip)
{
        return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV);
}

static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
        return readl(adap->regs + reg_addr);
}

static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
{
        writel(val, adap->regs + reg_addr);
}

#ifndef readq
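/* Fallback 64-bit MMIO accessors for platforms that don't define their own
 * readq()/writeq().  Each 64-bit access is composed of two 32-bit accesses
 * (low word first), so these are not atomic with respect to the device.
 */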
static inline u64 readq(const volatile void __iomem *addr)
{
        return readl(addr) + ((u64)readl(addr + 4) << 32);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
        writel(val, addr);
        writel(val >> 32, addr + 4);
}
#endif

static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
{
        return readq(adap->regs + reg_addr);
}

static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
{
        writeq(val, adap->regs + reg_addr);
}

/**
 * netdev2pinfo - return the port_info structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct port_info associated with a net_device
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
        return netdev_priv(dev);
}

/**
 * adap2pinfo - return the port_info of a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Return the port_info structure for the port of the given index.
 */
static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
        return netdev_priv(adap->port[idx]);
}

/**
 * netdev2adap - return the adapter structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct adapter associated with a net_device
 */
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
        return netdev2pinfo(dev)->adapter;
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);

void *t4_alloc_mem(size_t size);

void t4_free_sge_resources(struct adapter *adap);
irq_handler_t t4_intr_handler(struct adapter *adap);
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
                     const struct pkt_gl *gl);
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                     struct net_device *dev, int intr_idx,
                     struct sge_fl *fl, rspq_handler_t hnd);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
                         struct net_device *dev, struct netdev_queue *netdevq,
                         unsigned int iqid);
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
                          struct net_device *dev, unsigned int iqid,
                          unsigned int cmplqid);
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
                          struct net_device *dev, unsigned int iqid);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
extern int dbfifo_int_thresh;

#define for_each_port(adapter, iter) \
        for (iter = 0; iter < (adapter)->params.nports; ++iter)

static inline int is_bypass(struct adapter *adap)
{
        return adap->params.bypass;
}

static inline int is_bypass_device(int device)
{
        /* this should be set based upon device capabilities */
        switch (device) {
        case 0x440b:
        case 0x440c:
                return 1;
        default:
                return 0;
        }
}

static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
        return adap->params.vpd.cclk / 1000;
}

static inline unsigned int us_to_core_ticks(const struct adapter *adap,
                                            unsigned int us)
{
        return (us * adap->params.vpd.cclk) / 1000;
}

static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
                                            unsigned int ticks)
{
        /* add Core Clock / 2 to round ticks to nearest uS */
        return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
                adapter->params.vpd.cclk);
}
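
/* Worked example for the conversions above, assuming a core clock of
 * 200000 kHz from the VPD (the /1000 in core_ticks_per_usec() implies
 * that params.vpd.cclk is stored in kHz): core_ticks_per_usec() == 200,
 * us_to_core_ticks(adap, 5) == 1000, and core_ticks_to_us(adap, 1000)
 * computes (1000 * 1000 + 100000) / 200000, which truncates back to 5 us.
 */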

void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
                      u32 val);

int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
                    void *rpl, bool sleep_ok);

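/* Convenience wrappers around t4_wr_mbox_meat(): t4_wr_mbox() passes
 * sleep_ok = true and so may sleep while awaiting the mailbox reply,
 * while t4_wr_mbox_ns() passes sleep_ok = false for contexts that
 * cannot sleep.
 */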
static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
                             int size, void *rpl)
{
        return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
}

static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
                                int size, void *rpl)
{
        return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
}

void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
                       unsigned int data_reg, const u32 *vals,
                       unsigned int nregs, unsigned int start_idx);
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
                      unsigned int data_reg, u32 *vals, unsigned int nregs,
                      unsigned int start_idx);

struct fw_filter_wr;

void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);

int t4_wait_dev_ready(struct adapter *adap);
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
                  struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
                    __be32 *buf);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
int t4_check_fw_version(struct adapter *adapter);
int t4_prep_adapter(struct adapter *adapter);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
                        int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
                       unsigned int flags);
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
               u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
                u64 *parity);

void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
                            unsigned int mask, unsigned int val);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
                         struct tp_tcp_stats *v6);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
                  const unsigned short *alpha, const unsigned short *beta);

void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);

void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
                         const u8 *addr);
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
                      u64 mask0, u64 mask1, unsigned int crc, bool enable);

int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
                enum dev_master master, enum dev_state *state);
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_early_init(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force);
int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
                  const u8 *fw_data, unsigned int size, int force);
int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
                      unsigned int mtype, unsigned int maddr,
                      u32 *finiver, u32 *finicsum, u32 *cfcsum);
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
                          unsigned int cache_line_size);
int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int nparams, const u32 *params,
                    u32 *val);
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
                  unsigned int vf, unsigned int nparams, const u32 *params,
                  const u32 *val);
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
                unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
                unsigned int rxqi, unsigned int rxq, unsigned int tc,
                unsigned int vi, unsigned int cmask, unsigned int pmask,
                unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
                unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
                unsigned int *rss_size);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
                int mtu, int promisc, int all_multi, int bcast, int vlanex,
                bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
                      unsigned int viid, bool free, unsigned int naddr,
                      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
                  int idx, const u8 *addr, bool persist, bool add_smt);
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
                     bool ucast, u64 vec, bool sleep_ok);
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
                 bool rx_en, bool tx_en);
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
                     unsigned int nblinks);
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
               unsigned int mmd, unsigned int reg, u16 *valp);
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
               unsigned int mmd, unsigned int reg, u16 val);
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
               unsigned int vf, unsigned int iqtype, unsigned int iqid,
               unsigned int fl0id, unsigned int fl1id);
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                   unsigned int vf, unsigned int eqid);
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
                         u32 addr, u32 val);
#endif /* __CXGB4_H__ */