linux/drivers/net/ethernet/cavium/thunder/nic.h
/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#ifndef NIC_H
#define NIC_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include "thunder_bgx.h"

/* PCI device IDs */
#define PCI_DEVICE_ID_THUNDER_NIC_PF            0xA01E
#define PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF      0x0011
#define PCI_DEVICE_ID_THUNDER_NIC_VF            0xA034
#define PCI_DEVICE_ID_THUNDER_BGX               0xA026

/* Subsystem device IDs */
#define PCI_SUBSYS_DEVID_88XX_NIC_PF            0xA11E
#define PCI_SUBSYS_DEVID_81XX_NIC_PF            0xA21E
#define PCI_SUBSYS_DEVID_83XX_NIC_PF            0xA31E

#define PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF      0xA11E
#define PCI_SUBSYS_DEVID_88XX_NIC_VF            0xA134
#define PCI_SUBSYS_DEVID_81XX_NIC_VF            0xA234
#define PCI_SUBSYS_DEVID_83XX_NIC_VF            0xA334

/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM             0
#define PCI_MSIX_REG_BAR_NUM            4

/* NIC SRIOV VF count */
#define MAX_NUM_VFS_SUPPORTED           128
#define DEFAULT_NUM_VF_ENABLED          8

#define NIC_TNS_BYPASS_MODE             0
#define NIC_TNS_MODE                    1

/* NIC priv flags */
#define NIC_SRIOV_ENABLED               BIT(0)

/* Min/Max packet size */
#define NIC_HW_MIN_FRS                  64
#define NIC_HW_MAX_FRS                  9190 /* Excluding L2 header and FCS */

/* Max pkinds */
#define NIC_MAX_PKIND                   16

/* Max when CPI_ALG is IP diffserv */
#define NIC_MAX_CPI_PER_LMAC            64

/* NIC VF Interrupts */
#define NICVF_INTR_CQ                   0
#define NICVF_INTR_SQ                   1
#define NICVF_INTR_RBDR                 2
#define NICVF_INTR_PKT_DROP             3
#define NICVF_INTR_TCP_TIMER            4
#define NICVF_INTR_MBOX                 5
#define NICVF_INTR_QS_ERR               6

#define NICVF_INTR_CQ_SHIFT             0
#define NICVF_INTR_SQ_SHIFT             8
#define NICVF_INTR_RBDR_SHIFT           16
#define NICVF_INTR_PKT_DROP_SHIFT       20
#define NICVF_INTR_TCP_TIMER_SHIFT      21
#define NICVF_INTR_MBOX_SHIFT           22
#define NICVF_INTR_QS_ERR_SHIFT         23

#define NICVF_INTR_CQ_MASK              (0xFF << NICVF_INTR_CQ_SHIFT)
#define NICVF_INTR_SQ_MASK              (0xFF << NICVF_INTR_SQ_SHIFT)
#define NICVF_INTR_RBDR_MASK            (0x03 << NICVF_INTR_RBDR_SHIFT)
#define NICVF_INTR_PKT_DROP_MASK        BIT(NICVF_INTR_PKT_DROP_SHIFT)
#define NICVF_INTR_TCP_TIMER_MASK       BIT(NICVF_INTR_TCP_TIMER_SHIFT)
#define NICVF_INTR_MBOX_MASK            BIT(NICVF_INTR_MBOX_SHIFT)
#define NICVF_INTR_QS_ERR_MASK          BIT(NICVF_INTR_QS_ERR_SHIFT)
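
/* Illustrative sketch, not part of the driver: the CQ, SQ and RBDR
 * interrupt causes are per-queue bit ranges inside a single 64-bit VF
 * interrupt register, starting at the *_SHIFT positions above (the widths
 * follow from the *_MASK values). A hypothetical helper that computes the
 * bit for a given interrupt type and queue index could look like this; the
 * driver's own enable/disable helpers may differ.
 */
static inline u64 nicvf_example_intr_bit(int int_type, int q_idx)
{
        switch (int_type) {
        case NICVF_INTR_CQ:
                return BIT_ULL(NICVF_INTR_CQ_SHIFT + q_idx);
        case NICVF_INTR_SQ:
                return BIT_ULL(NICVF_INTR_SQ_SHIFT + q_idx);
        case NICVF_INTR_RBDR:
                return BIT_ULL(NICVF_INTR_RBDR_SHIFT + q_idx);
        case NICVF_INTR_PKT_DROP:
                return NICVF_INTR_PKT_DROP_MASK;
        case NICVF_INTR_TCP_TIMER:
                return NICVF_INTR_TCP_TIMER_MASK;
        case NICVF_INTR_MBOX:
                return NICVF_INTR_MBOX_MASK;
        case NICVF_INTR_QS_ERR:
                return NICVF_INTR_QS_ERR_MASK;
        default:
                return 0;
        }
}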

/* MSI-X interrupts */
#define NIC_PF_MSIX_VECTORS             10
#define NIC_VF_MSIX_VECTORS             20

#define NIC_PF_INTR_ID_ECC0_SBE         0
#define NIC_PF_INTR_ID_ECC0_DBE         1
#define NIC_PF_INTR_ID_ECC1_SBE         2
#define NIC_PF_INTR_ID_ECC1_DBE         3
#define NIC_PF_INTR_ID_ECC2_SBE         4
#define NIC_PF_INTR_ID_ECC2_DBE         5
#define NIC_PF_INTR_ID_ECC3_SBE         6
#define NIC_PF_INTR_ID_ECC3_DBE         7
#define NIC_PF_INTR_ID_MBOX0            8
#define NIC_PF_INTR_ID_MBOX1            9

/* Minimum FIFO level before all packets for the CQ are dropped
 *
 * This value ensures that once a packet has been "accepted"
 * for reception it will not get dropped due to non-availability
 * of CQ descriptors. A HW erratum mandates that this value be
 * at least 0x100.
 */
#define NICPF_CQM_MIN_DROP_LEVEL       0x100

/* Global timer for CQ timer threshold interrupts
 * Calculated for an SCLK of 700 MHz;
 * the value written should be 1/16th of the expected tick count.
 *
 * 1 tick per 0.025 usec
 */
#define NICPF_CLK_PER_INT_TICK          1
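
/* Rough sketch of the conversion implied by the comment above (an
 * assumption for illustration, not a driver API): with one tick per
 * 0.025 usec there are 40 ticks per usec, and the programmed value is
 * 1/16th of the desired tick count. A 2 usec CQ timer threshold would
 * therefore be written as (2 * 40) / 16 = 5.
 */
static inline u64 nicvf_example_usecs_to_timer_val(u64 usecs)
{
        return (usecs * 40) / 16;       /* 40 ticks/usec, write 1/16th */
}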

/* Time to wait before we decide that an SQ is stuck.
 *
 * Since both pkt rx and tx notifications are done with the same CQ,
 * when packets are being received at a very high rate (e.g. L2 forwarding)
 * freeing of transmitted skbs gets delayed and the watchdog
 * kicks in, resetting the interface. Hence this value is kept high.
 */
#define NICVF_TX_TIMEOUT                (50 * HZ)

struct nicvf_cq_poll {
        struct  nicvf *nicvf;
        u8      cq_idx;         /* Completion queue index */
        struct  napi_struct napi;
};

#define NIC_MAX_RSS_HASH_BITS           8
#define NIC_MAX_RSS_IDR_TBL_SIZE        (1 << NIC_MAX_RSS_HASH_BITS)
#define RSS_HASH_KEY_SIZE               5 /* 320 bit key */

struct nicvf_rss_info {
        bool enable;
#define RSS_L2_EXTENDED_HASH_ENA        BIT(0)
#define RSS_IP_HASH_ENA                 BIT(1)
#define RSS_TCP_HASH_ENA                BIT(2)
#define RSS_TCP_SYN_DIS                 BIT(3)
#define RSS_UDP_HASH_ENA                BIT(4)
#define RSS_L4_EXTENDED_HASH_ENA        BIT(5)
#define RSS_ROCE_ENA                    BIT(6)
#define RSS_L3_BI_DIRECTION_ENA         BIT(7)
#define RSS_L4_BI_DIRECTION_ENA         BIT(8)
        u64 cfg;
        u8  hash_bits;
        u16 rss_size;
        u8  ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
        u64 key[RSS_HASH_KEY_SIZE];
} ____cacheline_aligned_in_smp;
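
/* Illustrative sketch, not the driver's code: the RSS indirection table
 * spreads flows over the active receive queues. The low hash_bits bits of
 * the computed RSS hash index ind_tbl[], and the selected entry names the
 * destination RQ. A minimal round-robin fill, assuming rx_queues is the
 * number of RQs in use:
 */
static inline void nicvf_example_fill_rss_ind_tbl(struct nicvf_rss_info *rss,
                                                  u8 rx_queues)
{
        u16 idx;

        rss->rss_size = 1 << rss->hash_bits;
        for (idx = 0; idx < rss->rss_size; idx++)
                rss->ind_tbl[idx] = idx % rx_queues;
}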

enum rx_stats_reg_offset {
        RX_OCTS = 0x0,
        RX_UCAST = 0x1,
        RX_BCAST = 0x2,
        RX_MCAST = 0x3,
        RX_RED = 0x4,
        RX_RED_OCTS = 0x5,
        RX_ORUN = 0x6,
        RX_ORUN_OCTS = 0x7,
        RX_FCS = 0x8,
        RX_L2ERR = 0x9,
        RX_DRP_BCAST = 0xa,
        RX_DRP_MCAST = 0xb,
        RX_DRP_L3BCAST = 0xc,
        RX_DRP_L3MCAST = 0xd,
        RX_STATS_ENUM_LAST,
};

enum tx_stats_reg_offset {
        TX_OCTS = 0x0,
        TX_UCAST = 0x1,
        TX_BCAST = 0x2,
        TX_MCAST = 0x3,
        TX_DROP = 0x4,
        TX_STATS_ENUM_LAST,
};

struct nicvf_hw_stats {
        u64 rx_bytes;
        u64 rx_frames;
        u64 rx_ucast_frames;
        u64 rx_bcast_frames;
        u64 rx_mcast_frames;
        u64 rx_drops;
        u64 rx_drop_red;
        u64 rx_drop_red_bytes;
        u64 rx_drop_overrun;
        u64 rx_drop_overrun_bytes;
        u64 rx_drop_bcast;
        u64 rx_drop_mcast;
        u64 rx_drop_l3_bcast;
        u64 rx_drop_l3_mcast;
        u64 rx_fcs_errors;
        u64 rx_l2_errors;

        u64 tx_bytes;
        u64 tx_frames;
        u64 tx_ucast_frames;
        u64 tx_bcast_frames;
        u64 tx_mcast_frames;
        u64 tx_drops;
};

struct nicvf_drv_stats {
        /* CQE Rx errs */
        u64 rx_bgx_truncated_pkts;
        u64 rx_jabber_errs;
        u64 rx_fcs_errs;
        u64 rx_bgx_errs;
        u64 rx_prel2_errs;
        u64 rx_l2_hdr_malformed;
        u64 rx_oversize;
        u64 rx_undersize;
        u64 rx_l2_len_mismatch;
        u64 rx_l2_pclp;
        u64 rx_ip_ver_errs;
        u64 rx_ip_csum_errs;
        u64 rx_ip_hdr_malformed;
        u64 rx_ip_payload_malformed;
        u64 rx_ip_ttl_errs;
        u64 rx_l3_pclp;
        u64 rx_l4_malformed;
        u64 rx_l4_csum_errs;
        u64 rx_udp_len_errs;
        u64 rx_l4_port_errs;
        u64 rx_tcp_flag_errs;
        u64 rx_tcp_offset_errs;
        u64 rx_l4_pclp;
        u64 rx_truncated_pkts;

        /* CQE Tx errs */
        u64 tx_desc_fault;
        u64 tx_hdr_cons_err;
        u64 tx_subdesc_err;
        u64 tx_max_size_exceeded;
        u64 tx_imm_size_oflow;
        u64 tx_data_seq_err;
        u64 tx_mem_seq_err;
        u64 tx_lock_viol;
        u64 tx_data_fault;
        u64 tx_tstmp_conflict;
        u64 tx_tstmp_timeout;
        u64 tx_mem_fault;
        u64 tx_csum_overlap;
        u64 tx_csum_overflow;

        /* driver debug stats */
        u64 rcv_buffer_alloc_failures;
        u64 tx_tso;
        u64 tx_timeout;
        u64 txq_stop;
        u64 txq_wake;

        struct u64_stats_sync   syncp;
};
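
/* Illustrative sketch, not a driver function: drv_stats is allocated
 * per-CPU (see struct nicvf below), and readers sample the counters under
 * the u64_stats_sync sequence counter. A hot-path writer would bump a
 * counter roughly like this:
 */
static inline void
nicvf_example_count_tso(struct nicvf_drv_stats __percpu *drv_stats)
{
        struct nicvf_drv_stats *stats = this_cpu_ptr(drv_stats);

        u64_stats_update_begin(&stats->syncp);
        stats->tx_tso++;
        u64_stats_update_end(&stats->syncp);
}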

struct nicvf {
        struct nicvf            *pnicvf;
        struct net_device       *netdev;
        struct pci_dev          *pdev;
        void __iomem            *reg_base;
#define MAX_QUEUES_PER_QSET                     8
        struct queue_set        *qs;
        struct nicvf_cq_poll    *napi[MAX_QUEUES_PER_QSET];
        u8                      vf_id;
        u8                      sqs_id;
        bool                    sqs_mode;
        bool                    hw_tso;
        bool                    t88;

        /* Receive buffer alloc */
        u32                     rb_page_offset;
        u16                     rb_pageref;
        bool                    rb_alloc_fail;
        bool                    rb_work_scheduled;
        struct page             *rb_page;
        struct delayed_work     rbdr_work;
        struct tasklet_struct   rbdr_task;

        /* Secondary Qset */
        u8                      sqs_count;
#define MAX_SQS_PER_VF_SINGLE_NODE              5
#define MAX_SQS_PER_VF                          11
        struct nicvf            *snicvf[MAX_SQS_PER_VF];

        /* Queue count */
        u8                      rx_queues;
        u8                      tx_queues;
        u8                      max_queues;

        u8                      node;
        u8                      cpi_alg;
        bool                    link_up;
        u8                      duplex;
        u32                     speed;
        bool                    tns_mode;
        bool                    loopback_supported;
        struct nicvf_rss_info   rss_info;
        struct tasklet_struct   qs_err_task;
        struct work_struct      reset_task;

        /* Interrupt coalescing settings */
        u32                     cq_coalesce_usecs;
        u32                     msg_enable;

        /* Stats */
        struct nicvf_hw_stats   hw_stats;
        struct nicvf_drv_stats  __percpu *drv_stats;
        struct bgx_stats        bgx_stats;

        /* MSI-X */
        bool                    msix_enabled;
        u8                      num_vec;
        struct msix_entry       msix_entries[NIC_VF_MSIX_VECTORS];
        char                    irq_name[NIC_VF_MSIX_VECTORS][IFNAMSIZ + 15];
        bool                    irq_allocated[NIC_VF_MSIX_VECTORS];
        cpumask_var_t           affinity_mask[NIC_VF_MSIX_VECTORS];

        /* VF <-> PF mailbox communication */
        bool                    pf_acked;
        bool                    pf_nacked;
        bool                    set_mac_pending;
} ____cacheline_aligned_in_smp;

/* PF <--> VF mailbox communication
 * Eight 64-bit registers are shared between the PF and each VF;
 * a separate set exists for every VF.
 * Writing '1' into the last register (mbx7) marks the end of a message.
 */
#define NIC_PF_VF_MAILBOX_SIZE          2
#define NIC_MBOX_MSG_TIMEOUT            2000 /* ms */

/* Mailbox message types */
#define NIC_MBOX_MSG_READY              0x01    /* Is PF ready to rcv msgs */
#define NIC_MBOX_MSG_ACK                0x02    /* ACK the message received */
#define NIC_MBOX_MSG_NACK               0x03    /* NACK the message received */
#define NIC_MBOX_MSG_QS_CFG             0x04    /* Configure Qset */
#define NIC_MBOX_MSG_RQ_CFG             0x05    /* Configure receive queue */
#define NIC_MBOX_MSG_SQ_CFG             0x06    /* Configure Send queue */
#define NIC_MBOX_MSG_RQ_DROP_CFG        0x07    /* Configure RQ drop levels */
#define NIC_MBOX_MSG_SET_MAC            0x08    /* Add MAC ID to DMAC filter */
#define NIC_MBOX_MSG_SET_MAX_FRS        0x09    /* Set max frame size */
#define NIC_MBOX_MSG_CPI_CFG            0x0A    /* Config CPI, RSSI */
#define NIC_MBOX_MSG_RSS_SIZE           0x0B    /* Get RSS indir_tbl size */
#define NIC_MBOX_MSG_RSS_CFG            0x0C    /* Config RSS table */
#define NIC_MBOX_MSG_RSS_CFG_CONT       0x0D    /* RSS config continuation */
#define NIC_MBOX_MSG_RQ_BP_CFG          0x0E    /* RQ backpressure config */
#define NIC_MBOX_MSG_RQ_SW_SYNC         0x0F    /* Flush inflight pkts to RQ */
#define NIC_MBOX_MSG_BGX_STATS          0x10    /* Get stats from BGX */
#define NIC_MBOX_MSG_BGX_LINK_CHANGE    0x11    /* BGX:LMAC link status */
#define NIC_MBOX_MSG_ALLOC_SQS          0x12    /* Allocate secondary Qset */
#define NIC_MBOX_MSG_NICVF_PTR          0x13    /* Send nicvf ptr to PF */
#define NIC_MBOX_MSG_PNICVF_PTR         0x14    /* Get primary qset nicvf ptr */
#define NIC_MBOX_MSG_SNICVF_PTR         0x15    /* Send sqs nicvf ptr to PVF */
#define NIC_MBOX_MSG_LOOPBACK           0x16    /* Set interface in loopback */
#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17    /* Reset statistics counters */
#define NIC_MBOX_MSG_CFG_DONE           0xF0    /* VF configuration done */
#define NIC_MBOX_MSG_SHUTDOWN           0xF1    /* VF is being shut down */

struct nic_cfg_msg {
        u8    msg;
        u8    vf_id;
        u8    node_id;
        u8    tns_mode:1;
        u8    sqs_mode:1;
        u8    loopback_supported:1;
        u8    mac_addr[ETH_ALEN];
};

/* Qset configuration */
struct qs_cfg_msg {
        u8    msg;
        u8    num;
        u8    sqs_count;
        u64   cfg;
};

/* Receive queue configuration */
struct rq_cfg_msg {
        u8    msg;
        u8    qs_num;
        u8    rq_num;
        u64   cfg;
};

/* Send queue configuration */
struct sq_cfg_msg {
        u8    msg;
        u8    qs_num;
        u8    sq_num;
        bool  sqs_mode;
        u64   cfg;
};

/* Set VF's MAC address */
struct set_mac_msg {
        u8    msg;
        u8    vf_id;
        u8    mac_addr[ETH_ALEN];
};

/* Set Maximum frame size */
struct set_frs_msg {
        u8    msg;
        u8    vf_id;
        u16   max_frs;
};

/* Set CPI algorithm type */
struct cpi_cfg_msg {
        u8    msg;
        u8    vf_id;
        u8    rq_cnt;
        u8    cpi_alg;
};

/* Get RSS table size */
struct rss_sz_msg {
        u8    msg;
        u8    vf_id;
        u16   ind_tbl_size;
};

/* Set RSS configuration */
struct rss_cfg_msg {
        u8    msg;
        u8    vf_id;
        u8    hash_bits;
        u8    tbl_len;
        u8    tbl_offset;
#define RSS_IND_TBL_LEN_PER_MBX_MSG     8
        u8    ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
};
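
/* Illustrative sketch (simplified; the driver's real logic lives in its .c
 * files): the whole indirection table does not fit in one mailbox message,
 * so it is sent in chunks of RSS_IND_TBL_LEN_PER_MBX_MSG entries. The first
 * chunk is a NIC_MBOX_MSG_RSS_CFG message and the rest are
 * NIC_MBOX_MSG_RSS_CFG_CONT, with tbl_offset telling the PF where each
 * chunk starts. The send() callback here is hypothetical and stands in for
 * the VF's mailbox transmit path.
 */
static inline int
nicvf_example_send_rss_cfg(struct nicvf_rss_info *rss, u8 vf_id,
                           int (*send)(struct rss_cfg_msg *msg))
{
        struct rss_cfg_msg cfg = {};
        int remaining = rss->rss_size;
        int i, nextq = 0, err;

        cfg.msg = NIC_MBOX_MSG_RSS_CFG;
        cfg.vf_id = vf_id;
        cfg.hash_bits = rss->hash_bits;
        cfg.tbl_offset = 0;

        while (remaining) {
                cfg.tbl_len = min(remaining, RSS_IND_TBL_LEN_PER_MBX_MSG);
                for (i = 0; i < cfg.tbl_len; i++)
                        cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

                err = send(&cfg);
                if (err)
                        return err;

                cfg.tbl_offset += cfg.tbl_len;
                cfg.msg = NIC_MBOX_MSG_RSS_CFG_CONT;
                remaining -= cfg.tbl_len;
        }
        return 0;
}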

struct bgx_stats_msg {
        u8    msg;
        u8    vf_id;
        u8    rx;
        u8    idx;
        u64   stats;
};

/* Physical interface link status */
struct bgx_link_status {
        u8    msg;
        u8    link_up;
        u8    duplex;
        u32   speed;
};

/* Get Extra Qset IDs */
struct sqs_alloc {
        u8    msg;
        u8    vf_id;
        u8    qs_count;
};

struct nicvf_ptr {
        u8    msg;
        u8    vf_id;
        bool  sqs_mode;
        u8    sqs_id;
        u64   nicvf;
};

/* Set interface in loopback mode */
struct set_loopback {
        u8    msg;
        u8    vf_id;
        bool  enable;
};

/* Reset statistics counters */
struct reset_stat_cfg {
        u8    msg;
        /* Bitmap to select NIC_PF_VNIC(vf_id)_RX_STAT(0..13) */
        u16   rx_stat_mask;
        /* Bitmap to select NIC_PF_VNIC(vf_id)_TX_STAT(0..4) */
        u8    tx_stat_mask;
        /* Bitmap to select NIC_PF_QS(0..127)_RQ(0..7)_STAT(0..1)
         * bit14, bit15 NIC_PF_QS(vf_id)_RQ7_STAT(0..1)
         * bit12, bit13 NIC_PF_QS(vf_id)_RQ6_STAT(0..1)
         * ..
         * bit2, bit3 NIC_PF_QS(vf_id)_RQ1_STAT(0..1)
         * bit0, bit1 NIC_PF_QS(vf_id)_RQ0_STAT(0..1)
         */
        u16   rq_stat_mask;
        /* Bitmap to select NIC_PF_QS(0..127)_SQ(0..7)_STAT(0..1)
         * bit14, bit15 NIC_PF_QS(vf_id)_SQ7_STAT(0..1)
         * bit12, bit13 NIC_PF_QS(vf_id)_SQ6_STAT(0..1)
         * ..
         * bit2, bit3 NIC_PF_QS(vf_id)_SQ1_STAT(0..1)
         * bit0, bit1 NIC_PF_QS(vf_id)_SQ0_STAT(0..1)
         */
        u16   sq_stat_mask;
};
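
/* Illustrative sketch, not a driver helper: building a reset request that
 * clears the RX octet and unicast counters (indices from enum
 * rx_stats_reg_offset above) plus both stats of RQ0 and RQ1, following the
 * bit layout documented in the struct comments (two bits per queue).
 */
static inline void nicvf_example_fill_reset_stat(struct reset_stat_cfg *cfg)
{
        cfg->msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
        cfg->rx_stat_mask = BIT(RX_OCTS) | BIT(RX_UCAST);
        cfg->tx_stat_mask = 0;
        cfg->rq_stat_mask = GENMASK(3, 0);      /* RQ0_STAT0/1, RQ1_STAT0/1 */
        cfg->sq_stat_mask = 0;
}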

/* 128-bit shared memory between PF and each VF */
union nic_mbx {
        struct { u8 msg; }      msg;
        struct nic_cfg_msg      nic_cfg;
        struct qs_cfg_msg       qs;
        struct rq_cfg_msg       rq;
        struct sq_cfg_msg       sq;
        struct set_mac_msg      mac;
        struct set_frs_msg      frs;
        struct cpi_cfg_msg      cpi_cfg;
        struct rss_sz_msg       rss_size;
        struct rss_cfg_msg      rss_cfg;
        struct bgx_stats_msg    bgx_stats;
        struct bgx_link_status  link_status;
        struct sqs_alloc        sqs_alloc;
        struct nicvf_ptr        nicvf;
        struct set_loopback     lbk;
        struct reset_stat_cfg   reset_stat;
};
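
/* Rough sketch of how a 128-bit message is pushed through the mailbox
 * (an assumption for illustration, not the driver's implementation): the
 * union is viewed as NIC_PF_VF_MAILBOX_SIZE 64-bit words which are written
 * into the per-VF mailbox registers, last register last, since (per the
 * comment above) the final register carries the end-of-message indication.
 * The caller is assumed to pass the mapped address of the first mailbox
 * register; the real offsets live in nic_reg.h, and the <linux/io.h>
 * accessors are assumed to be reachable through the includes above.
 */
static inline void nicvf_example_write_mbx(void __iomem *mbx_addr,
                                           const union nic_mbx *mbx)
{
        const u64 *msg = (const u64 *)mbx;

        writeq(msg[0], mbx_addr);
        writeq(msg[1], mbx_addr + 0x8);         /* last word ends the message */
}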

#define NIC_NODE_ID_MASK        0x03
#define NIC_NODE_ID_SHIFT       44

/* The NUMA node ID is encoded in bits 45:44 of the BAR0 physical address */
static inline int nic_get_node_id(struct pci_dev *pdev)
{
        u64 addr = pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM);
        return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK);
}

/* 88xx pass 1.x silicon is identified by a PCI revision ID below 8 */
static inline bool pass1_silicon(struct pci_dev *pdev)
{
        return (pdev->revision < 8) &&
                (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
}

static inline bool pass2_silicon(struct pci_dev *pdev)
{
        return (pdev->revision >= 8) &&
                (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
}

int nicvf_set_real_num_queues(struct net_device *netdev,
                              int tx_queues, int rx_queues);
int nicvf_open(struct net_device *netdev);
int nicvf_stop(struct net_device *netdev);
int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
void nicvf_config_rss(struct nicvf *nic);
void nicvf_set_rss_key(struct nicvf *nic);
void nicvf_set_ethtool_ops(struct net_device *netdev);
void nicvf_update_stats(struct nicvf *nic);
void nicvf_update_lmac_stats(struct nicvf *nic);

#endif /* NIC_H */