/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_IF_H
#define _QED_IF_H

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_chain.h>
#include <linux/io-64-nonatomic-lo-hi.h>

enum dcbx_protocol_type {
        DCBX_PROTOCOL_ISCSI,
        DCBX_PROTOCOL_FCOE,
        DCBX_PROTOCOL_ROCE,
        DCBX_PROTOCOL_ROCE_V2,
        DCBX_PROTOCOL_ETH,
        DCBX_MAX_PROTOCOL_TYPE
};

#define QED_ROCE_PROTOCOL_INDEX (3)

#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
#define QED_LLDP_PORT_ID_STAT_LEN 4
#define QED_DCBX_MAX_APP_PROTOCOL 32
#define QED_MAX_PFC_PRIORITIES 8
#define QED_DCBX_DSCP_SIZE 64

struct qed_dcbx_lldp_remote {
        u32 peer_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
        u32 peer_port_id[QED_LLDP_PORT_ID_STAT_LEN];
        bool enable_rx;
        bool enable_tx;
        u32 tx_interval;
        u32 max_credit;
};

struct qed_dcbx_lldp_local {
        u32 local_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
        u32 local_port_id[QED_LLDP_PORT_ID_STAT_LEN];
};

struct qed_dcbx_app_prio {
        u8 roce;
        u8 roce_v2;
        u8 fcoe;
        u8 iscsi;
        u8 eth;
};

struct qed_dbcx_pfc_params {
        bool willing;
        bool enabled;
        u8 prio[QED_MAX_PFC_PRIORITIES];
        u8 max_tc;
};

enum qed_dcbx_sf_ieee_type {
        QED_DCBX_SF_IEEE_ETHTYPE,
        QED_DCBX_SF_IEEE_TCP_PORT,
        QED_DCBX_SF_IEEE_UDP_PORT,
        QED_DCBX_SF_IEEE_TCP_UDP_PORT
};

struct qed_app_entry {
        bool ethtype;
        enum qed_dcbx_sf_ieee_type sf_ieee;
        bool enabled;
        u8 prio;
        u16 proto_id;
        enum dcbx_protocol_type proto_type;
};

struct qed_dcbx_params {
        struct qed_app_entry app_entry[QED_DCBX_MAX_APP_PROTOCOL];
        u16 num_app_entries;
        bool app_willing;
        bool app_valid;
        bool app_error;
        bool ets_willing;
        bool ets_enabled;
        bool ets_cbs;
        bool valid;
        u8 ets_pri_tc_tbl[QED_MAX_PFC_PRIORITIES];
        u8 ets_tc_bw_tbl[QED_MAX_PFC_PRIORITIES];
        u8 ets_tc_tsa_tbl[QED_MAX_PFC_PRIORITIES];
        struct qed_dbcx_pfc_params pfc;
        u8 max_ets_tc;
};

struct qed_dcbx_admin_params {
        struct qed_dcbx_params params;
        bool valid;
};

struct qed_dcbx_remote_params {
        struct qed_dcbx_params params;
        bool valid;
};

struct qed_dcbx_operational_params {
        struct qed_dcbx_app_prio app_prio;
        struct qed_dcbx_params params;
        bool valid;
        bool enabled;
        bool ieee;
        bool cee;
        bool local;
        u32 err;
};

struct qed_dcbx_get {
        struct qed_dcbx_operational_params operational;
        struct qed_dcbx_lldp_remote lldp_remote;
        struct qed_dcbx_lldp_local lldp_local;
        struct qed_dcbx_remote_params remote;
        struct qed_dcbx_admin_params local;
};

enum qed_nvm_images {
        QED_NVM_IMAGE_ISCSI_CFG,
        QED_NVM_IMAGE_FCOE_CFG,
        QED_NVM_IMAGE_MDUMP,
        QED_NVM_IMAGE_NVM_CFG1,
        QED_NVM_IMAGE_DEFAULT_CFG,
        QED_NVM_IMAGE_NVM_META,
};

struct qed_link_eee_params {
        u32 tx_lpi_timer;
#define QED_EEE_1G_ADV          BIT(0)
#define QED_EEE_10G_ADV         BIT(1)

        /* Capabilities are represented using QED_EEE_*_ADV values */
        u8 adv_caps;
        u8 lp_adv_caps;
        bool enable;
        bool tx_lpi_enable;
};

enum qed_led_mode {
        QED_LED_MODE_OFF,
        QED_LED_MODE_ON,
        QED_LED_MODE_RESTORE
};

struct qed_mfw_tlv_eth {
        u16 lso_maxoff_size;
        bool lso_maxoff_size_set;
        u16 lso_minseg_size;
        bool lso_minseg_size_set;
        u8 prom_mode;
        bool prom_mode_set;
        u16 tx_descr_size;
        bool tx_descr_size_set;
        u16 rx_descr_size;
        bool rx_descr_size_set;
        u16 netq_count;
        bool netq_count_set;
        u32 tcp4_offloads;
        bool tcp4_offloads_set;
        u32 tcp6_offloads;
        bool tcp6_offloads_set;
        u16 tx_descr_qdepth;
        bool tx_descr_qdepth_set;
        u16 rx_descr_qdepth;
        bool rx_descr_qdepth_set;
        u8 iov_offload;
#define QED_MFW_TLV_IOV_OFFLOAD_NONE            (0)
#define QED_MFW_TLV_IOV_OFFLOAD_MULTIQUEUE      (1)
#define QED_MFW_TLV_IOV_OFFLOAD_VEB             (2)
#define QED_MFW_TLV_IOV_OFFLOAD_VEPA            (3)
        bool iov_offload_set;
        u8 txqs_empty;
        bool txqs_empty_set;
        u8 rxqs_empty;
        bool rxqs_empty_set;
        u8 num_txqs_full;
        bool num_txqs_full_set;
        u8 num_rxqs_full;
        bool num_rxqs_full_set;
};

#define QED_MFW_TLV_TIME_SIZE   14
struct qed_mfw_tlv_time {
        bool b_set;
        u8 month;
        u8 day;
        u8 hour;
        u8 min;
        u16 msec;
        u16 usec;
};

struct qed_mfw_tlv_fcoe {
        u8 scsi_timeout;
        bool scsi_timeout_set;
        u32 rt_tov;
        bool rt_tov_set;
        u32 ra_tov;
        bool ra_tov_set;
        u32 ed_tov;
        bool ed_tov_set;
        u32 cr_tov;
        bool cr_tov_set;
        u8 boot_type;
        bool boot_type_set;
        u8 npiv_state;
        bool npiv_state_set;
        u32 num_npiv_ids;
        bool num_npiv_ids_set;
        u8 switch_name[8];
        bool switch_name_set;
        u16 switch_portnum;
        bool switch_portnum_set;
        u8 switch_portid[3];
        bool switch_portid_set;
        u8 vendor_name[8];
        bool vendor_name_set;
        u8 switch_model[8];
        bool switch_model_set;
        u8 switch_fw_version[8];
        bool switch_fw_version_set;
        u8 qos_pri;
        bool qos_pri_set;
        u8 port_alias[3];
        bool port_alias_set;
        u8 port_state;
#define QED_MFW_TLV_PORT_STATE_OFFLINE  (0)
#define QED_MFW_TLV_PORT_STATE_LOOP             (1)
#define QED_MFW_TLV_PORT_STATE_P2P              (2)
#define QED_MFW_TLV_PORT_STATE_FABRIC           (3)
        bool port_state_set;
        u16 fip_tx_descr_size;
        bool fip_tx_descr_size_set;
        u16 fip_rx_descr_size;
        bool fip_rx_descr_size_set;
        u16 link_failures;
        bool link_failures_set;
        u8 fcoe_boot_progress;
        bool fcoe_boot_progress_set;
        u64 rx_bcast;
        bool rx_bcast_set;
        u64 tx_bcast;
        bool tx_bcast_set;
        u16 fcoe_txq_depth;
        bool fcoe_txq_depth_set;
        u16 fcoe_rxq_depth;
        bool fcoe_rxq_depth_set;
        u64 fcoe_rx_frames;
        bool fcoe_rx_frames_set;
        u64 fcoe_rx_bytes;
        bool fcoe_rx_bytes_set;
        u64 fcoe_tx_frames;
        bool fcoe_tx_frames_set;
        u64 fcoe_tx_bytes;
        bool fcoe_tx_bytes_set;
        u16 crc_count;
        bool crc_count_set;
        u32 crc_err_src_fcid[5];
        bool crc_err_src_fcid_set[5];
        struct qed_mfw_tlv_time crc_err[5];
        u16 losync_err;
        bool losync_err_set;
        u16 losig_err;
        bool losig_err_set;
        u16 primtive_err;
        bool primtive_err_set;
        u16 disparity_err;
        bool disparity_err_set;
        u16 code_violation_err;
        bool code_violation_err_set;
        u32 flogi_param[4];
        bool flogi_param_set[4];
        struct qed_mfw_tlv_time flogi_tstamp;
        u32 flogi_acc_param[4];
        bool flogi_acc_param_set[4];
        struct qed_mfw_tlv_time flogi_acc_tstamp;
        u32 flogi_rjt;
        bool flogi_rjt_set;
        struct qed_mfw_tlv_time flogi_rjt_tstamp;
        u32 fdiscs;
        bool fdiscs_set;
        u8 fdisc_acc;
        bool fdisc_acc_set;
        u8 fdisc_rjt;
        bool fdisc_rjt_set;
        u8 plogi;
        bool plogi_set;
        u8 plogi_acc;
        bool plogi_acc_set;
        u8 plogi_rjt;
        bool plogi_rjt_set;
        u32 plogi_dst_fcid[5];
        bool plogi_dst_fcid_set[5];
        struct qed_mfw_tlv_time plogi_tstamp[5];
        u32 plogi_acc_src_fcid[5];
        bool plogi_acc_src_fcid_set[5];
        struct qed_mfw_tlv_time plogi_acc_tstamp[5];
        u8 tx_plogos;
        bool tx_plogos_set;
        u8 plogo_acc;
        bool plogo_acc_set;
        u8 plogo_rjt;
        bool plogo_rjt_set;
        u32 plogo_src_fcid[5];
        bool plogo_src_fcid_set[5];
        struct qed_mfw_tlv_time plogo_tstamp[5];
        u8 rx_logos;
        bool rx_logos_set;
        u8 tx_accs;
        bool tx_accs_set;
        u8 tx_prlis;
        bool tx_prlis_set;
        u8 rx_accs;
        bool rx_accs_set;
        u8 tx_abts;
        bool tx_abts_set;
        u8 rx_abts_acc;
        bool rx_abts_acc_set;
        u8 rx_abts_rjt;
        bool rx_abts_rjt_set;
        u32 abts_dst_fcid[5];
        bool abts_dst_fcid_set[5];
        struct qed_mfw_tlv_time abts_tstamp[5];
        u8 rx_rscn;
        bool rx_rscn_set;
        u32 rx_rscn_nport[4];
        bool rx_rscn_nport_set[4];
        u8 tx_lun_rst;
        bool tx_lun_rst_set;
        u8 abort_task_sets;
        bool abort_task_sets_set;
        u8 tx_tprlos;
        bool tx_tprlos_set;
        u8 tx_nos;
        bool tx_nos_set;
        u8 rx_nos;
        bool rx_nos_set;
        u8 ols;
        bool ols_set;
        u8 lr;
        bool lr_set;
        u8 lrr;
        bool lrr_set;
        u8 tx_lip;
        bool tx_lip_set;
        u8 rx_lip;
        bool rx_lip_set;
        u8 eofa;
        bool eofa_set;
        u8 eofni;
        bool eofni_set;
        u8 scsi_chks;
        bool scsi_chks_set;
        u8 scsi_cond_met;
        bool scsi_cond_met_set;
        u8 scsi_busy;
        bool scsi_busy_set;
        u8 scsi_inter;
        bool scsi_inter_set;
        u8 scsi_inter_cond_met;
        bool scsi_inter_cond_met_set;
        u8 scsi_rsv_conflicts;
        bool scsi_rsv_conflicts_set;
        u8 scsi_tsk_full;
        bool scsi_tsk_full_set;
        u8 scsi_aca_active;
        bool scsi_aca_active_set;
        u8 scsi_tsk_abort;
        bool scsi_tsk_abort_set;
        u32 scsi_rx_chk[5];
        bool scsi_rx_chk_set[5];
        struct qed_mfw_tlv_time scsi_chk_tstamp[5];
};

struct qed_mfw_tlv_iscsi {
        u8 target_llmnr;
        bool target_llmnr_set;
        u8 header_digest;
        bool header_digest_set;
        u8 data_digest;
        bool data_digest_set;
        u8 auth_method;
#define QED_MFW_TLV_AUTH_METHOD_NONE            (1)
#define QED_MFW_TLV_AUTH_METHOD_CHAP            (2)
#define QED_MFW_TLV_AUTH_METHOD_MUTUAL_CHAP     (3)
        bool auth_method_set;
        u16 boot_taget_portal;
        bool boot_taget_portal_set;
        u16 frame_size;
        bool frame_size_set;
        u16 tx_desc_size;
        bool tx_desc_size_set;
        u16 rx_desc_size;
        bool rx_desc_size_set;
        u8 boot_progress;
        bool boot_progress_set;
        u16 tx_desc_qdepth;
        bool tx_desc_qdepth_set;
        u16 rx_desc_qdepth;
        bool rx_desc_qdepth_set;
        u64 rx_frames;
        bool rx_frames_set;
        u64 rx_bytes;
        bool rx_bytes_set;
        u64 tx_frames;
        bool tx_frames_set;
        u64 tx_bytes;
        bool tx_bytes_set;
};

enum qed_db_rec_width {
        DB_REC_WIDTH_32B,
        DB_REC_WIDTH_64B,
};

enum qed_db_rec_space {
        DB_REC_KERNEL,
        DB_REC_USER,
};

#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
                                            (void __iomem *)(reg_addr))

#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))

#define DIRECT_REG_WR64(reg_addr, val) writeq((u64)val, \
                                              (void __iomem *)(reg_addr))

#define QED_COALESCE_MAX 0x1FF
#define QED_DEFAULT_RX_USECS 12
#define QED_DEFAULT_TX_USECS 48

/* forward */
struct qed_dev;

struct qed_eth_pf_params {
        /* The following parameters are used during HW-init and need to be
         * passed as arguments to the update_pf_params routine invoked before
         * slowpath start (a usage sketch follows struct qed_pf_params below).
         */
        u16 num_cons;

        /* per-VF number of CIDs */
        u8 num_vf_cons;
#define ETH_PF_PARAMS_VF_CONS_DEFAULT   (32)

        /* To enable arfs, a positive number needs to be set prior to HW-init
         * [as filters require allocated searcher ILT memory]. This sets the
         * maximal number of configured steering-filters.
         */
        u32 num_arfs_filters;
};

struct qed_fcoe_pf_params {
        /* The following parameters are used during protocol-init */
        u64 glbl_q_params_addr;
        u64 bdq_pbl_base_addr[2];

        /* The following parameters are used during HW-init and need to be
         * passed as arguments to the update_pf_params routine invoked before
         * slowpath start.
         */
        u16 num_cons;
        u16 num_tasks;

        /* The following parameters are used during protocol-init */
        u16 sq_num_pbl_pages;

        u16 cq_num_entries;
        u16 cmdq_num_entries;
        u16 rq_buffer_log_size;
        u16 mtu;
        u16 dummy_icid;
        u16 bdq_xoff_threshold[2];
        u16 bdq_xon_threshold[2];
        u16 rq_buffer_size;
        u8 num_cqs;             /* num of global CQs */
        u8 log_page_size;
        u8 gl_rq_pi;
        u8 gl_cmd_pi;
        u8 debug_mode;
        u8 is_target;
        u8 bdq_pbl_num_entries[2];
};

/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
struct qed_iscsi_pf_params {
        u64 glbl_q_params_addr;
        u64 bdq_pbl_base_addr[3];
        u16 cq_num_entries;
        u16 cmdq_num_entries;
        u32 two_msl_timer;
        u16 tx_sws_timer;

        /* The following parameters are used during HW-init and need to be
         * passed as arguments to the update_pf_params routine invoked before
         * slowpath start.
         */
        u16 num_cons;
        u16 num_tasks;

        /* The following parameters are used during protocol-init */
        u16 half_way_close_timeout;
        u16 bdq_xoff_threshold[3];
        u16 bdq_xon_threshold[3];
        u16 cmdq_xoff_threshold;
        u16 cmdq_xon_threshold;
        u16 rq_buffer_size;

        u8 num_sq_pages_in_ring;
        u8 num_r2tq_pages_in_ring;
        u8 num_uhq_pages_in_ring;
        u8 num_queues;
        u8 log_page_size;
        u8 rqe_log_size;
        u8 max_fin_rt;
        u8 gl_rq_pi;
        u8 gl_cmd_pi;
        u8 debug_mode;
        u8 ll2_ooo_queue_id;

        u8 is_target;
        u8 is_soc_en;
        u8 soc_num_of_blocks_log;
        u8 bdq_pbl_num_entries[3];
};

struct qed_rdma_pf_params {
        /* Supplied to QED during resource allocation (may affect the ILT and
         * the doorbell BAR).
         */
        u32 min_dpis;           /* number of requested DPIs */
        u32 num_qps;            /* number of requested Queue Pairs */
        u32 num_srqs;           /* number of requested SRQ */
        u8 roce_edpm_mode;      /* see QED_ROCE_EDPM_MODE_ENABLE */
        u8 gl_pi;               /* protocol index */

        /* Will allocate rate limiters to be used with QPs */
        u8 enable_dcqcn;
};

struct qed_pf_params {
        struct qed_eth_pf_params eth_pf_params;
        struct qed_fcoe_pf_params fcoe_pf_params;
        struct qed_iscsi_pf_params iscsi_pf_params;
        struct qed_rdma_pf_params rdma_pf_params;
};
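
/* Illustrative sketch, not part of the ABI: a protocol driver would normally
 * zero a struct qed_pf_params, fill only the member relevant to its protocol
 * and hand it to the update_pf_params() op (struct qed_common_ops below)
 * before slowpath start. The 'cdev', 'ops' and QEDE_MAX_CONNS names are
 * assumptions made for this example only.
 *
 *	struct qed_pf_params pf_params;
 *
 *	memset(&pf_params, 0, sizeof(pf_params));
 *	pf_params.eth_pf_params.num_cons = QEDE_MAX_CONNS;
 *	pf_params.eth_pf_params.num_vf_cons = ETH_PF_PARAMS_VF_CONS_DEFAULT;
 *	pf_params.eth_pf_params.num_arfs_filters = 64;	// non-zero enables aRFS
 *	ops->update_pf_params(cdev, &pf_params);
 */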

enum qed_int_mode {
        QED_INT_MODE_INTA,
        QED_INT_MODE_MSIX,
        QED_INT_MODE_MSI,
        QED_INT_MODE_POLL,
};

struct qed_sb_info {
        struct status_block_e4 *sb_virt;
        dma_addr_t sb_phys;
        u32 sb_ack; /* Last given ack */
        u16 igu_sb_id;
        void __iomem *igu_addr;
        u8 flags;
#define QED_SB_INFO_INIT        0x1
#define QED_SB_INFO_SETUP       0x2

        struct qed_dev *cdev;
};

enum qed_hw_err_type {
        QED_HW_ERR_FAN_FAIL,
        QED_HW_ERR_MFW_RESP_FAIL,
        QED_HW_ERR_HW_ATTN,
        QED_HW_ERR_DMAE_FAIL,
        QED_HW_ERR_RAMROD_FAIL,
        QED_HW_ERR_FW_ASSERT,
        QED_HW_ERR_LAST,
};

enum qed_dev_type {
        QED_DEV_TYPE_BB,
        QED_DEV_TYPE_AH,
        QED_DEV_TYPE_E5,
};

struct qed_dev_info {
        unsigned long   pci_mem_start;
        unsigned long   pci_mem_end;
        unsigned int    pci_irq;
        u8              num_hwfns;

        u8              hw_mac[ETH_ALEN];

        /* FW version */
        u16             fw_major;
        u16             fw_minor;
        u16             fw_rev;
        u16             fw_eng;

        /* MFW version */
        u32             mfw_rev;
#define QED_MFW_VERSION_0_MASK          0x000000FF
#define QED_MFW_VERSION_0_OFFSET        0
#define QED_MFW_VERSION_1_MASK          0x0000FF00
#define QED_MFW_VERSION_1_OFFSET        8
#define QED_MFW_VERSION_2_MASK          0x00FF0000
#define QED_MFW_VERSION_2_OFFSET        16
#define QED_MFW_VERSION_3_MASK          0xFF000000
#define QED_MFW_VERSION_3_OFFSET        24

        u32             flash_size;
        bool            b_arfs_capable;
        bool            b_inter_pf_switch;
        bool            tx_switching;
        bool            rdma_supported;
        u16             mtu;

        bool wol_support;
        bool smart_an;

        /* MBI version */
        u32 mbi_version;
#define QED_MBI_VERSION_0_MASK          0x000000FF
#define QED_MBI_VERSION_0_OFFSET        0
#define QED_MBI_VERSION_1_MASK          0x0000FF00
#define QED_MBI_VERSION_1_OFFSET        8
#define QED_MBI_VERSION_2_MASK          0x00FF0000
#define QED_MBI_VERSION_2_OFFSET        16

        enum qed_dev_type dev_type;

        /* Output parameters for qede */
        bool            vxlan_enable;
        bool            gre_enable;
        bool            geneve_enable;

        u8              abs_pf_id;
};

enum qed_sb_type {
        QED_SB_TYPE_L2_QUEUE,
        QED_SB_TYPE_CNQ,
        QED_SB_TYPE_STORAGE,
};

enum qed_protocol {
        QED_PROTOCOL_ETH,
        QED_PROTOCOL_ISCSI,
        QED_PROTOCOL_FCOE,
};

enum qed_fec_mode {
        QED_FEC_MODE_NONE                       = BIT(0),
        QED_FEC_MODE_FIRECODE                   = BIT(1),
        QED_FEC_MODE_RS                         = BIT(2),
        QED_FEC_MODE_AUTO                       = BIT(3),
        QED_FEC_MODE_UNSUPPORTED                = BIT(4),
};

struct qed_link_params {
        bool                                    link_up;

        u32                                     override_flags;
#define QED_LINK_OVERRIDE_SPEED_AUTONEG         BIT(0)
#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS      BIT(1)
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED    BIT(2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG          BIT(3)
#define QED_LINK_OVERRIDE_LOOPBACK_MODE         BIT(4)
#define QED_LINK_OVERRIDE_EEE_CONFIG            BIT(5)
#define QED_LINK_OVERRIDE_FEC_CONFIG            BIT(6)

        bool                                    autoneg;
        __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds);
        u32                                     forced_speed;

        u32                                     pause_config;
#define QED_LINK_PAUSE_AUTONEG_ENABLE           BIT(0)
#define QED_LINK_PAUSE_RX_ENABLE                BIT(1)
#define QED_LINK_PAUSE_TX_ENABLE                BIT(2)

        u32                                     loopback_mode;
#define QED_LINK_LOOPBACK_NONE                  BIT(0)
#define QED_LINK_LOOPBACK_INT_PHY               BIT(1)
#define QED_LINK_LOOPBACK_EXT_PHY               BIT(2)
#define QED_LINK_LOOPBACK_EXT                   BIT(3)
#define QED_LINK_LOOPBACK_MAC                   BIT(4)
#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123     BIT(5)
#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301     BIT(6)
#define QED_LINK_LOOPBACK_PCS_AH_ONLY           BIT(7)
#define QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY   BIT(8)
#define QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY   BIT(9)

        struct qed_link_eee_params              eee;
        u32                                     fec;
};
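
/* Illustrative sketch (assumes 'cdev' and 'ops' were obtained through the
 * common ops below): forcing a speed via the set_link() op. Only the fields
 * whose QED_LINK_OVERRIDE_* bit is set in override_flags are applied; the
 * rest of the link configuration is left as-is.
 *
 *	struct qed_link_params params;
 *
 *	memset(&params, 0, sizeof(params));
 *	params.override_flags = QED_LINK_OVERRIDE_SPEED_AUTONEG |
 *				QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
 *	params.autoneg = false;
 *	params.forced_speed = 25000;	// in Mb/s
 *	params.link_up = true;
 *	ops->set_link(cdev, &params);
 */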

struct qed_link_output {
        bool                                    link_up;

        __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_caps);
        __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised_caps);
        __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_caps);

        u32                                     speed;     /* In Mb/s */
        u8                                      duplex;    /* In DUPLEX defs */
        u8                                      port;      /* In PORT defs */
        bool                                    autoneg;
        u32                                     pause_config;

        /* EEE - capability & param */
        bool                                    eee_supported;
        bool                                    eee_active;
        u8                                      sup_caps;
        struct qed_link_eee_params              eee;

        u32                                     sup_fec;
        u32                                     active_fec;
};

struct qed_probe_params {
        enum qed_protocol protocol;
        u32 dp_module;
        u8 dp_level;
        bool is_vf;
        bool recov_in_prog;
};

#define QED_DRV_VER_STR_SIZE 12
struct qed_slowpath_params {
        u32     int_mode;
        u8      drv_major;
        u8      drv_minor;
        u8      drv_rev;
        u8      drv_eng;
        u8      name[QED_DRV_VER_STR_SIZE];
};

#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */

struct qed_int_info {
        struct msix_entry       *msix;
        u8                      msix_cnt;

        /* This should be updated by the protocol driver */
        u8                      used_cnt;
};

struct qed_generic_tlvs {
#define QED_TLV_IP_CSUM         BIT(0)
#define QED_TLV_LSO             BIT(1)
        u16 feat_flags;
#define QED_TLV_MAC_COUNT       3
        u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN];
};
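
/* Illustrative sketch of a get_generic_tlv_data() callback (see
 * struct qed_common_cb_ops below) filling this structure. 'my_dev' and the
 * callback name are assumptions for the example only.
 *
 *	static void my_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
 *	{
 *		struct my_dev *edev = dev;
 *		struct net_device *ndev = edev->ndev;
 *
 *		if (ndev->features & NETIF_F_IP_CSUM)
 *			data->feat_flags |= QED_TLV_IP_CSUM;
 *		if (ndev->features & NETIF_F_TSO)
 *			data->feat_flags |= QED_TLV_LSO;
 *		ether_addr_copy(data->mac[0], ndev->dev_addr);
 *	}
 */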

#define QED_I2C_DEV_ADDR_A0 0xA0
#define QED_I2C_DEV_ADDR_A2 0xA2

#define QED_NVM_SIGNATURE 0x12435687

enum qed_nvm_flash_cmd {
        QED_NVM_FLASH_CMD_FILE_DATA = 0x2,
        QED_NVM_FLASH_CMD_FILE_START = 0x3,
        QED_NVM_FLASH_CMD_NVM_CHANGE = 0x4,
        QED_NVM_FLASH_CMD_NVM_CFG_ID = 0x5,
        QED_NVM_FLASH_CMD_NVM_MAX,
};

struct qed_common_cb_ops {
        void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc);
        void (*link_update)(void *dev, struct qed_link_output *link);
        void (*schedule_recovery_handler)(void *dev);
        void (*schedule_hw_err_handler)(void *dev,
                                        enum qed_hw_err_type err_type);
        void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
        void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
        void (*get_protocol_tlv_data)(void *dev, void *data);
        void (*bw_update)(void *dev);
};

struct qed_selftest_ops {
/**
 * @brief selftest_interrupt - Perform interrupt test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
        int (*selftest_interrupt)(struct qed_dev *cdev);

/**
 * @brief selftest_memory - Perform memory test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
        int (*selftest_memory)(struct qed_dev *cdev);

/**
 * @brief selftest_register - Perform register test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
        int (*selftest_register)(struct qed_dev *cdev);

/**
 * @brief selftest_clock - Perform clock test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
        int (*selftest_clock)(struct qed_dev *cdev);

/**
 * @brief selftest_nvram - Perform nvram test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
        int (*selftest_nvram) (struct qed_dev *cdev);
};

struct qed_common_ops {
        struct qed_selftest_ops *selftest;

        struct qed_dev* (*probe)(struct pci_dev *dev,
                                 struct qed_probe_params *params);

        void            (*remove)(struct qed_dev *cdev);

        int             (*set_power_state)(struct qed_dev *cdev,
                                           pci_power_t state);

        void (*set_name) (struct qed_dev *cdev, char name[]);

        /* Client drivers need to make this call before slowpath_start.
         * The PF params required for the call before slowpath_start are
         * documented within the qed_pf_params structure definition (a
         * bring-up sketch follows this structure).
         */
        void            (*update_pf_params)(struct qed_dev *cdev,
                                            struct qed_pf_params *params);
        int             (*slowpath_start)(struct qed_dev *cdev,
                                          struct qed_slowpath_params *params);

        int             (*slowpath_stop)(struct qed_dev *cdev);

        /* Requests to use `cnt' interrupts for fastpath.
         * Upon success, returns the number of interrupts allocated for fastpath.
         */
        int             (*set_fp_int)(struct qed_dev *cdev,
                                      u16 cnt);

        /* Fills `info' with pointers required for utilizing interrupts */
        int             (*get_fp_int)(struct qed_dev *cdev,
                                      struct qed_int_info *info);

        u32             (*sb_init)(struct qed_dev *cdev,
                                   struct qed_sb_info *sb_info,
                                   void *sb_virt_addr,
                                   dma_addr_t sb_phy_addr,
                                   u16 sb_id,
                                   enum qed_sb_type type);

        u32             (*sb_release)(struct qed_dev *cdev,
                                      struct qed_sb_info *sb_info,
                                      u16 sb_id,
                                      enum qed_sb_type type);

        void            (*simd_handler_config)(struct qed_dev *cdev,
                                               void *token,
                                               int index,
                                               void (*handler)(void *));

        void            (*simd_handler_clean)(struct qed_dev *cdev,
                                              int index);
        int (*dbg_grc)(struct qed_dev *cdev,
                       void *buffer, u32 *num_dumped_bytes);

        int (*dbg_grc_size)(struct qed_dev *cdev);

        int (*dbg_all_data) (struct qed_dev *cdev, void *buffer);

        int (*dbg_all_data_size) (struct qed_dev *cdev);

/**
 * @brief can_link_change - can the instance change the link or not
 *
 * @param cdev
 *
 * @return true if link-change is allowed, false otherwise.
 */
        bool (*can_link_change)(struct qed_dev *cdev);

/**
 * @brief set_link - set links according to params
 *
 * @param cdev
 * @param params - values used to override the default link configuration
 *
 * @return 0 on success, error otherwise.
 */
        int             (*set_link)(struct qed_dev *cdev,
                                    struct qed_link_params *params);

/**
 * @brief get_link - returns the current link state.
 *
 * @param cdev
 * @param if_link - structure to be filled with current link configuration.
 */
        void            (*get_link)(struct qed_dev *cdev,
                                    struct qed_link_output *if_link);

/**
 * @brief - drains chip in case Tx completions fail to arrive due to pause.
 *
 * @param cdev
 */
        int             (*drain)(struct qed_dev *cdev);

/**
 * @brief update_msglvl - update module debug level
 *
 * @param cdev
 * @param dp_module
 * @param dp_level
 */
        void            (*update_msglvl)(struct qed_dev *cdev,
                                         u32 dp_module,
                                         u8 dp_level);

        int             (*chain_alloc)(struct qed_dev *cdev,
                                       struct qed_chain *chain,
                                       struct qed_chain_init_params *params);

        void            (*chain_free)(struct qed_dev *cdev,
                                      struct qed_chain *p_chain);

/**
 * @brief nvm_flash - Flash nvm data.
 *
 * @param cdev
 * @param name - file containing the data
 *
 * @return 0 on success, error otherwise.
 */
        int (*nvm_flash)(struct qed_dev *cdev, const char *name);

/**
 * @brief nvm_get_image - reads an entire image from nvram
 *
 * @param cdev
 * @param type - type of the requested nvram image
 * @param buf - preallocated buffer to fill with the image
 * @param len - length of the allocated buffer
 *
 * @return 0 on success, error otherwise
 */
        int (*nvm_get_image)(struct qed_dev *cdev,
                             enum qed_nvm_images type, u8 *buf, u16 len);

/**
 * @brief set_coalesce - Configure Rx/Tx coalesce values in usec
 *
 * @param cdev
 * @param rx_coal - Rx coalesce value in usec
 * @param tx_coal - Tx coalesce value in usec
 * @param handle - Handle of the queue to configure
 *
 * @return 0 on success, error otherwise.
 */
        int (*set_coalesce)(struct qed_dev *cdev,
                            u16 rx_coal, u16 tx_coal, void *handle);

/**
 * @brief set_led - Configure LED mode
 *
 * @param cdev
 * @param mode - LED mode
 *
 * @return 0 on success, error otherwise.
 */
        int (*set_led)(struct qed_dev *cdev,
                       enum qed_led_mode mode);

/**
 * @brief attn_clr_enable - Prevent attentions from being reasserted
 *
 * @param cdev
 * @param clr_enable
 */
        void (*attn_clr_enable)(struct qed_dev *cdev, bool clr_enable);

/**
 * @brief db_recovery_add - add doorbell information to the doorbell
 * recovery mechanism.
 *
 * @param cdev
 * @param db_addr - doorbell address
 * @param db_data - address of where db_data is stored
 * @param db_width - doorbell is 32b or 64b
 * @param db_space - doorbell recovery addresses are user or kernel space
 */
        int (*db_recovery_add)(struct qed_dev *cdev,
                               void __iomem *db_addr,
                               void *db_data,
                               enum qed_db_rec_width db_width,
                               enum qed_db_rec_space db_space);

/**
 * @brief db_recovery_del - remove doorbell information from the doorbell
 * recovery mechanism. db_data serves as key (db_addr is not unique).
 *
 * @param cdev
 * @param db_addr - doorbell address
 * @param db_data - address where db_data is stored. Serves as key for the
 *                  entry to delete.
 */
        int (*db_recovery_del)(struct qed_dev *cdev,
                               void __iomem *db_addr, void *db_data);

/**
 * @brief recovery_process - Trigger a recovery process
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
        int (*recovery_process)(struct qed_dev *cdev);

/**
 * @brief recovery_prolog - Execute the prolog operations of a recovery process
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
        int (*recovery_prolog)(struct qed_dev *cdev);

/**
 * @brief update_drv_state - API to inform the change in the driver state.
 *
 * @param cdev
 * @param active
 *
 */
        int (*update_drv_state)(struct qed_dev *cdev, bool active);

/**
 * @brief update_mac - API to inform the change in the mac address
 *
 * @param cdev
 * @param mac
 *
 */
        int (*update_mac)(struct qed_dev *cdev, u8 *mac);

/**
 * @brief update_mtu - API to inform the change in the mtu
 *
 * @param cdev
 * @param mtu
 *
 */
        int (*update_mtu)(struct qed_dev *cdev, u16 mtu);

/**
 * @brief update_wol - update of changes in the WoL configuration
 *
 * @param cdev
 * @param enabled - true iff WoL should be enabled.
 */
        int (*update_wol) (struct qed_dev *cdev, bool enabled);

/**
 * @brief read_module_eeprom
 *
 * @param cdev
 * @param buf - buffer
 * @param dev_addr - PHY device memory region
 * @param offset - offset into eeprom contents to be read
 * @param len - buffer length, i.e., max bytes to be read
 */
        int (*read_module_eeprom)(struct qed_dev *cdev,
                                  char *buf, u8 dev_addr, u32 offset, u32 len);

/**
 * @brief get_affin_hwfn_idx
 *
 * @param cdev
 */
        u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev);

/**
 * @brief read_nvm_cfg - Read NVM config attribute value.
 * @param cdev
 * @param buf - buffer
 * @param cmd - NVM CFG command id
 * @param entity_id - Entity id
 *
 */
        int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd,
                            u32 entity_id);
/**
 * @brief read_nvm_cfg_len - Read length of an NVM config attribute value.
 * @param cdev
 * @param cmd - NVM CFG command id
 *
 * @return config id length, 0 on error.
 */
        int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd);

/**
 * @brief set_grc_config - Configure value for grc config id.
 * @param cdev
 * @param cfg_id - grc config id
 * @param val - grc config value
 *
 */
        int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val);
};
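
/* Illustrative bring-up sketch, not a normative sequence: 'ops' stands for a
 * pointer to this qed_common_ops instance, while 'pdev', 'pf_params' and
 * 'num_queues' are placeholders. A protocol driver typically does:
 *
 *	struct qed_probe_params probe = { .protocol = QED_PROTOCOL_ETH };
 *	struct qed_slowpath_params sp = { .int_mode = QED_INT_MODE_MSIX };
 *	struct qed_int_info int_info;
 *	struct qed_dev *cdev;
 *
 *	cdev = ops->probe(pdev, &probe);
 *	ops->update_pf_params(cdev, &pf_params);	// see struct qed_pf_params above
 *	ops->slowpath_start(cdev, &sp);
 *	ops->set_fp_int(cdev, num_queues);		// request fastpath vectors
 *	ops->get_fp_int(cdev, &int_info);		// retrieve the MSI-X entries
 *	...
 *	ops->slowpath_stop(cdev);
 *	ops->remove(cdev);
 */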

#define MASK_FIELD(_name, _value) \
        ((_value) &= (_name ## _MASK))

#define FIELD_VALUE(_name, _value) \
        ((_value & _name ## _MASK) << _name ## _SHIFT)

#define SET_FIELD(value, name, flag)                           \
        do {                                                   \
                (value) &= ~(name ## _MASK << name ## _SHIFT); \
                (value) |= (((u64)flag) << (name ## _SHIFT));  \
        } while (0)

#define GET_FIELD(value, name) \
        (((value) >> (name ## _SHIFT)) & name ## _MASK)

#define GET_MFW_FIELD(name, field) \
        (((name) & (field ## _MASK)) >> (field ## _OFFSET))

#define SET_MFW_FIELD(name, field, value)                                \
        do {                                                             \
                (name) &= ~(field ## _MASK);                             \
                (name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK));\
        } while (0)
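
/* Worked example (illustrative): the _MASK/_SHIFT pairs are meant for
 * GET_FIELD()/SET_FIELD(), while the _MASK/_OFFSET pairs such as
 * QED_MFW_VERSION_* go with GET_MFW_FIELD()/SET_MFW_FIELD(). Assuming the
 * usual major.minor.rev.eng packing with the major number in the top byte,
 * splitting qed_dev_info.mfw_rev would look like:
 *
 *	u8 mfw_major = GET_MFW_FIELD(dev_info.mfw_rev, QED_MFW_VERSION_3);
 *	u8 mfw_minor = GET_MFW_FIELD(dev_info.mfw_rev, QED_MFW_VERSION_2);
 *	u8 mfw_rev   = GET_MFW_FIELD(dev_info.mfw_rev, QED_MFW_VERSION_1);
 *	u8 mfw_eng   = GET_MFW_FIELD(dev_info.mfw_rev, QED_MFW_VERSION_0);
 */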

#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)

/* Debug print definitions */
#define DP_ERR(cdev, fmt, ...)                                  \
        do {                                                    \
                pr_err("[%s:%d(%s)]" fmt,                       \
                       __func__, __LINE__,                      \
                       DP_NAME(cdev) ? DP_NAME(cdev) : "",      \
                       ## __VA_ARGS__);                         \
        } while (0)

#define DP_NOTICE(cdev, fmt, ...)                                     \
        do {                                                          \
                if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \
                        pr_notice("[%s:%d(%s)]" fmt,                  \
                                  __func__, __LINE__,                 \
                                  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
                                  ## __VA_ARGS__);                    \
                                                                      \
                }                                                     \
        } while (0)

#define DP_INFO(cdev, fmt, ...)                                       \
        do {                                                          \
                if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) {   \
                        pr_notice("[%s:%d(%s)]" fmt,                  \
                                  __func__, __LINE__,                 \
                                  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
                                  ## __VA_ARGS__);                    \
                }                                                     \
        } while (0)

#define DP_VERBOSE(cdev, module, fmt, ...)                              \
        do {                                                            \
                if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) && \
                             ((cdev)->dp_module & module))) {           \
                        pr_notice("[%s:%d(%s)]" fmt,                    \
                                  __func__, __LINE__,                   \
                                  DP_NAME(cdev) ? DP_NAME(cdev) : "",   \
                                  ## __VA_ARGS__);                      \
                }                                                       \
        } while (0)

enum DP_LEVEL {
        QED_LEVEL_VERBOSE       = 0x0,
        QED_LEVEL_INFO          = 0x1,
        QED_LEVEL_NOTICE        = 0x2,
        QED_LEVEL_ERR           = 0x3,
};

#define QED_LOG_LEVEL_SHIFT     (30)
#define QED_LOG_VERBOSE_MASK    (0x3fffffff)
#define QED_LOG_INFO_MASK       (0x40000000)
#define QED_LOG_NOTICE_MASK     (0x80000000)

enum DP_MODULE {
        QED_MSG_SPQ     = 0x10000,
        QED_MSG_STATS   = 0x20000,
        QED_MSG_DCB     = 0x40000,
        QED_MSG_IOV     = 0x80000,
        QED_MSG_SP      = 0x100000,
        QED_MSG_STORAGE = 0x200000,
        QED_MSG_CXT     = 0x800000,
        QED_MSG_LL2     = 0x1000000,
        QED_MSG_ILT     = 0x2000000,
        QED_MSG_RDMA    = 0x4000000,
        QED_MSG_DEBUG   = 0x8000000,
        /* to be added...up to 0x8000000 */
};
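
/* Illustrative use of the debug-print macros above: a message is emitted only
 * when the device's dp_level is permissive enough and, for DP_VERBOSE, when
 * the matching DP_MODULE bit is set in dp_module. 'cid' and 'dev_info' are
 * placeholders for the example only.
 *
 *	DP_VERBOSE(cdev, QED_MSG_SPQ, "Posted SPQ entry, cid %08x\n", cid);
 *	DP_INFO(cdev, "MFW version 0x%08x\n", dev_info.mfw_rev);
 *	DP_NOTICE(cdev, "Unexpected completion on cid %08x\n", cid);
 */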

enum qed_mf_mode {
        QED_MF_DEFAULT,
        QED_MF_OVLAN,
        QED_MF_NPAR,
};

struct qed_eth_stats_common {
        u64     no_buff_discards;
        u64     packet_too_big_discard;
        u64     ttl0_discard;
        u64     rx_ucast_bytes;
        u64     rx_mcast_bytes;
        u64     rx_bcast_bytes;
        u64     rx_ucast_pkts;
        u64     rx_mcast_pkts;
        u64     rx_bcast_pkts;
        u64     mftag_filter_discards;
        u64     mac_filter_discards;
        u64     gft_filter_drop;
        u64     tx_ucast_bytes;
        u64     tx_mcast_bytes;
        u64     tx_bcast_bytes;
        u64     tx_ucast_pkts;
        u64     tx_mcast_pkts;
        u64     tx_bcast_pkts;
        u64     tx_err_drop_pkts;
        u64     tpa_coalesced_pkts;
        u64     tpa_coalesced_events;
        u64     tpa_aborts_num;
        u64     tpa_not_coalesced_pkts;
        u64     tpa_coalesced_bytes;

        /* port */
        u64     rx_64_byte_packets;
        u64     rx_65_to_127_byte_packets;
        u64     rx_128_to_255_byte_packets;
        u64     rx_256_to_511_byte_packets;
        u64     rx_512_to_1023_byte_packets;
        u64     rx_1024_to_1518_byte_packets;
        u64     rx_crc_errors;
        u64     rx_mac_crtl_frames;
        u64     rx_pause_frames;
        u64     rx_pfc_frames;
        u64     rx_align_errors;
        u64     rx_carrier_errors;
        u64     rx_oversize_packets;
        u64     rx_jabbers;
        u64     rx_undersize_packets;
        u64     rx_fragments;
        u64     tx_64_byte_packets;
        u64     tx_65_to_127_byte_packets;
        u64     tx_128_to_255_byte_packets;
        u64     tx_256_to_511_byte_packets;
        u64     tx_512_to_1023_byte_packets;
        u64     tx_1024_to_1518_byte_packets;
        u64     tx_pause_frames;
        u64     tx_pfc_frames;
        u64     brb_truncates;
        u64     brb_discards;
        u64     rx_mac_bytes;
        u64     rx_mac_uc_packets;
        u64     rx_mac_mc_packets;
        u64     rx_mac_bc_packets;
        u64     rx_mac_frames_ok;
        u64     tx_mac_bytes;
        u64     tx_mac_uc_packets;
        u64     tx_mac_mc_packets;
        u64     tx_mac_bc_packets;
        u64     tx_mac_ctrl_frames;
        u64     link_change_count;
};

struct qed_eth_stats_bb {
        u64 rx_1519_to_1522_byte_packets;
        u64 rx_1519_to_2047_byte_packets;
        u64 rx_2048_to_4095_byte_packets;
        u64 rx_4096_to_9216_byte_packets;
        u64 rx_9217_to_16383_byte_packets;
        u64 tx_1519_to_2047_byte_packets;
        u64 tx_2048_to_4095_byte_packets;
        u64 tx_4096_to_9216_byte_packets;
        u64 tx_9217_to_16383_byte_packets;
        u64 tx_lpi_entry_count;
        u64 tx_total_collisions;
};

struct qed_eth_stats_ah {
        u64 rx_1519_to_max_byte_packets;
        u64 tx_1519_to_max_byte_packets;
};

struct qed_eth_stats {
        struct qed_eth_stats_common common;

        union {
                struct qed_eth_stats_bb bb;
                struct qed_eth_stats_ah ah;
        };
};

#define QED_SB_IDX              0x0002

#define RX_PI           0
#define TX_PI(tc)       (RX_PI + 1 + tc)

struct qed_sb_cnt_info {
        /* Original, current, and free SBs for PF */
        int orig;
        int cnt;
        int free_cnt;

        /* Original, current, and free SBs for child VFs */
        int iov_orig;
        int iov_cnt;
        int free_cnt_iov;
};

static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
{
        u32 prod = 0;
        u16 rc = 0;

        prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
               STATUS_BLOCK_E4_PROD_INDEX_MASK;
        if (sb_info->sb_ack != prod) {
                sb_info->sb_ack = prod;
                rc |= QED_SB_IDX;
        }

        /* Let SB update */
        return rc;
}

/**
 * @brief qed_sb_ack - creates an update command for interrupts that is
 *        written to the IGU.
 *
 * @param sb_info - the structure allocated and initialized per status
 *                  block. The assumption is that it was initialized using
 *                  qed_sb_init.
 * @param int_cmd - Enable/Disable/Nop
 * @param upd_flg - whether the IGU consumer should be updated.
 */
static inline void qed_sb_ack(struct qed_sb_info *sb_info,
                              enum igu_int_cmd int_cmd,
                              u8 upd_flg)
{
        u32 igu_ack;

        igu_ack = ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
                   (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
                   (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
                   (IGU_SEG_ACCESS_REG <<
                    IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

        DIRECT_REG_WR(sb_info->igu_addr, igu_ack);

        /* Both segments (interrupts & acks) are written to the same address;
         * need to guarantee all commands are received (in-order) by the HW.
         */
        barrier();
}
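
/* Illustrative fastpath sketch ('fp' and its poll routine are assumptions for
 * the example): a NAPI-style consumer masks the status block in the interrupt
 * handler, processes completions in its poller, and then re-arms the SB with
 * an ack that also updates the IGU consumer. IGU_INT_ENABLE/IGU_INT_DISABLE
 * come from the common HSI included above.
 *
 *	// in the MSI-X handler: mask the SB, then schedule the poller
 *	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
 *
 *	// in the poll routine, once the work budget is not exhausted:
 *	qed_sb_update_sb_idx(fp->sb_info);		// latch the latest producer
 *	qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);	// ack + unmask
 */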

static inline void __internal_ram_wr(void *p_hwfn,
                                     void __iomem *addr,
                                     int size,
                                     u32 *data)
{
        unsigned int i;

        for (i = 0; i < size / sizeof(*data); i++)
                DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]);
}

static inline void internal_ram_wr(void __iomem *addr,
                                   int size,
                                   u32 *data)
{
        __internal_ram_wr(NULL, addr, size, data);
}

enum qed_rss_caps {
        QED_RSS_IPV4            = 0x1,
        QED_RSS_IPV6            = 0x2,
        QED_RSS_IPV4_TCP        = 0x4,
        QED_RSS_IPV6_TCP        = 0x8,
        QED_RSS_IPV4_UDP        = 0x10,
        QED_RSS_IPV6_UDP        = 0x20,
};

#define QED_RSS_IND_TABLE_SIZE 128
#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
#endif