/* dpdk/drivers/net/qede/base/ecore_hsi_common.h */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#ifndef __ECORE_HSI_COMMON__
#define __ECORE_HSI_COMMON__
/********************************/
/* Add include to common target */
/********************************/
#include "common_hsi.h"
#include "mcp_public.h"

  15
/*
 * Opcodes for the common event ring.
 * Firmware posts one of these on the event ring when the corresponding
 * slow-path (ramrod) operation completes; values must match firmware.
 */
enum common_event_opcode {
	COMMON_EVENT_PF_START,
	COMMON_EVENT_PF_STOP,
	COMMON_EVENT_VF_START,
	COMMON_EVENT_VF_STOP,
	COMMON_EVENT_VF_PF_CHANNEL,
	COMMON_EVENT_VF_FLR,
	COMMON_EVENT_PF_UPDATE,
	COMMON_EVENT_MALICIOUS_VF,
	COMMON_EVENT_RL_UPDATE,
	COMMON_EVENT_EMPTY,
	MAX_COMMON_EVENT_OPCODE /* Not a real opcode; marks the enum range */
};
  32
  33
/*
 * Common Ramrod Command IDs.
 * Identify the slow-path command carried by a ramrod on the SPQ;
 * values must match firmware.
 */
enum common_ramrod_cmd_id {
	COMMON_RAMROD_UNUSED,
	COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
	COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
	COMMON_RAMROD_VF_START /* VF Function Start */,
	COMMON_RAMROD_VF_STOP /* VF Function Stop Ramrod */,
	COMMON_RAMROD_PF_UPDATE /* PF update Ramrod */,
	COMMON_RAMROD_RL_UPDATE /* QCN/DCQCN RL update Ramrod */,
	COMMON_RAMROD_EMPTY /* Empty Ramrod */,
	MAX_COMMON_RAMROD_CMD_ID /* Not a real command; marks the enum range */
};
  48
  49
/*
 * The core storm context for the Ystorm.
 * Layout is fixed by firmware; all fields reserved (16 bytes).
 */
struct ystorm_core_conn_st_ctx {
	__le32 reserved[4];
};
  56
/*
 * The core storm context for the Pstorm.
 * Layout is fixed by firmware; all fields reserved (80 bytes).
 */
struct pstorm_core_conn_st_ctx {
	__le32 reserved[20];
};
  63
/*
 * Core Slowpath Connection storm context of Xstorm.
 * Holds the host-visible SPQ and consolidation ring state for the
 * slow-path connection; layout is fixed by firmware.
 */
struct xstorm_core_conn_st_ctx {
	__le32 spq_base_lo /* SPQ Ring Base Address low dword */;
	__le32 spq_base_hi /* SPQ Ring Base Address high dword */;
/* Consolidation Ring Base Address */
	struct regpair consolid_base_addr;
	__le16 spq_cons /* SPQ Ring Consumer */;
	__le16 consolid_cons /* Consolidation Ring Consumer */;
	__le32 reserved0[55] /* Pad to 15 cycles */;
};
  76
/*
 * Xstorm aggregative context for a core (light-L2) connection.
 * The flagsN bytes are packed bit-fields; access them only through the
 * XSTORM_CORE_CONN_AG_CTX_*_MASK/_SHIFT macros below. Layout and bit
 * positions are dictated by firmware and must not be changed.
 */
struct xstorm_core_conn_ag_ctx {
	u8 reserved0 /* cdu_validation */;
	u8 state /* state */;
	u8 flags0;
#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK         0x1 /* exist_in_qm0 */
#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT        0
#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK            0x1 /* exist_in_qm1 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT           1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK            0x1 /* exist_in_qm2 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT           2
#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK         0x1 /* exist_in_qm3 */
#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT        3
#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK            0x1 /* bit4 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT           4
/* cf_array_active */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK            0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT           5
#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK            0x1 /* bit6 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT           6
#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK            0x1 /* bit7 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT           7
	u8 flags1;
#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK            0x1 /* bit8 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT           0
#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK            0x1 /* bit9 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT           1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK            0x1 /* bit10 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT           2
#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK                0x1 /* bit11 */
#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT               3
#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK                0x1 /* bit12 */
#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT               4
#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK                0x1 /* bit13 */
#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT               5
#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK       0x1 /* bit14 */
#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT      6
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK         0x1 /* bit15 */
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT        7
	u8 flags2;
#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK                  0x3 /* timer0cf */
#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT                 0
#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK                  0x3 /* timer1cf */
#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT                 2
#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK                  0x3 /* timer2cf */
#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT                 4
/* timer_stop_all */
#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK                  0x3
#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT                 6
	u8 flags3;
#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK                  0x3 /* cf4 */
#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT                 0
#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK                  0x3 /* cf5 */
#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT                 2
#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK                  0x3 /* cf6 */
#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT                 4
#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK                  0x3 /* cf7 */
#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT                 6
	u8 flags4;
#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK                  0x3 /* cf8 */
#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT                 0
#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK                  0x3 /* cf9 */
#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT                 2
#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK                 0x3 /* cf10 */
#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT                4
#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK                 0x3 /* cf11 */
#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT                6
	u8 flags5;
#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK                 0x3 /* cf12 */
#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT                0
#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK                 0x3 /* cf13 */
#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT                2
#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK                 0x3 /* cf14 */
#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT                4
#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK                 0x3 /* cf15 */
#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT                6
	u8 flags6;
#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK     0x3 /* cf16 */
#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT    0
#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK                 0x3 /* cf_array_cf */
#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT                2
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK                0x3 /* cf18 */
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT               4
#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK         0x3 /* cf19 */
#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT        6
	u8 flags7;
#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK             0x3 /* cf20 */
#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT            0
#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK           0x3 /* cf21 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT          2
#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK            0x3 /* cf22 */
#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT           4
#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK                0x1 /* cf0en */
#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT               6
#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK                0x1 /* cf1en */
#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT               7
	u8 flags8;
#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK                0x1 /* cf2en */
#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT               0
#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK                0x1 /* cf3en */
#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT               1
#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK                0x1 /* cf4en */
#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT               2
#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK                0x1 /* cf5en */
#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT               3
#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK                0x1 /* cf6en */
#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT               4
#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK                0x1 /* cf7en */
#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT               5
#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK                0x1 /* cf8en */
#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT               6
#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK                0x1 /* cf9en */
#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT               7
	u8 flags9;
#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK               0x1 /* cf10en */
#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT              0
#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK               0x1 /* cf11en */
#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT              1
#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK               0x1 /* cf12en */
#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT              2
#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK               0x1 /* cf13en */
#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT              3
#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK               0x1 /* cf14en */
#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT              4
#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK               0x1 /* cf15en */
#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT              5
#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK  0x1 /* cf16en */
#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
/* cf_array_cf_en */
#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK               0x1
#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT              7
	u8 flags10;
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK             0x1 /* cf18en */
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT            0
#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK      0x1 /* cf19en */
#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT     1
#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK          0x1 /* cf20en */
#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT         2
#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK           0x1 /* cf21en */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT          3
#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK         0x1 /* cf22en */
#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT        4
#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK               0x1 /* cf23en */
#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT              5
#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK           0x1 /* rule0en */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT          6
#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK           0x1 /* rule1en */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT          7
	u8 flags11;
#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK           0x1 /* rule2en */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT          0
#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK           0x1 /* rule3en */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT          1
#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK       0x1 /* rule4en */
#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT      2
#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK              0x1 /* rule5en */
#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT             3
#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK              0x1 /* rule6en */
#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT             4
#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK              0x1 /* rule7en */
#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT             5
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK         0x1 /* rule8en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT        6
#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK              0x1 /* rule9en */
#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT             7
	u8 flags12;
#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK             0x1 /* rule10en */
#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT            0
#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK             0x1 /* rule11en */
#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT            1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK         0x1 /* rule12en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT        2
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK         0x1 /* rule13en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT        3
#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK             0x1 /* rule14en */
#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT            4
#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK             0x1 /* rule15en */
#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT            5
#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK             0x1 /* rule16en */
#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT            6
#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK             0x1 /* rule17en */
#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT            7
	u8 flags13;
#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK             0x1 /* rule18en */
#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT            0
#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK             0x1 /* rule19en */
#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT            1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK         0x1 /* rule20en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT        2
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK         0x1 /* rule21en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT        3
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK         0x1 /* rule22en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT        4
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK         0x1 /* rule23en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT        5
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK         0x1 /* rule24en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT        6
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK         0x1 /* rule25en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT        7
	u8 flags14;
#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK                0x1 /* bit16 */
#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT               0
#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK                0x1 /* bit17 */
#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT               1
#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK                0x1 /* bit18 */
#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT               2
#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK                0x1 /* bit19 */
#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT               3
#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK                0x1 /* bit20 */
#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT               4
#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK                0x1 /* bit21 */
#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT               5
#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK                 0x3 /* cf23 */
#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT                6
	u8 byte2 /* byte2 */;
	__le16 physical_q0 /* physical_q0 */;
	__le16 consolid_prod /* physical_q1 */;
	__le16 reserved16 /* physical_q2 */;
	__le16 tx_bd_cons /* word3 */;
	__le16 tx_bd_or_spq_prod /* word4 */;
	__le16 updated_qm_pq_id /* word5 */;
	__le16 conn_dpi /* conn_dpi */;
	u8 byte3 /* byte3 */;
	u8 byte4 /* byte4 */;
	u8 byte5 /* byte5 */;
	u8 byte6 /* byte6 */;
	__le32 reg0 /* reg0 */;
	__le32 reg1 /* reg1 */;
	__le32 reg2 /* reg2 */;
	__le32 reg3 /* reg3 */;
	__le32 reg4 /* reg4 */;
	__le32 reg5 /* cf_array0 */;
	__le32 reg6 /* cf_array1 */;
	__le16 word7 /* word7 */;
	__le16 word8 /* word8 */;
	__le16 word9 /* word9 */;
	__le16 word10 /* word10 */;
	__le32 reg7 /* reg7 */;
	__le32 reg8 /* reg8 */;
	__le32 reg9 /* reg9 */;
	u8 byte7 /* byte7 */;
	u8 byte8 /* byte8 */;
	u8 byte9 /* byte9 */;
	u8 byte10 /* byte10 */;
	u8 byte11 /* byte11 */;
	u8 byte12 /* byte12 */;
	u8 byte13 /* byte13 */;
	u8 byte14 /* byte14 */;
	u8 byte15 /* byte15 */;
	u8 e5_reserved /* e5_reserved */;
	__le16 word11 /* word11 */;
	__le32 reg10 /* reg10 */;
	__le32 reg11 /* reg11 */;
	__le32 reg12 /* reg12 */;
	__le32 reg13 /* reg13 */;
	__le32 reg14 /* reg14 */;
	__le32 reg15 /* reg15 */;
	__le32 reg16 /* reg16 */;
	__le32 reg17 /* reg17 */;
	__le32 reg18 /* reg18 */;
	__le32 reg19 /* reg19 */;
	__le16 word12 /* word12 */;
	__le16 word13 /* word13 */;
	__le16 word14 /* word14 */;
	__le16 word15 /* word15 */;
};
 342
/*
 * Tstorm aggregative context for a core (light-L2) connection.
 * flagsN bytes are packed bit-fields; use the _MASK/_SHIFT macros.
 * Layout is dictated by firmware and must not be changed.
 */
struct tstorm_core_conn_ag_ctx {
	u8 byte0 /* cdu_validation */;
	u8 byte1 /* state */;
	u8 flags0;
#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK     0x1 /* bit2 */
#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT    2
#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK     0x1 /* bit3 */
#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT    3
#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK     0x1 /* bit4 */
#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT    4
#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK     0x1 /* bit5 */
#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT    5
#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* timer0cf */
#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     6
	u8 flags1;
#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* timer1cf */
#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     0
#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* timer2cf */
#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     2
#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3 /* timer_stop_all */
#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT     4
#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3 /* cf4 */
#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT     6
	u8 flags2;
#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3 /* cf5 */
#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT     0
#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3 /* cf6 */
#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT     2
#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK      0x3 /* cf7 */
#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT     4
#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK      0x3 /* cf8 */
#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT     6
	u8 flags3;
#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK      0x3 /* cf9 */
#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT     0
#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK     0x3 /* cf10 */
#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT    2
#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   4
#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   5
#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   6
#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1 /* cf3en */
#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   7
	u8 flags4;
#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1 /* cf4en */
#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   0
#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1 /* cf5en */
#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   1
#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1 /* cf6en */
#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   2
#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK    0x1 /* cf7en */
#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT   3
#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK    0x1 /* cf8en */
#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT   4
#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK    0x1 /* cf9en */
#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT   5
#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK   0x1 /* cf10en */
#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT  6
#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
	u8 flags5;
#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1 /* rule5en */
#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1 /* rule6en */
#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1 /* rule7en */
#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1 /* rule8en */
#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
	__le32 reg0 /* reg0 */;
	__le32 reg1 /* reg1 */;
	__le32 reg2 /* reg2 */;
	__le32 reg3 /* reg3 */;
	__le32 reg4 /* reg4 */;
	__le32 reg5 /* reg5 */;
	__le32 reg6 /* reg6 */;
	__le32 reg7 /* reg7 */;
	__le32 reg8 /* reg8 */;
	u8 byte2 /* byte2 */;
	u8 byte3 /* byte3 */;
	__le16 word0 /* word0 */;
	u8 byte4 /* byte4 */;
	u8 byte5 /* byte5 */;
	__le16 word1 /* word1 */;
	__le16 word2 /* conn_dpi */;
	__le16 word3 /* word3 */;
	__le32 reg9 /* reg9 */;
	__le32 reg10 /* reg10 */;
};
 446
/*
 * Ustorm aggregative context for a core (light-L2) connection.
 * flagsN bytes are packed bit-fields; use the _MASK/_SHIFT macros.
 * Layout is dictated by firmware and must not be changed.
 */
struct ustorm_core_conn_ag_ctx {
	u8 reserved /* cdu_validation */;
	u8 byte1 /* state */;
	u8 flags0;
#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
#define USTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* timer0cf */
#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
#define USTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* timer1cf */
#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
#define USTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* timer2cf */
#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
	u8 flags1;
#define USTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3 /* timer_stop_all */
#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT     0
#define USTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3 /* cf4 */
#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT     2
#define USTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3 /* cf5 */
#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT     4
#define USTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3 /* cf6 */
#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT     6
	u8 flags2;
#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1 /* cf3en */
#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   3
#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1 /* cf4en */
#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   4
#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1 /* cf5en */
#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   5
#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1 /* cf6en */
#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   6
#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
	u8 flags3;
#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1 /* rule5en */
#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1 /* rule6en */
#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1 /* rule7en */
#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1 /* rule8en */
#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
	u8 byte2 /* byte2 */;
	u8 byte3 /* byte3 */;
	__le16 word0 /* conn_dpi */;
	__le16 word1 /* word1 */;
	__le32 rx_producers /* reg0 */;
	__le32 reg1 /* reg1 */;
	__le32 reg2 /* reg2 */;
	__le32 reg3 /* reg3 */;
	__le16 word2 /* word2 */;
	__le16 word3 /* word3 */;
};
 515
/*
 * The core storm context for the Mstorm.
 * Layout is fixed by firmware; all fields reserved (160 bytes).
 */
struct mstorm_core_conn_st_ctx {
	__le32 reserved[40];
};
 522
/*
 * The core storm context for the Ustorm.
 * Layout is fixed by firmware; all fields reserved (80 bytes).
 */
struct ustorm_core_conn_st_ctx {
	__le32 reserved[20];
};
 529
/*
 * The core storm context for the Tstorm.
 * Layout is fixed by firmware; all fields reserved (16 bytes).
 */
struct tstorm_core_conn_st_ctx {
	__le32 reserved[4];
};
 536
/*
 * Core connection context.
 * Aggregates the per-storm state (st) and aggregative (ag) contexts for a
 * single core/light-L2 connection, in the exact order and with the exact
 * padding the hardware context manager expects.
 */
struct core_conn_context {
/* ystorm storm context */
	struct ystorm_core_conn_st_ctx ystorm_st_context;
	struct regpair ystorm_st_padding[2] /* padding */;
/* pstorm storm context */
	struct pstorm_core_conn_st_ctx pstorm_st_context;
	struct regpair pstorm_st_padding[2] /* padding */;
/* xstorm storm context */
	struct xstorm_core_conn_st_ctx xstorm_st_context;
/* xstorm aggregative context */
	struct xstorm_core_conn_ag_ctx xstorm_ag_context;
/* tstorm aggregative context */
	struct tstorm_core_conn_ag_ctx tstorm_ag_context;
/* ustorm aggregative context */
	struct ustorm_core_conn_ag_ctx ustorm_ag_context;
/* mstorm storm context */
	struct mstorm_core_conn_st_ctx mstorm_st_context;
/* ustorm storm context */
	struct ustorm_core_conn_st_ctx ustorm_st_context;
	struct regpair ustorm_st_padding[2] /* padding */;
/* tstorm storm context */
	struct tstorm_core_conn_st_ctx tstorm_st_context;
	struct regpair tstorm_st_padding[2] /* padding */;
};
 564
 565
/*
 * How LL2 should deal with a packet upon errors.
 */
enum core_error_handle {
	LL2_DROP_PACKET /* If error occurs drop packet */,
	LL2_DO_NOTHING /* If error occurs do nothing */,
	LL2_ASSERT /* If error occurs assert */,
	MAX_CORE_ERROR_HANDLE /* Not a real value; marks the enum range */
};
 575
 576
/*
 * Opcodes for the event ring (core/light-L2 queue operations).
 * Firmware posts one of these when the matching ramrod completes;
 * values must match firmware.
 */
enum core_event_opcode {
	CORE_EVENT_TX_QUEUE_START,
	CORE_EVENT_TX_QUEUE_STOP,
	CORE_EVENT_RX_QUEUE_START,
	CORE_EVENT_RX_QUEUE_STOP,
	CORE_EVENT_RX_QUEUE_FLUSH,
	CORE_EVENT_TX_QUEUE_UPDATE,
	CORE_EVENT_QUEUE_STATS_QUERY,
	MAX_CORE_EVENT_OPCODE /* Not a real opcode; marks the enum range */
};
 590
 591
/*
 * The L4 pseudo checksum mode for Core.
 */
enum core_l4_pseudo_checksum_mode {
/* Pseudo Checksum on packet is calculated with the correct packet length. */
	CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
/* Pseudo Checksum on packet is calculated with zero length. */
	CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
	MAX_CORE_L4_PSEUDO_CHECKSUM_MODE /* Not a real mode; marks the enum range */
};
 602
 603
/*
 * Light-L2 per-port GSI error counters.
 * NOTE(review): the original comment said "RX Producers in Tstorm RAM",
 * which describes struct core_ll2_rx_prod below, not these counters.
 */
struct core_ll2_port_stats {
	struct regpair gsi_invalid_hdr; /* packets with an invalid GSI header */
	struct regpair gsi_invalid_pkt_length; /* packets with invalid length */
	struct regpair gsi_unsupported_pkt_typ; /* packets of unsupported type */
	struct regpair gsi_crcchksm_error; /* packets with CRC/checksum errors */
};
 613
 614
/*
 * LL2 TX Per Queue Stats (Pstorm side: transmit counters).
 */
struct core_ll2_pstorm_per_queue_stat {
/* number of total bytes sent without errors */
	struct regpair sent_ucast_bytes;
/* number of total bytes sent without errors */
	struct regpair sent_mcast_bytes;
/* number of total bytes sent without errors */
	struct regpair sent_bcast_bytes;
/* number of total packets sent without errors */
	struct regpair sent_ucast_pkts;
/* number of total packets sent without errors */
	struct regpair sent_mcast_pkts;
/* number of total packets sent without errors */
	struct regpair sent_bcast_pkts;
/* number of total packets dropped due to errors */
	struct regpair error_drop_pkts;
};
 634
 635
/*
 * LL2 RX per-queue discard counters (Tstorm side).
 */
struct core_ll2_tstorm_per_queue_stat {
/* Number of packets discarded because they are bigger than MTU */
	struct regpair packet_too_big_discard;
/* Number of packets discarded due to lack of host buffers */
	struct regpair no_buff_discard;
};
 642
/*
 * LL2 RX per-queue receive counters (Ustorm side).
 */
struct core_ll2_ustorm_per_queue_stat {
	struct regpair rcv_ucast_bytes; /* received unicast bytes */
	struct regpair rcv_mcast_bytes; /* received multicast bytes */
	struct regpair rcv_bcast_bytes; /* received broadcast bytes */
	struct regpair rcv_ucast_pkts; /* received unicast packets */
	struct regpair rcv_mcast_pkts; /* received multicast packets */
	struct regpair rcv_bcast_pkts; /* received broadcast packets */
};
 651
 652
/*
 * Light-L2 RX Producers.
 * Host-updated producer indices for the RX BD and CQE rings.
 */
struct core_ll2_rx_prod {
	__le16 bd_prod /* BD Producer */;
	__le16 cqe_prod /* CQE Producer */;
};
 660
 661
 662
/*
 * LL2 TX per-queue statistics buffer layout (currently PSTORM stats only)
 */
struct core_ll2_tx_per_queue_stat {
/* PSTORM per queue statistics */
        struct core_ll2_pstorm_per_queue_stat pstorm_stat;
};
 667
 668
 669
/*
 * Structure for doorbell data, in PWM mode, for RX producers update.
 */
struct core_pwm_prod_update_data {
        __le16 icid /* internal CID */;
        u8 reserved0;
        u8 params /* bit field; accessed via the MASK/SHIFT defines below */;
/* aggregative command. Set DB_AGG_CMD_SET for producer update
 * (use enum db_agg_cmd_sel)
 */
#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_MASK    0x3
#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_SHIFT   0
#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_MASK  0x3F /* Set 0. */
#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_SHIFT 2
        struct core_ll2_rx_prod prod /* Producers. */;
};
 686
 687
/*
 * Ramrod data for rx/tx queue statistics query ramrod
 */
struct core_queue_stats_query_ramrod_data {
        u8 rx_stat /* If set, collect RX queue statistics. */;
        u8 tx_stat /* If set, collect TX queue statistics. */;
        __le16 reserved[3];
/* Address of RX statistic buffer. A core_ll2_rx_per_queue_stat struct will be
 * written to this address.
 */
        struct regpair rx_stat_addr;
/* Address of TX statistic buffer. A core_ll2_tx_per_queue_stat struct will be
 * written to this address.
 */
        struct regpair tx_stat_addr;
};
 704
 705
/*
 * Core Ramrod Command IDs (light L2)
 */
enum core_ramrod_cmd_id {
        CORE_RAMROD_UNUSED,
        CORE_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
        CORE_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
        CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
        CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
        CORE_RAMROD_RX_QUEUE_FLUSH /* RX Flush queue Ramrod */,
        CORE_RAMROD_TX_QUEUE_UPDATE /* TX Queue Update Ramrod */,
        CORE_RAMROD_QUEUE_STATS_QUERY /* Queue Statistics Query Ramrod */,
        MAX_CORE_RAMROD_CMD_ID
};
 720
 721
/*
 * RoCE flavor types. Referenced by CORE_TX_BD_DATA_ROCE_FLAV to differentiate
 * between RoCE flavors when the connection type is RoCE.
 */
enum core_roce_flavor_type {
        CORE_ROCE,
        CORE_RROCE,
        MAX_CORE_ROCE_FLAVOR_TYPE
};
 730
 731
/*
 * Specifies how ll2 should deal with packets errors: packet_too_big and no_buff
 */
struct core_rx_action_on_error {
        u8 error_type /* bit field; accessed via the MASK/SHIFT defines below */;
/* how ll2 should handle a packet_too_big error (use enum core_error_handle) */
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK  0x3
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
/* how ll2 should handle a no_buff error (use enum core_error_handle) */
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK         0x3
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT        2
#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK        0xF
#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT       4
};
 746
 747
/*
 * Core RX BD for Light L2 (static buffer size)
 */
struct core_rx_bd {
        struct regpair addr /* Buffer Address */;
        __le16 reserved[4];
};
 755
 756
/*
 * Core RX BD with dynamic buffer length for Light L2
 */
struct core_rx_bd_with_buff_len {
        struct regpair addr /* Buffer Address */;
        __le16 buff_length /* Buffer length */;
        __le16 reserved[3];
};
 765
/*
 * Core RX BD union for Light L2
 */
union core_rx_bd_union {
        struct core_rx_bd rx_bd /* Core Rx Bd static buffer size */;
/* Core Rx Bd with dynamic buffer length */
        struct core_rx_bd_with_buff_len rx_bd_with_len;
};
 774
 775
 776
/*
 * Opaque Data for Light L2 RX CQE (driver-private, echoed back by FW)
 */
struct core_rx_cqe_opaque_data {
        __le32 data[2] /* Opaque CQE Data */;
};
 783
 784
/*
 * Core RX CQE Type for Light L2
 */
enum core_rx_cqe_type {
        /* NOTE: identifier spelling ("ILLIGAL") is kept as-is for ABI/API
         * compatibility.
         */
        CORE_RX_CQE_ILLIGAL_TYPE /* Bad RX Cqe type */,
        CORE_RX_CQE_TYPE_REGULAR /* Regular Core RX CQE */,
        CORE_RX_CQE_TYPE_GSI_OFFLOAD /* Fp Gsi offload RX CQE */,
        CORE_RX_CQE_TYPE_SLOW_PATH /* Slow path Core RX CQE */,
        MAX_CORE_RX_CQE_TYPE
};
 795
 796
/*
 * Core RX fast path CQE for Light L2
 */
struct core_rx_fast_path_cqe {
        u8 type /* CQE type (use enum core_rx_cqe_type) */;
/* Offset (in bytes) of the packet from start of the buffer */
        u8 placement_offset;
/* Parsing and error flags from the parser */
        struct parsing_and_err_flags parse_flags;
        __le16 packet_length /* Total packet length (from the parser) */;
        __le16 vlan /* 802.1q VLAN tag */;
        struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
/* bit- map: each bit represents a specific error. errors indications are
 * provided by the cracker. see spec for detailed description
 */
        struct parsing_err_flags err_flags;
        __le16 reserved0;
        __le32 reserved1[3];
};
 816
/*
 * Core RX GSI offload CQE
 */
struct core_rx_gsi_offload_cqe {
        u8 type /* CQE type (use enum core_rx_cqe_type) */;
        u8 data_length_error /* set if gsi data is bigger than buff */;
/* Parsing and error flags from the parser */
        struct parsing_and_err_flags parse_flags;
        __le16 data_length /* Total packet length (from the parser) */;
        __le16 vlan /* 802.1q VLAN tag */;
        __le32 src_mac_addrhi /* hi 4 bytes source mac address */;
        __le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
/* These are the lower 16 bit of QP id in RoCE BTH header */
        __le16 qp_id;
        __le32 src_qp /* Source QP from DETH header */;
        struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
        __le32 reserved;
};
 835
/*
 * Core RX slow path CQE for Light L2
 */
struct core_rx_slow_path_cqe {
        u8 type /* CQE type (use enum core_rx_cqe_type) */;
/* completed ramrod command id — presumably enum core_ramrod_cmd_id; confirm */
        u8 ramrod_cmd_id;
        __le16 echo /* echo value from the ramrod data on the host */;
        struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
        __le32 reserved1[5];
};
 846
/*
 * Core RX CQE union for Light L2
 */
union core_rx_cqe_union {
        struct core_rx_fast_path_cqe rx_cqe_fp /* Fast path CQE */;
        struct core_rx_gsi_offload_cqe rx_cqe_gsi /* GSI offload CQE */;
        struct core_rx_slow_path_cqe rx_cqe_sp /* Slow path CQE */;
};
 855
 856
 857
 858
 859
/*
 * Ramrod data for rx queue start ramrod
 */
struct core_rx_start_ramrod_data {
        struct regpair bd_base /* Address of the first BD page */;
        struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
        __le16 mtu /* MTU */;
        __le16 sb_id /* Status block ID */;
        u8 sb_index /* Status block index */;
        u8 complete_cqe_flg /* if set - post completion to the CQE ring */;
        u8 complete_event_flg /* if set - post completion to the event ring */;
        u8 drop_ttl0_flg /* if set - drop packet with ttl=0 */;
        __le16 num_of_pbl_pages /* Number of pages in CQE PBL */;
/* if set - 802.1q tag will be removed and copied to CQE */
        u8 inner_vlan_stripping_en;
/* if set - outer tag wont be stripped, valid only in MF OVLAN mode. */
        u8 outer_vlan_stripping_dis;
        u8 queue_id /* Light L2 RX Queue ID */;
        u8 main_func_queue /* Set if this is the main PFs LL2 queue */;
/* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if
 * main_func_queue is set.
 */
        u8 mf_si_bcast_accept_all;
/* Duplicate multicast packets to LL2 main queue in mf_si mode. Valid if
 * main_func_queue is set.
 */
        u8 mf_si_mcast_accept_all;
/* Specifies how ll2 should deal with RX packets errors */
        struct core_rx_action_on_error action_on_error;
        u8 gsi_offload_flag /* set for GSI offload mode */;
/* If set, queue is subject for RX VFC classification. */
        u8 vport_id_valid;
        u8 vport_id /* Queue VPORT for RX VFC classification. */;
        u8 zero_prod_flg /* If set, zero RX producers. */;
/* If set, the inner vlan (802.1q tag) priority that is written to cqe will be
 * zeroed out, used for TenantDcb
 */
        u8 wipe_inner_vlan_pri_en;
        u8 reserved[2];
};
 903
 904
/*
 * Ramrod data for rx queue stop ramrod
 */
struct core_rx_stop_ramrod_data {
        u8 complete_cqe_flg /* post completion to the CQE ring if set */;
        u8 complete_event_flg /* post completion to the event ring if set */;
        u8 queue_id /* Light L2 RX Queue ID */;
        u8 reserved1;
        __le16 reserved2[2];
};
 915
 916
/*
 * Flags for Core TX BD (bit field; accessed via the MASK/SHIFT defines below)
 */
struct core_tx_bd_data {
        __le16 as_bitfield;
/* Do not allow additional VLAN manipulations on this packet (DCB) */
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK         0x1
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT        0
/* Insert VLAN into packet. Cannot be set for LB packets
 * (tx_dst == CORE_TX_DEST_LB)
 */
#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK          0x1
#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT         1
/* This is the first BD of the packet (for debug) */
#define CORE_TX_BD_DATA_START_BD_MASK                0x1
#define CORE_TX_BD_DATA_START_BD_SHIFT               2
/* Calculate the IP checksum for the packet */
#define CORE_TX_BD_DATA_IP_CSUM_MASK                 0x1
#define CORE_TX_BD_DATA_IP_CSUM_SHIFT                3
/* Calculate the L4 checksum for the packet */
#define CORE_TX_BD_DATA_L4_CSUM_MASK                 0x1
#define CORE_TX_BD_DATA_L4_CSUM_SHIFT                4
/* Packet is IPv6 with extensions */
#define CORE_TX_BD_DATA_IPV6_EXT_MASK                0x1
#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT               5
/* If IPv6+ext, and if l4_csum is 1, then this field indicates L4 protocol:
 * 0-TCP, 1-UDP
 */
#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK             0x1
#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT            6
/* The pseudo checksum mode to place in the L4 checksum field. Required only
 * when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
 */
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK     0x1
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT    7
/* Number of BDs that make up one packet - width wide enough to present
 * CORE_LL2_TX_MAX_BDS_PER_PACKET
 */
#define CORE_TX_BD_DATA_NBDS_MASK                    0xF
#define CORE_TX_BD_DATA_NBDS_SHIFT                   8
/* Use roce_flavor enum - Differentiate between Roce flavors is valid when
 * connType is ROCE (use enum core_roce_flavor_type)
 */
#define CORE_TX_BD_DATA_ROCE_FLAV_MASK               0x1
#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT              12
/* Calculate ip length */
#define CORE_TX_BD_DATA_IP_LEN_MASK                  0x1
#define CORE_TX_BD_DATA_IP_LEN_SHIFT                 13
/* disables the STAG insertion, relevant only in MF OVLAN mode. */
#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK  0x1
#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT 14
#define CORE_TX_BD_DATA_RESERVED0_MASK               0x1
#define CORE_TX_BD_DATA_RESERVED0_SHIFT              15
};
 971
/*
 * Core TX BD for Light L2
 */
struct core_tx_bd {
        struct regpair addr /* Buffer Address */;
        __le16 nbytes /* Number of Bytes in Buffer */;
/* Network packets: VLAN to insert to packet (if insertion flag set) LoopBack
 * packets: echo data to pass to Rx
 */
        __le16 nw_vlan_or_lb_echo;
        struct core_tx_bd_data bd_data /* BD Flags */;
        __le16 bitfield1 /* accessed via the MASK/SHIFT defines below */;
/* L4 Header Offset from start of packet (in Words). This is needed if both
 * l4_csum and ipv6_ext are set
 */
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK  0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
/* Packet destination - Network, Loopback or Drop (use enum core_tx_dest) */
#define CORE_TX_BD_TX_DST_MASK           0x3
#define CORE_TX_BD_TX_DST_SHIFT          14
};
 993
 994
 995
/*
 * Light L2 TX Destination
 */
enum core_tx_dest {
        CORE_TX_DEST_NW /* TX Destination to the Network */,
        CORE_TX_DEST_LB /* TX Destination to the Loopback */,
        CORE_TX_DEST_RESERVED,
        CORE_TX_DEST_DROP /* TX Drop */,
        MAX_CORE_TX_DEST
};
1006
1007
/*
 * Ramrod data for tx queue start ramrod
 */
struct core_tx_start_ramrod_data {
        struct regpair pbl_base_addr /* Address of the pbl page */;
        __le16 mtu /* Maximum transmission unit */;
        __le16 sb_id /* Status block ID */;
        u8 sb_index /* Status block protocol index */;
        u8 stats_en /* Statistics Enable */;
        u8 stats_id /* Statistics Counter ID */;
        u8 conn_type /* connection type that loaded ll2 */;
        __le16 pbl_size /* Number of BD pages pointed by PBL */;
        __le16 qm_pq_id /* QM PQ ID */;
        u8 gsi_offload_flag /* set for GSI offload mode */;
        u8 ctx_stats_en /* Context statistics enable */;
/* If set, queue is part of VPORT and subject for TX switching. */
        u8 vport_id_valid;
/* vport id of the current connection, used to access non_rdma_in_to_in_pri_map
 * which is per vport
 */
        u8 vport_id;
};
1030
1031
/*
 * Ramrod data for tx queue stop ramrod (no parameters; padding only)
 */
struct core_tx_stop_ramrod_data {
        __le32 reserved0[2];
};
1038
1039
/*
 * Ramrod data for tx queue update ramrod
 */
struct core_tx_update_ramrod_data {
        u8 update_qm_pq_id_flg /* Flag to Update QM PQ ID */;
        u8 reserved0;
        __le16 qm_pq_id /* Updated QM PQ ID */;
        __le32 reserved1[1];
};
1049
1050
/*
 * Enum flag for what type of dcb data to update
 */
enum dcb_dscp_update_mode {
/* use when no change should be done to DCB data */
        DONT_UPDATE_DCB_DSCP,
        UPDATE_DCB /* use to update only L2 (vlan) priority */,
        UPDATE_DSCP /* use to update only IP DSCP */,
        UPDATE_DCB_DSCP /* update vlan pri and DSCP */,
        MAX_DCB_DSCP_UPDATE_FLAG
};
1062
1063
/*
 * Ethernet RX discard statistics per PF (collected by MSTORM)
 */
struct eth_mstorm_per_pf_stat {
        struct regpair gre_discard_pkts /* Dropped GRE RX packets */;
        struct regpair vxlan_discard_pkts /* Dropped VXLAN RX packets */;
        struct regpair geneve_discard_pkts /* Dropped GENEVE RX packets */;
        struct regpair lb_discard_pkts /* Dropped Tx switched packets */;
};
1070
1071
/*
 * Ethernet RX discard and TPA statistics per queue (collected by MSTORM)
 */
struct eth_mstorm_per_queue_stat {
/* Number of packets discarded because TTL=0 (in IPv4) or hopLimit=0 (IPv6) */
        struct regpair ttl0_discard;
/* Number of packets discarded because they are bigger than MTU */
        struct regpair packet_too_big_discard;
/* Number of packets discarded due to lack of host buffers (BDs/SGEs/CQEs) */
        struct regpair no_buff_discard;
/* Number of packets discarded because of no active Rx connection */
        struct regpair not_active_discard;
/* number of coalesced packets in all TPA aggregations */
        struct regpair tpa_coalesced_pkts;
/* total number of TPA aggregations */
        struct regpair tpa_coalesced_events;
/* number of aggregations, which abnormally ended */
        struct regpair tpa_aborts_num;
/* total TCP payload length in all TPA aggregations */
        struct regpair tpa_coalesced_bytes;
};
1090
1091
/*
 * Ethernet TX Per PF
 */
struct eth_pstorm_per_pf_stat {
/* number of total ucast bytes sent on loopback port without errors */
        struct regpair sent_lb_ucast_bytes;
/* number of total mcast bytes sent on loopback port without errors */
        struct regpair sent_lb_mcast_bytes;
/* number of total bcast bytes sent on loopback port without errors */
        struct regpair sent_lb_bcast_bytes;
/* number of total ucast packets sent on loopback port without errors */
        struct regpair sent_lb_ucast_pkts;
/* number of total mcast packets sent on loopback port without errors */
        struct regpair sent_lb_mcast_pkts;
/* number of total bcast packets sent on loopback port without errors */
        struct regpair sent_lb_bcast_pkts;
        struct regpair sent_gre_bytes /* Sent GRE bytes */;
        struct regpair sent_vxlan_bytes /* Sent VXLAN bytes */;
        struct regpair sent_geneve_bytes /* Sent GENEVE bytes */;
        struct regpair sent_mpls_bytes /* Sent MPLS bytes */;
        struct regpair sent_gre_mpls_bytes /* Sent GRE MPLS bytes (E5 Only) */;
        struct regpair sent_udp_mpls_bytes /* Sent UDP MPLS bytes (E5 Only) */;
        struct regpair sent_gre_pkts /* Sent GRE packets (E5 Only) */;
        struct regpair sent_vxlan_pkts /* Sent VXLAN packets */;
        struct regpair sent_geneve_pkts /* Sent GENEVE packets */;
        struct regpair sent_mpls_pkts /* Sent MPLS packets (E5 Only) */;
        struct regpair sent_gre_mpls_pkts /* Sent GRE MPLS packets (E5 Only) */;
        struct regpair sent_udp_mpls_pkts /* Sent UDP MPLS packets (E5 Only) */;
        struct regpair gre_drop_pkts /* Dropped GRE TX packets */;
        struct regpair vxlan_drop_pkts /* Dropped VXLAN TX packets */;
        struct regpair geneve_drop_pkts /* Dropped GENEVE TX packets */;
        struct regpair mpls_drop_pkts /* Dropped MPLS TX packets (E5 Only) */;
/* Dropped GRE MPLS TX packets (E5 Only) */
        struct regpair gre_mpls_drop_pkts;
/* Dropped UDP MPLS TX packets (E5 Only) */
        struct regpair udp_mpls_drop_pkts;
};
1129
1130
/*
 * Ethernet TX Per Queue Stats
 */
struct eth_pstorm_per_queue_stat {
/* number of total unicast bytes sent without errors */
        struct regpair sent_ucast_bytes;
/* number of total multicast bytes sent without errors */
        struct regpair sent_mcast_bytes;
/* number of total broadcast bytes sent without errors */
        struct regpair sent_bcast_bytes;
/* number of total unicast packets sent without errors */
        struct regpair sent_ucast_pkts;
/* number of total multicast packets sent without errors */
        struct regpair sent_mcast_pkts;
/* number of total broadcast packets sent without errors */
        struct regpair sent_bcast_pkts;
/* number of total packets dropped due to errors */
        struct regpair error_drop_pkts;
};
1150
1151
/*
 * ETH RX rate limiting parameters
 */
struct eth_rx_rate_limit {
/* Rate Limit Multiplier - (Storm Clock (MHz) * 8 / Desired Bandwidth (MB/s)) */
        __le16 mult;
/* Constant term to add (or subtract from number of cycles) */
        __le16 cnst;
        u8 add_sub_cnst /* Add (1) or subtract (0) constant term */;
        u8 reserved0;
        __le16 reserved1;
};
1164
1165
/* Update RSS indirection table entry command. One outstanding command supported
 * per PF.
 */
struct eth_tstorm_rss_update_data {
/* Valid flag. Driver must set this flag; FW clears the valid flag when ready
 * for a new RSS update command.
 */
        u8 valid;
/* Global VPORT ID. If RSS is disabled for the VPORT, the RSS update command
 * will be ignored.
 */
        u8 vport_id;
        u8 ind_table_index /* RSS indirect table index that will be updated. */;
        u8 reserved;
        __le16 ind_table_value /* RSS indirect table new value. */;
        __le16 reserved1 /* reserved. */;
};
1183
1184
/*
 * Ethernet RX statistics per PF (collected by USTORM)
 */
struct eth_ustorm_per_pf_stat {
/* number of total ucast bytes received on loopback port without errors */
        struct regpair rcv_lb_ucast_bytes;
/* number of total mcast bytes received on loopback port without errors */
        struct regpair rcv_lb_mcast_bytes;
/* number of total bcast bytes received on loopback port without errors */
        struct regpair rcv_lb_bcast_bytes;
/* number of total ucast packets received on loopback port without errors */
        struct regpair rcv_lb_ucast_pkts;
/* number of total mcast packets received on loopback port without errors */
        struct regpair rcv_lb_mcast_pkts;
/* number of total bcast packets received on loopback port without errors */
        struct regpair rcv_lb_bcast_pkts;
        struct regpair rcv_gre_bytes /* Received GRE bytes */;
        struct regpair rcv_vxlan_bytes /* Received VXLAN bytes */;
        struct regpair rcv_geneve_bytes /* Received GENEVE bytes */;
        struct regpair rcv_gre_pkts /* Received GRE packets */;
        struct regpair rcv_vxlan_pkts /* Received VXLAN packets */;
        struct regpair rcv_geneve_pkts /* Received GENEVE packets */;
};
1205
1206
/*
 * Ethernet RX Per Queue Stats (collected by USTORM)
 */
struct eth_ustorm_per_queue_stat {
        struct regpair rcv_ucast_bytes /* received unicast bytes */;
        struct regpair rcv_mcast_bytes /* received multicast bytes */;
        struct regpair rcv_bcast_bytes /* received broadcast bytes */;
        struct regpair rcv_ucast_pkts /* received unicast packets */;
        struct regpair rcv_mcast_pkts /* received multicast packets */;
        struct regpair rcv_bcast_pkts /* received broadcast packets */;
};
1215
1216
/*
 * Event Ring VF-PF Channel data
 */
struct vf_pf_channel_eqe_data {
        struct regpair msg_addr /* VF-PF message address */;
};
1223
/*
 * Event Ring malicious VF data
 */
struct malicious_vf_eqe_data {
        u8 vf_id /* Malicious VF ID */;
        u8 err_id /* Malicious VF error (use enum malicious_vf_error_id) */;
        __le16 reserved[3];
};
1232
/*
 * Event Ring initial cleanup data
 */
struct initial_cleanup_eqe_data {
        u8 vf_id /* VF ID */;
        u8 reserved[7];
};
1240
/*
 * Event Data Union (8 bytes; interpreted per protocol_id/opcode)
 */
union event_ring_data {
        u8 bytes[8] /* Byte Array */;
        struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
        struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
/* Dedicated fields to iscsi connect done results */
        struct iscsi_connect_done_results iscsi_conn_done_info;
        union rdma_eqe_data rdma_data /* Dedicated field for RDMA data */;
        struct nvmf_eqe_data nvmf_data /* Dedicated field for NVMf data */;
        struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
/* VF Initial Cleanup data */
        struct initial_cleanup_eqe_data vf_init_cleanup;
};
1256
1257
/*
 * Event Ring Entry
 */
struct event_ring_entry {
        u8 protocol_id /* Event Protocol ID (use enum protocol_type) */;
        u8 opcode /* Event Opcode (Per Protocol Type) */;
        u8 reserved0 /* Reserved */;
        u8 vfId /* vfId for this event, 0xFF if this is a PF event */;
        __le16 echo /* Echo value from ramrod data on the host */;
/* FW return code for SP ramrods. Use (according to protocol) eth_return_code,
 * or rdma_fw_return_code, or fcoe_completion_status
 */
        u8 fw_return_code;
        u8 flags /* bit field; accessed via the MASK/SHIFT defines below */;
/* 0: synchronous EQE - a completion of SP message. 1: asynchronous EQE */
#define EVENT_RING_ENTRY_ASYNC_MASK      0x1
#define EVENT_RING_ENTRY_ASYNC_SHIFT     0
#define EVENT_RING_ENTRY_RESERVED1_MASK  0x7F
#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
        union event_ring_data data;
};
1279
/*
 * Event Ring Next Page Address
 */
struct event_ring_next_addr {
        struct regpair addr /* Next Page Address */;
        __le32 reserved[2] /* Reserved */;
};
1287
/*
 * Event Ring Element: either a regular entry or a next-page pointer
 */
union event_ring_element {
        struct event_ring_entry entry /* Event Ring Entry */;
/* Event Ring Next Page Address */
        struct event_ring_next_addr next_addr;
};
1296
1297
1298
/*
 * FW flow control modes
 */
enum fw_flow_ctrl_mode {
        flow_ctrl_pause,
        flow_ctrl_pfc,
        MAX_FW_FLOW_CTRL_MODE
};
1307
1308
/*
 * GFT profile type.
 */
enum gft_profile_type {
/* tunnel type, inner 4 tuple, IP type and L4 type match. */
        GFT_PROFILE_TYPE_4_TUPLE,
/* tunnel type, inner L4 destination port, IP type and L4 type match. */
        GFT_PROFILE_TYPE_L4_DST_PORT,
/* tunnel type, inner IP destination address and IP type match. */
        GFT_PROFILE_TYPE_IP_DST_ADDR,
/* tunnel type, inner IP source address and IP type match. */
        GFT_PROFILE_TYPE_IP_SRC_ADDR,
        GFT_PROFILE_TYPE_TUNNEL_TYPE /* tunnel type and outer IP type match. */,
        MAX_GFT_PROFILE_TYPE
};
1324
1325
/*
 * Major and Minor hsi Versions
 */
struct hsi_fp_ver_struct {
/* Minor HSI version of the driver loading the PF */
        u8 minor_ver_arr[2];
/* Major HSI version of the driver loading the PF */
        u8 major_ver_arr[2];
};
1333
1334
/*
 * Integration Phase
 */
enum integ_phase {
        INTEG_PHASE_BB_A0_LATEST = 3 /* BB A0 latest integration phase */,
        INTEG_PHASE_BB_B0_NO_MCP = 10 /* BB B0 without MCP */,
        INTEG_PHASE_BB_B0_WITH_MCP = 11 /* BB B0 with MCP */,
        MAX_INTEG_PHASE
};
1344
1345
/*
 * iWARP LL2 TX queue IDs
 */
enum iwarp_ll2_tx_queues {
/* LL2 queue for OOO packets sent in-order by the driver */
        IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
/* LL2 queue for unaligned packets sent aligned by the driver */
        IWARP_LL2_ALIGNED_TX_QUEUE,
/* LL2 queue for unaligned packets sent aligned and was right-trimmed by the
 * driver
 */
        IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
        IWARP_LL2_ERROR /* Error indication */,
        MAX_IWARP_LL2_TX_QUEUES
};
1361
1362
/*
 * Malicious VF error ID
 */
enum malicious_vf_error_id {
        MALICIOUS_VF_NO_ERROR /* Zero placeholder value */,
/* Writing to VF/PF channel when it is not ready */
        VF_PF_CHANNEL_NOT_READY,
        VF_ZONE_MSG_NOT_VALID /* VF channel message is not valid */,
        VF_ZONE_FUNC_NOT_ENABLED /* Parent PF of VF channel is not active */,
/* TX packet is shorter than reported on BDs or than minimal size */
        ETH_PACKET_TOO_SMALL,
/* TX packet marked as insert VLAN when it is illegal */
        ETH_ILLEGAL_VLAN_MODE,
        ETH_MTU_VIOLATION /* TX packet is greater than MTU */,
/* TX packet has illegal inband tags marked */
        ETH_ILLEGAL_INBAND_TAGS,
/* VLAN cannot be added to inband tag */
        ETH_VLAN_INSERT_AND_INBAND_VLAN,
/* indicated number of BDs for the packet is illegal */
        ETH_ILLEGAL_NBDS,
        ETH_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */,
/* There are not enough BDs for transmission of even one packet */
        ETH_INSUFFICIENT_BDS,
        ETH_ILLEGAL_LSO_HDR_NBDS /* Header NBDs value is illegal */,
        ETH_ILLEGAL_LSO_MSS /* LSO MSS value is more than allowed */,
/* empty BD (which does not contain control flags) is illegal */
        ETH_ZERO_SIZE_BD,
        ETH_ILLEGAL_LSO_HDR_LEN /* LSO header size is above the limit  */,
/* In LSO its expected that on the local BD ring there will be at least MSS
 * bytes of data
 */
        ETH_INSUFFICIENT_PAYLOAD,
        ETH_EDPM_OUT_OF_SYNC /* Valid BDs on local ring after EDPM L2 sync */,
/* Tunneled packet with IPv6+Ext without a proper number of BDs */
        ETH_TUNN_IPV6_EXT_NBD_ERR,
        ETH_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
        ETH_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */,
/* packet scanned is too large (can be 9700 at most) */
        ETH_PACKET_SIZE_TOO_LARGE,
/* TX packet marked as insert VLAN when it is illegal */
        CORE_ILLEGAL_VLAN_MODE,
/* indicated number of BDs for the packet is illegal */
        CORE_ILLEGAL_NBDS,
        CORE_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */,
/* There are not enough BDs for transmission of even one packet */
        CORE_INSUFFICIENT_BDS,
/* TX packet is shorter than reported on BDs or than minimal size */
        CORE_PACKET_TOO_SMALL,
        CORE_ILLEGAL_INBAND_TAGS /* TX packet has illegal inband tags marked */,
        CORE_VLAN_INSERT_AND_INBAND_VLAN /* VLAN cannot be added to inband tag */,
        CORE_MTU_VIOLATION /* TX packet is greater than MTU */,
        CORE_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
        CORE_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */,
        MAX_MALICIOUS_VF_ERROR_ID
};
1418
1419
1420
/*
 * Mstorm non-triggering VF zone
 */
struct mstorm_non_trigger_vf_zone {
/* VF statistic bucket */
        struct eth_mstorm_per_queue_stat eth_queue_stat;
/* VF RX queues producers */
        struct eth_rx_prod_data
                eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
};
1431
1432
/*
 * Mstorm VF zone
 */
struct mstorm_vf_zone {
/* non-interrupt-triggering zone */
        struct mstorm_non_trigger_vf_zone non_trigger;
};
1440
1441
/*
 * vlan header including TPID and TCI fields
 */
struct vlan_header {
        __le16 tpid /* Tag Protocol Identifier */;
        __le16 tci /* Tag Control Information */;
};
1449
/*
 * outer tag configurations
 */
struct outer_tag_config_struct {
/* Enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette
 * Davis, UFP with Host Control mode, and UFP with DCB over base interface.
 * else - 0.
 */
        u8 enable_stag_pri_change;
/* If inner_to_outer_pri_map is initialized then set pri_map_valid */
        u8 pri_map_valid;
        u8 reserved[2];
/* In case mf_mode is MF_OVLAN, this field specifies the outer tag protocol
 * identifier and outer tag control information
 */
        struct vlan_header outer_tag;
/* Map from inner to outer priority. Set pri_map_valid when init map */
        u8 inner_to_outer_pri_map[8];
};
1469
1470
/*
 * personality per PF
 */
enum personality_type {
        BAD_PERSONALITY_TYP,
        PERSONALITY_ISCSI /* iSCSI and LL2 */,
        PERSONALITY_FCOE /* Fcoe and LL2 */,
        PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp, Eth and LL2 */,
        PERSONALITY_RDMA /* Roce and LL2 */,
        PERSONALITY_CORE /* CORE(LL2) */,
        PERSONALITY_ETH /* Ethernet */,
        PERSONALITY_TOE /* Toe and LL2 */,
        MAX_PERSONALITY_TYPE
};
1485
1486
/*
 * tunnel configuration
 */
struct pf_start_tunnel_config {
/* Set VXLAN tunnel UDP destination port to vxlan_udp_port. If not set -
 * FW will use a default port
 */
        u8 set_vxlan_udp_port_flg;
/* Set GENEVE tunnel UDP destination port to geneve_udp_port. If not set -
 * FW will use a default port
 */
        u8 set_geneve_udp_port_flg;
/* Set no-inner-L2 VXLAN tunnel UDP destination port to
 * no_inner_l2_vxlan_udp_port. If not set - FW will use a default port
 */
        u8 set_no_inner_l2_vxlan_udp_port_flg;
        u8 tunnel_clss_vxlan /* Rx classification scheme for VXLAN tunnel. */;
/* Rx classification scheme for l2 GENEVE tunnel. */
        u8 tunnel_clss_l2geneve;
/* Rx classification scheme for ip GENEVE tunnel. */
        u8 tunnel_clss_ipgeneve;
        u8 tunnel_clss_l2gre /* Rx classification scheme for l2 GRE tunnel. */;
        u8 tunnel_clss_ipgre /* Rx classification scheme for ip GRE tunnel. */;
/* VXLAN tunnel UDP destination port. Valid if set_vxlan_udp_port_flg=1 */
        __le16 vxlan_udp_port;
/* GENEVE tunnel UDP destination port. Valid if set_geneve_udp_port_flg=1 */
        __le16 geneve_udp_port;
/* no-inner-L2 VXLAN tunnel UDP destination port. Valid if
 * set_no_inner_l2_vxlan_udp_port_flg=1
 */
        __le16 no_inner_l2_vxlan_udp_port;
        __le16 reserved[3];
};
1520
1521/*
1522 * Ramrod data for PF start ramrod
1523 */
1524struct pf_start_ramrod_data {
1525        struct regpair event_ring_pbl_addr /* Address of event ring PBL */;
1526/* PBL address of consolidation queue */
1527        struct regpair consolid_q_pbl_addr;
1528/* tunnel configuration. */
1529        struct pf_start_tunnel_config tunnel_config;
1530        __le16 event_ring_sb_id /* Status block ID */;
1531/* All VfIds owned by Pf will be from baseVfId till baseVfId+numVfs */
1532        u8 base_vf_id;
1533        u8 num_vfs /* Amount of vfs owned by PF */;
1534        u8 event_ring_num_pages /* Number of PBL pages in event ring */;
1535        u8 event_ring_sb_index /* Status block index */;
1536        u8 path_id /* HW path ID (engine ID) */;
1537        u8 warning_as_error /* In FW asserts, treat warning as error */;
1538/* If not set - throw a warning for each ramrod (for debug) */
1539        u8 dont_log_ramrods;
1540        u8 personality /* define what type of personality is new PF */;
1541/* Log type mask. Each bit set enables a corresponding event type logging.
1542 * Event types are defined as ASSERT_LOG_TYPE_xxx
1543 */
1544        __le16 log_type_mask;
1545        u8 mf_mode /* Multi function mode */;
1546        u8 integ_phase /* Integration phase */;
1547/* If set, inter-pf tx switching is allowed in Switch Independent func mode */
1548        u8 allow_npar_tx_switching;
1549        u8 reserved0;
1550/* FP HSI version to be used by FW */
1551        struct hsi_fp_ver_struct hsi_fp_ver;
1552/* Outer tag configurations */
1553        struct outer_tag_config_struct outer_tag_config;
1554};
1555
1556
1557
1558/*
1559 * Per protocol DCB data
1560 */
1561struct protocol_dcb_data {
1562        u8 dcb_enable_flag /* Enable DCB */;
1563        u8 dscp_enable_flag /* Enable updating DSCP value */;
1564        u8 dcb_priority /* DCB priority */;
1565        u8 dcb_tc /* DCB TC */;
1566        u8 dscp_val /* DSCP value to write if dscp_enable_flag is set */;
1567/* When DCB is enabled - if this flag is set, dont add VLAN 0 tag to untagged
1568 * frames
1569 */
1570        u8 dcb_dont_add_vlan0;
1571};
1572
1573/*
1574 * Update tunnel configuration
1575 */
1576struct pf_update_tunnel_config {
1577/* Update RX per PF tunnel classification scheme. */
1578        u8 update_rx_pf_clss;
1579/* Update per PORT default tunnel RX classification scheme for traffic with
1580 * unknown unicast outer MAC in NPAR mode.
1581 */
1582        u8 update_rx_def_ucast_clss;
1583/* Update per PORT default tunnel RX classification scheme for traffic with non
1584 * unicast outer MAC in NPAR mode.
1585 */
1586        u8 update_rx_def_non_ucast_clss;
1587/* Update VXLAN tunnel UDP destination port. */
1588        u8 set_vxlan_udp_port_flg;
1589/* Update GENEVE tunnel UDP destination port. */
1590        u8 set_geneve_udp_port_flg;
1591/* Update no-innet-L2 VXLAN  tunnel UDP destination port. */
1592        u8 set_no_inner_l2_vxlan_udp_port_flg;
1593        u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
1594/* Classification scheme for l2 GENEVE tunnel. */
1595        u8 tunnel_clss_l2geneve;
1596/* Classification scheme for ip GENEVE tunnel. */
1597        u8 tunnel_clss_ipgeneve;
1598        u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
1599        u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
1600        u8 reserved;
1601        __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
1602        __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
1603/* no-innet-L2 VXLAN  tunnel UDP destination port. */
1604        __le16 no_inner_l2_vxlan_udp_port;
1605        __le16 reserved1[3];
1606};
1607
1608/*
1609 * Data for port update ramrod
1610 */
1611struct pf_update_ramrod_data {
1612/* Update Eth DCB  data indication (use enum dcb_dscp_update_mode) */
1613        u8 update_eth_dcb_data_mode;
1614/* Update FCOE DCB  data indication (use enum dcb_dscp_update_mode) */
1615        u8 update_fcoe_dcb_data_mode;
1616/* Update iSCSI DCB  data indication (use enum dcb_dscp_update_mode) */
1617        u8 update_iscsi_dcb_data_mode;
1618        u8 update_roce_dcb_data_mode /* Update ROCE DCB  data indication */;
1619/* Update RROCE (RoceV2) DCB  data indication */
1620        u8 update_rroce_dcb_data_mode;
1621        u8 update_iwarp_dcb_data_mode /* Update IWARP DCB  data indication */;
1622        u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
1623/* Update Enable STAG Priority Change indication */
1624        u8 update_enable_stag_pri_change;
1625        struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
1626        struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
1627/* core iscsi related fields */
1628        struct protocol_dcb_data iscsi_dcb_data;
1629        struct protocol_dcb_data roce_dcb_data /* core roce related fields */;
1630/* core roce related fields */
1631        struct protocol_dcb_data rroce_dcb_data;
1632/* core iwarp related fields */
1633        struct protocol_dcb_data iwarp_dcb_data;
1634        __le16 mf_vlan /* new outer vlan id value */;
1635/* enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette
1636 * Davis, UFP with Host Control mode, and UFP with DCB over base interface.
1637 * else - 0
1638 */
1639        u8 enable_stag_pri_change;
1640        u8 reserved;
1641/* tunnel configuration. */
1642        struct pf_update_tunnel_config tunnel_config;
1643};
1644
1645
1646
1647/*
1648 * Ports mode
1649 */
1650enum ports_mode {
1651        ENGX2_PORTX1 /* 2 engines x 1 port */,
1652        ENGX2_PORTX2 /* 2 engines x 2 ports */,
1653        ENGX1_PORTX1 /* 1 engine  x 1 port */,
1654        ENGX1_PORTX2 /* 1 engine  x 2 ports */,
1655        ENGX1_PORTX4 /* 1 engine  x 4 ports */,
1656        MAX_PORTS_MODE
1657};
1658
1659
1660
1661/*
1662 * use to index in hsi_fp_[major|minor]_ver_arr per protocol
1663 */
1664enum protocol_version_array_key {
1665        ETH_VER_KEY = 0,
1666        ROCE_VER_KEY,
1667        MAX_PROTOCOL_VERSION_ARRAY_KEY
1668};
1669
1670
1671
1672/*
1673 * RDMA TX Stats
1674 */
1675struct rdma_sent_stats {
1676        struct regpair sent_bytes /* number of total RDMA bytes sent */;
1677        struct regpair sent_pkts /* number of total RDMA packets sent */;
1678};
1679
1680/*
1681 * Pstorm non-triggering VF zone
1682 */
1683struct pstorm_non_trigger_vf_zone {
1684/* VF statistic bucket */
1685        struct eth_pstorm_per_queue_stat eth_queue_stat;
1686        struct rdma_sent_stats rdma_stats /* RoCE sent statistics */;
1687};
1688
1689
1690/*
1691 * Pstorm VF zone
1692 */
1693struct pstorm_vf_zone {
1694/* non-interrupt-triggering zone */
1695        struct pstorm_non_trigger_vf_zone non_trigger;
1696        struct regpair reserved[7] /* vf_zone size mus be power of 2 */;
1697};
1698
1699
1700/*
1701 * Ramrod Header of SPQE
1702 */
1703struct ramrod_header {
1704        __le32 cid /* Slowpath Connection CID */;
1705        u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
1706        u8 protocol_id /* Ramrod Protocol ID */;
1707        __le16 echo /* Ramrod echo */;
1708};
1709
1710
1711/*
1712 * RDMA RX Stats
1713 */
1714struct rdma_rcv_stats {
1715        struct regpair rcv_bytes /* number of total RDMA bytes received */;
1716        struct regpair rcv_pkts /* number of total RDMA packets received */;
1717};
1718
1719
1720
1721/*
1722 * Data for update QCN/DCQCN RL ramrod
1723 */
1724struct rl_update_ramrod_data {
1725        u8 qcn_update_param_flg /* Update QCN global params: timeout. */;
1726/* Update DCQCN global params: timeout, g, k. */
1727        u8 dcqcn_update_param_flg;
1728        u8 rl_init_flg /* Init RL parameters, when RL disabled. */;
1729        u8 rl_start_flg /* Start RL in IDLE state. Set rate to maximum. */;
1730        u8 rl_stop_flg /* Stop RL. */;
1731        u8 rl_id_first /* ID of first or single RL, that will be updated. */;
1732/* ID of last RL, that will be updated. If clear, single RL will updated. */
1733        u8 rl_id_last;
1734        u8 rl_dc_qcn_flg /* If set, RL will used for DCQCN. */;
1735/* If set, alpha will be reset to 1 when the state machine is idle. */
1736        u8 dcqcn_reset_alpha_on_idle;
1737/* Byte counter threshold to change rate increase stage. */
1738        u8 rl_bc_stage_th;
1739/* Timer threshold to change rate increase stage. */
1740        u8 rl_timer_stage_th;
1741        u8 reserved1;
1742        __le32 rl_bc_rate /* Byte Counter Limit. */;
1743        __le16 rl_max_rate /* Maximum rate in 1.6 Mbps resolution. */;
1744        __le16 rl_r_ai /* Active increase rate. */;
1745        __le16 rl_r_hai /* Hyper active increase rate. */;
1746        __le16 dcqcn_g /* DCQCN Alpha update gain in 1/64K resolution . */;
1747        __le32 dcqcn_k_us /* DCQCN Alpha update interval. */;
1748        __le32 dcqcn_timeuot_us /* DCQCN timeout. */;
1749        __le32 qcn_timeuot_us /* QCN timeout. */;
1750        __le32 reserved2;
1751};
1752
1753
1754/*
1755 * Slowpath Element (SPQE)
1756 */
1757struct slow_path_element {
1758        struct ramrod_header hdr /* Ramrod Header */;
1759        struct regpair data_ptr /* Pointer to the Ramrod Data on the Host */;
1760};
1761
1762
1763/*
1764 * Tstorm non-triggering VF zone
1765 */
1766struct tstorm_non_trigger_vf_zone {
1767        struct rdma_rcv_stats rdma_stats /* RoCE received statistics */;
1768};
1769
1770
/* Tstorm per-port drop / irregular-packet statistics */
struct tstorm_per_port_stat {
/* packet is dropped because it was truncated in NIG */
        struct regpair trunc_error_discard;
/* packet is dropped because of Ethernet FCS error */
        struct regpair mac_error_discard;
/* packet is dropped because classification was unsuccessful */
        struct regpair mftag_filter_discard;
/* packet was passed to Ethernet and dropped because of no mac filter match */
        struct regpair eth_mac_filter_discard;
/* packet passed to Light L2 and dropped because Light L2 is not configured for
 * this PF
 */
        struct regpair ll2_mac_filter_discard;
/* packet passed to Light L2 and dropped because the Light L2 connection is
 * disabled
 */
        struct regpair ll2_conn_disabled_discard;
/* packet is an ISCSI irregular packet */
        struct regpair iscsi_irregular_pkt;
/* packet is an FCOE irregular packet */
        struct regpair fcoe_irregular_pkt;
/* packet is an ROCE irregular packet */
        struct regpair roce_irregular_pkt;
/* packet is an IWARP irregular packet */
        struct regpair iwarp_irregular_pkt;
/* packet is an ETH irregular packet */
        struct regpair eth_irregular_pkt;
/* packet is an TOE irregular packet */
        struct regpair toe_irregular_pkt;
/* packet is an PREROCE irregular packet */
        struct regpair preroce_irregular_pkt;
        struct regpair eth_gre_tunn_filter_discard /* GRE dropped packets */;
/* VXLAN dropped packets */
        struct regpair eth_vxlan_tunn_filter_discard;
/* GENEVE dropped packets */
        struct regpair eth_geneve_tunn_filter_discard;
        struct regpair eth_gft_drop_pkt /* GFT dropped packets */;
};
1809
1810
1811/*
1812 * Tstorm VF zone
1813 */
1814struct tstorm_vf_zone {
1815/* non-interrupt-triggering zone */
1816        struct tstorm_non_trigger_vf_zone non_trigger;
1817};
1818
1819
1820/*
1821 * Tunnel classification scheme
1822 */
1823enum tunnel_clss {
1824/* Use MAC and VLAN from first L2 header for vport classification. */
1825        TUNNEL_CLSS_MAC_VLAN = 0,
1826/* Use MAC from first L2 header and VNI from tunnel header for vport
1827 * classification
1828 */
1829        TUNNEL_CLSS_MAC_VNI,
1830/* Use MAC and VLAN from last L2 header for vport classification */
1831        TUNNEL_CLSS_INNER_MAC_VLAN,
1832/* Use MAC from last L2 header and VNI from tunnel header for vport
1833 * classification
1834 */
1835        TUNNEL_CLSS_INNER_MAC_VNI,
1836/* Use MAC and VLAN from last L2 header for vport classification. If no exact
1837 * match, use MAC and VLAN from first L2 header for classification.
1838 */
1839        TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE,
1840        MAX_TUNNEL_CLSS
1841};
1842
1843
1844
1845/*
1846 * Ustorm non-triggering VF zone
1847 */
1848struct ustorm_non_trigger_vf_zone {
1849/* VF statistic bucket */
1850        struct eth_ustorm_per_queue_stat eth_queue_stat;
1851        struct regpair vf_pf_msg_addr /* VF-PF message address */;
1852};
1853
1854
1855/*
1856 * Ustorm triggering VF zone
1857 */
1858struct ustorm_trigger_vf_zone {
1859        u8 vf_pf_msg_valid /* VF-PF message valid flag */;
1860        u8 reserved[7];
1861};
1862
1863
1864/*
1865 * Ustorm VF zone
1866 */
1867struct ustorm_vf_zone {
1868/* non-interrupt-triggering zone */
1869        struct ustorm_non_trigger_vf_zone non_trigger;
1870        struct ustorm_trigger_vf_zone trigger /* interrupt triggering zone */;
1871};
1872
1873
1874/*
1875 * VF-PF channel data
1876 */
1877struct vf_pf_channel_data {
1878/* 0: VF-PF Channel NOT ready. Waiting for ack from PF driver. 1: VF-PF Channel
1879 * is ready for a new transaction.
1880 */
1881        __le32 ready;
1882/* 0: VF-PF Channel is invalid because of malicious VF. 1: VF-PF Channel is
1883 * valid.
1884 */
1885        u8 valid;
1886        u8 reserved0;
1887        __le16 reserved1;
1888};
1889
1890
1891/*
1892 * Ramrod data for VF start ramrod
1893 */
1894struct vf_start_ramrod_data {
1895        u8 vf_id /* VF ID */;
1896/* If set, initial cleanup ack will be sent to parent PF SP event queue */
1897        u8 enable_flr_ack;
1898        __le16 opaque_fid /* VF opaque FID */;
1899        u8 personality /* define what type of personality is new VF */;
1900        u8 reserved[7];
1901/* FP HSI version to be used by FW */
1902        struct hsi_fp_ver_struct hsi_fp_ver;
1903};
1904
1905
1906/*
1907 * Ramrod data for VF start ramrod
1908 */
1909struct vf_stop_ramrod_data {
1910        u8 vf_id /* VF ID */;
1911        u8 reserved0;
1912        __le16 reserved1;
1913        __le32 reserved2;
1914};
1915
1916
1917/*
1918 * VF zone size mode.
1919 */
1920enum vf_zone_size_mode {
1921/* Default VF zone size. Up to 192 VF supported. */
1922        VF_ZONE_SIZE_MODE_DEFAULT,
1923/* Doubled VF zone size. Up to 96 VF supported. */
1924        VF_ZONE_SIZE_MODE_DOUBLE,
1925/* Quad VF zone size. Up to 48 VF supported. */
1926        VF_ZONE_SIZE_MODE_QUAD,
1927        MAX_VF_ZONE_SIZE_MODE
1928};
1929
1930
1931
1932
1933/*
1934 * Xstorm non-triggering VF zone
1935 */
1936struct xstorm_non_trigger_vf_zone {
1937        struct regpair non_edpm_ack_pkts /* RoCE received statistics */;
1938};
1939
1940
1941/*
1942 * Tstorm VF zone
1943 */
1944struct xstorm_vf_zone {
1945/* non-interrupt-triggering zone */
1946        struct xstorm_non_trigger_vf_zone non_trigger;
1947};
1948
1949
1950
1951/*
1952 * Attentions status block
1953 */
1954struct atten_status_block {
1955        __le32 atten_bits;
1956        __le32 atten_ack;
1957        __le16 reserved0;
1958        __le16 sb_index /* status block running index */;
1959        __le32 reserved1;
1960};
1961
1962
1963/*
1964 * DMAE command
1965 */
1966struct dmae_cmd {
1967        __le32 opcode;
1968/* DMA Source. 0 - PCIe, 1 - GRC (use enum dmae_cmd_src_enum) */
1969#define DMAE_CMD_SRC_MASK              0x1
1970#define DMAE_CMD_SRC_SHIFT             0
1971/* DMA destination. 0 - None, 1 - PCIe, 2 - GRC, 3 - None
1972 * (use enum dmae_cmd_dst_enum)
1973 */
1974#define DMAE_CMD_DST_MASK              0x3
1975#define DMAE_CMD_DST_SHIFT             1
1976/* Completion destination. 0 - PCie, 1 - GRC (use enum dmae_cmd_c_dst_enum) */
1977#define DMAE_CMD_C_DST_MASK            0x1
1978#define DMAE_CMD_C_DST_SHIFT           3
1979/* Reset the CRC result (do not use the previous result as the seed) */
1980#define DMAE_CMD_CRC_RESET_MASK        0x1
1981#define DMAE_CMD_CRC_RESET_SHIFT       4
1982/* Reset the source address in the next go to the same source address of the
1983 * previous go
1984 */
1985#define DMAE_CMD_SRC_ADDR_RESET_MASK   0x1
1986#define DMAE_CMD_SRC_ADDR_RESET_SHIFT  5
1987/* Reset the destination address in the next go to the same destination address
1988 * of the previous go
1989 */
1990#define DMAE_CMD_DST_ADDR_RESET_MASK   0x1
1991#define DMAE_CMD_DST_ADDR_RESET_SHIFT  6
1992/* 0   completion function is the same as src function, 1 - 0 completion
1993 * function is the same as dst function (use enum dmae_cmd_comp_func_enum)
1994 */
1995#define DMAE_CMD_COMP_FUNC_MASK        0x1
1996#define DMAE_CMD_COMP_FUNC_SHIFT       7
1997/* 0 - Do not write a completion word, 1 - Write a completion word
1998 * (use enum dmae_cmd_comp_word_en_enum)
1999 */
2000#define DMAE_CMD_COMP_WORD_EN_MASK     0x1
2001#define DMAE_CMD_COMP_WORD_EN_SHIFT    8
2002/* 0 - Do not write a CRC word, 1 - Write a CRC word
2003 * (use enum dmae_cmd_comp_crc_en_enum)
2004 */
2005#define DMAE_CMD_COMP_CRC_EN_MASK      0x1
2006#define DMAE_CMD_COMP_CRC_EN_SHIFT     9
2007/* The CRC word should be taken from the DMAE address space from address 9+X,
2008 * where X is the value in these bits.
2009 */
2010#define DMAE_CMD_COMP_CRC_OFFSET_MASK  0x7
2011#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
2012#define DMAE_CMD_RESERVED1_MASK        0x1
2013#define DMAE_CMD_RESERVED1_SHIFT       13
2014#define DMAE_CMD_ENDIANITY_MODE_MASK   0x3
2015#define DMAE_CMD_ENDIANITY_MODE_SHIFT  14
2016/* The field specifies how the completion word is affected by PCIe read error. 0
2017 * Send a regular completion, 1 - Send a completion with an error indication,
2018 * 2 do not send a completion (use enum dmae_cmd_error_handling_enum)
2019 */
2020#define DMAE_CMD_ERR_HANDLING_MASK     0x3
2021#define DMAE_CMD_ERR_HANDLING_SHIFT    16
2022/* The port ID to be placed on the  RF FID  field of the GRC bus. this field is
2023 * used both when GRC is the destination and when it is the source of the DMAE
2024 * transaction.
2025 */
2026#define DMAE_CMD_PORT_ID_MASK          0x3
2027#define DMAE_CMD_PORT_ID_SHIFT         18
2028/* Source PCI function number [3:0] */
2029#define DMAE_CMD_SRC_PF_ID_MASK        0xF
2030#define DMAE_CMD_SRC_PF_ID_SHIFT       20
2031/* Destination PCI function number [3:0] */
2032#define DMAE_CMD_DST_PF_ID_MASK        0xF
2033#define DMAE_CMD_DST_PF_ID_SHIFT       24
2034#define DMAE_CMD_SRC_VF_ID_VALID_MASK  0x1 /* Source VFID valid */
2035#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
2036#define DMAE_CMD_DST_VF_ID_VALID_MASK  0x1 /* Destination VFID valid */
2037#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
2038#define DMAE_CMD_RESERVED2_MASK        0x3
2039#define DMAE_CMD_RESERVED2_SHIFT       30
2040/* PCIe source address low in bytes or GRC source address in DW */
2041        __le32 src_addr_lo;
2042/* PCIe source address high in bytes or reserved (if source is GRC) */
2043        __le32 src_addr_hi;
2044/* PCIe destination address low in bytes or GRC destination address in DW */
2045        __le32 dst_addr_lo;
2046/* PCIe destination address high in bytes or reserved (if destination is GRC) */
2047        __le32 dst_addr_hi;
2048        __le16 length_dw /* Length in DW */;
2049        __le16 opcode_b;
2050#define DMAE_CMD_SRC_VF_ID_MASK        0xFF /* Source VF id */
2051#define DMAE_CMD_SRC_VF_ID_SHIFT       0
2052#define DMAE_CMD_DST_VF_ID_MASK        0xFF /* Destination VF id */
2053#define DMAE_CMD_DST_VF_ID_SHIFT       8
2054/* PCIe completion address low in bytes or GRC completion address in DW */
2055        __le32 comp_addr_lo;
2056/* PCIe completion address high in bytes or reserved (if completion address is
2057 * GRC)
2058 */
2059        __le32 comp_addr_hi;
2060        __le32 comp_val /* Value to write to completion address */;
2061        __le32 crc32 /* crc16 result */;
2062        __le32 crc_32_c /* crc32_c result */;
2063        __le16 crc16 /* crc16 result */;
2064        __le16 crc16_c /* crc16_c result */;
2065        __le16 crc10 /* crc_t10 result */;
2066        __le16 error_bit_reserved;
2067#define DMAE_CMD_ERROR_BIT_MASK        0x1 /* Error bit */
2068#define DMAE_CMD_ERROR_BIT_SHIFT       0
2069#define DMAE_CMD_RESERVED_MASK         0x7FFF
2070#define DMAE_CMD_RESERVED_SHIFT        1
2071        __le16 xsum16 /* checksum16 result  */;
2072        __le16 xsum8 /* checksum8 result  */;
2073};
2074
2075
/* Values for the DMAE_CMD_COMP_CRC_EN opcode field */
enum dmae_cmd_comp_crc_en_enum {
        dmae_cmd_comp_crc_disabled /* Do not write a CRC word */,
        dmae_cmd_comp_crc_enabled /* Write a CRC word */,
        MAX_DMAE_CMD_COMP_CRC_EN_ENUM
};


/* Values for the DMAE_CMD_COMP_FUNC opcode field */
enum dmae_cmd_comp_func_enum {
/* completion word and/or CRC will be sent to SRC-PCI function/SRC VFID */
        dmae_cmd_comp_func_to_src,
/* completion word and/or CRC will be sent to DST-PCI function/DST VFID */
        dmae_cmd_comp_func_to_dst,
        MAX_DMAE_CMD_COMP_FUNC_ENUM
};


/* Values for the DMAE_CMD_COMP_WORD_EN opcode field */
enum dmae_cmd_comp_word_en_enum {
        dmae_cmd_comp_word_disabled /* Do not write a completion word */,
        dmae_cmd_comp_word_enabled /* Write the completion word */,
        MAX_DMAE_CMD_COMP_WORD_EN_ENUM
};


/* Values for the DMAE_CMD_C_DST opcode field (completion destination) */
enum dmae_cmd_c_dst_enum {
        dmae_cmd_c_dst_pcie /* Completion is written to PCIe */,
        dmae_cmd_c_dst_grc /* Completion is written to GRC */,
        MAX_DMAE_CMD_C_DST_ENUM
};


/* Values for the DMAE_CMD_DST opcode field (DMA destination) */
enum dmae_cmd_dst_enum {
        dmae_cmd_dst_none_0 /* No destination */,
        dmae_cmd_dst_pcie /* Destination is PCIe */,
        dmae_cmd_dst_grc /* Destination is GRC */,
        dmae_cmd_dst_none_3 /* No destination */,
        MAX_DMAE_CMD_DST_ENUM
};


/* Values for the DMAE_CMD_ERR_HANDLING opcode field */
enum dmae_cmd_error_handling_enum {
/* Send a regular completion (with no error indication) */
        dmae_cmd_error_handling_send_regular_comp,
/* Send a completion with an error indication (i.e. set bit 31 of the completion
 * word)
 */
        dmae_cmd_error_handling_send_comp_with_err,
        dmae_cmd_error_handling_dont_send_comp /* Do not send a completion */,
        MAX_DMAE_CMD_ERROR_HANDLING_ENUM
};


/* Values for the DMAE_CMD_SRC opcode field (DMA source) */
enum dmae_cmd_src_enum {
        dmae_cmd_src_pcie /* The source is the PCIe */,
        dmae_cmd_src_grc /* The source is the GRC */,
        MAX_DMAE_CMD_SRC_ENUM
};
2132
2133
2134/*
2135 * DMAE parameters
2136 */
2137struct dmae_params {
2138        __le32 flags;
2139/* If set and the source is a block of length DMAE_MAX_RW_SIZE and the
2140 * destination is larger, the source block will be duplicated as many
2141 * times as required to fill the destination block. This is used mostly
2142 * to write a zeroed buffer to destination address using DMA
2143 */
2144#define DMAE_PARAMS_RW_REPL_SRC_MASK     0x1
2145#define DMAE_PARAMS_RW_REPL_SRC_SHIFT    0
2146/* If set, the source is a VF, and the source VF ID is taken from the
2147 * src_vf_id parameter.
2148 */
2149#define DMAE_PARAMS_SRC_VF_VALID_MASK    0x1
2150#define DMAE_PARAMS_SRC_VF_VALID_SHIFT   1
2151/* If set, the destination is a VF, and the destination VF ID is taken
2152 * from the dst_vf_id parameter.
2153 */
2154#define DMAE_PARAMS_DST_VF_VALID_MASK    0x1
2155#define DMAE_PARAMS_DST_VF_VALID_SHIFT   2
2156/* If set, a completion is sent to the destination function.
2157 * Otherwise its sent to the source function.
2158 */
2159#define DMAE_PARAMS_COMPLETION_DST_MASK  0x1
2160#define DMAE_PARAMS_COMPLETION_DST_SHIFT 3
2161/* If set, the port ID is taken from the port_id parameter.
2162 * Otherwise, the current port ID is used.
2163 */
2164#define DMAE_PARAMS_PORT_VALID_MASK      0x1
2165#define DMAE_PARAMS_PORT_VALID_SHIFT     4
2166/* If set, the source PF ID is taken from the src_pf_id parameter.
2167 * Otherwise, the current PF ID is used.
2168 */
2169#define DMAE_PARAMS_SRC_PF_VALID_MASK    0x1
2170#define DMAE_PARAMS_SRC_PF_VALID_SHIFT   5
2171/* If set, the destination PF ID is taken from the dst_pf_id parameter.
2172 * Otherwise, the current PF ID is used
2173 */
2174#define DMAE_PARAMS_DST_PF_VALID_MASK    0x1
2175#define DMAE_PARAMS_DST_PF_VALID_SHIFT   6
2176#define DMAE_PARAMS_RESERVED_MASK        0x1FFFFFF
2177#define DMAE_PARAMS_RESERVED_SHIFT       7
2178        u8 src_vf_id /* Source VF ID, valid only if src_vf_valid is set */;
2179        u8 dst_vf_id /* Destination VF ID, valid only if dst_vf_valid is set */;
2180        u8 port_id /* Port ID, valid only if port_valid is set */;
2181        u8 src_pf_id /* Source PF ID, valid only if src_pf_valid is set */;
2182        u8 dst_pf_id /* Destination PF ID, valid only if dst_pf_valid is set */;
2183        u8 reserved1;
2184        __le16 reserved2;
2185};
2186
2187
/* Location of the FW asserts list within the Storm RAM */
struct fw_asserts_ram_section {
/* The offset of the section in the RAM in RAM lines (64-bit units) */
        __le16 section_ram_line_offset;
/* The size of the section in RAM lines (64-bit units) */
        __le16 section_ram_line_size;
/* The offset of the asserts list within the section in dwords */
        u8 list_dword_offset;
/* The size of an assert list element in dwords */
        u8 list_element_dword_size;
        u8 list_num_elements /* The number of elements in the asserts list */;
/* The offset of the next list index field within the section in dwords */
        u8 list_next_index_dword_offset;
};
2201
2202
/* Firmware version number, split into its components */
struct fw_ver_num {
        u8 major /* Firmware major version number */;
        u8 minor /* Firmware minor version number */;
        u8 rev /* Firmware revision version number */;
        u8 eng /* Firmware engineering version number (for bootleg versions) */;
};
2209
/* Firmware version information */
struct fw_ver_info {
        __le16 tools_ver /* Tools version number */;
        u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
        u8 reserved1;
        struct fw_ver_num num /* FW version number */;
        __le32 timestamp /* FW Timestamp in unix time (sec. since 1970) */;
        __le32 reserved2;
};
2218
/* Top-level FW information block */
struct fw_info {
        struct fw_ver_info ver /* FW version information */;
/* Info regarding the FW asserts section in the Storm RAM */
        struct fw_asserts_ram_section fw_asserts_section;
};
2224
2225
/* Location of the fw_info structure in GRC space */
struct fw_info_location {
        __le32 grc_addr /* GRC address where the fw_info struct is located. */;
/* Size of the fw_info structure (that is located at the grc_addr). */
        __le32 size;
};
2231
2232
/* DMAE parameters (host-side mirror of struct dmae_params) */
struct ecore_dmae_params {
        u32 flags;
/* If ECORE_DMAE_PARAMS_RW_REPL_SRC flag is set and the
 * source is a block of length DMAE_MAX_RW_SIZE and the
 * destination is larger, the source block will be duplicated as
 * many times as required to fill the destination block. This is
 * used mostly to write a zeroed buffer to destination address
 * using DMA
 */
#define ECORE_DMAE_PARAMS_RW_REPL_SRC_MASK        0x1
#define ECORE_DMAE_PARAMS_RW_REPL_SRC_SHIFT       0
#define ECORE_DMAE_PARAMS_SRC_VF_VALID_MASK       0x1
#define ECORE_DMAE_PARAMS_SRC_VF_VALID_SHIFT      1
#define ECORE_DMAE_PARAMS_DST_VF_VALID_MASK       0x1
#define ECORE_DMAE_PARAMS_DST_VF_VALID_SHIFT      2
#define ECORE_DMAE_PARAMS_COMPLETION_DST_MASK     0x1
#define ECORE_DMAE_PARAMS_COMPLETION_DST_SHIFT    3
#define ECORE_DMAE_PARAMS_PORT_VALID_MASK         0x1
#define ECORE_DMAE_PARAMS_PORT_VALID_SHIFT        4
#define ECORE_DMAE_PARAMS_SRC_PF_VALID_MASK       0x1
#define ECORE_DMAE_PARAMS_SRC_PF_VALID_SHIFT      5
#define ECORE_DMAE_PARAMS_DST_PF_VALID_MASK       0x1
#define ECORE_DMAE_PARAMS_DST_PF_VALID_SHIFT      6
#define ECORE_DMAE_PARAMS_RESERVED_MASK           0x1FFFFFF
#define ECORE_DMAE_PARAMS_RESERVED_SHIFT          7
        u8 src_vfid /* Source VF ID, valid only if SRC_VF_VALID is set */;
        u8 dst_vfid /* Destination VF ID, valid only if DST_VF_VALID is set */;
        u8 port_id /* Port ID, valid only if PORT_VALID is set */;
        u8 src_pfid /* Source PF ID, valid only if SRC_PF_VALID is set */;
        u8 dst_pfid /* Destination PF ID, valid only if DST_PF_VALID is set */;
        u8 reserved1;
        __le16 reserved2;
};
2267
2268/*
2269 * IGU cleanup command
2270 */
2271struct igu_cleanup {
2272        __le32 sb_id_and_flags;
2273#define IGU_CLEANUP_RESERVED0_MASK     0x7FFFFFF
2274#define IGU_CLEANUP_RESERVED0_SHIFT    0
2275/* cleanup clear - 0, set - 1 */
2276#define IGU_CLEANUP_CLEANUP_SET_MASK   0x1
2277#define IGU_CLEANUP_CLEANUP_SET_SHIFT  27
2278#define IGU_CLEANUP_CLEANUP_TYPE_MASK  0x7
2279#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
2280/* must always be set (use enum command_type_bit) */
2281#define IGU_CLEANUP_COMMAND_TYPE_MASK  0x1U
2282#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
2283        __le32 reserved1;
2284};
2285
2286
2287/*
2288 * IGU firmware driver command
2289 */
2290union igu_command {
2291        struct igu_prod_cons_update prod_cons_update;
2292        struct igu_cleanup cleanup;
2293};
2294
2295
2296/*
2297 * IGU firmware driver command
2298 */
2299struct igu_command_reg_ctrl {
2300        __le16 opaque_fid;
2301        __le16 igu_command_reg_ctrl_fields;
2302#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK  0xFFF
2303#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
2304#define IGU_COMMAND_REG_CTRL_RESERVED_MASK      0x7
2305#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT     12
2306/* command typ: 0 - read, 1 - write */
2307#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK  0x1
2308#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
2309};
2310
2311
2312/*
2313 * IGU mapping line structure
2314 */
2315struct igu_mapping_line {
2316        __le32 igu_mapping_line_fields;
2317#define IGU_MAPPING_LINE_VALID_MASK            0x1
2318#define IGU_MAPPING_LINE_VALID_SHIFT           0
2319#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK    0xFF
2320#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT   1
2321/* In BB: VF-0-120, PF-0-7; In K2: VF-0-191, PF-0-15 */
2322#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK  0xFF
2323#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
2324#define IGU_MAPPING_LINE_PF_VALID_MASK         0x1 /* PF-1, VF-0 */
2325#define IGU_MAPPING_LINE_PF_VALID_SHIFT        17
2326#define IGU_MAPPING_LINE_IPS_GROUP_MASK        0x3F
2327#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT       18
2328#define IGU_MAPPING_LINE_RESERVED_MASK         0xFF
2329#define IGU_MAPPING_LINE_RESERVED_SHIFT        24
2330};
2331
2332
2333/*
2334 * IGU MSIX line structure
2335 */
2336struct igu_msix_vector {
2337        struct regpair address;
2338        __le32 data;
2339        __le32 msix_vector_fields;
2340#define IGU_MSIX_VECTOR_MASK_BIT_MASK      0x1
2341#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT     0
2342#define IGU_MSIX_VECTOR_RESERVED0_MASK     0x7FFF
2343#define IGU_MSIX_VECTOR_RESERVED0_SHIFT    1
2344#define IGU_MSIX_VECTOR_STEERING_TAG_MASK  0xFF
2345#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
2346#define IGU_MSIX_VECTOR_RESERVED1_MASK     0xFF
2347#define IGU_MSIX_VECTOR_RESERVED1_SHIFT    24
2348};
2349
2350
/* Mstorm core connection aggregation context */
struct mstorm_core_conn_ag_ctx {
        u8 byte0 /* cdu_validation */;
        u8 byte1 /* state */;
        u8 flags0;
#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
        u8 flags1;
#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
        __le16 word0 /* word0 */;
        __le16 word1 /* word1 */;
        __le32 reg0 /* reg0 */;
        __le32 reg1 /* reg1 */;
};
2387
2388
/*
 * per encapsulation type enabling flags (one enable bit per tunnel
 * encapsulation type; bit positions per the SHIFT values below).
 * NOTE(review): presumably mirrors the parser PRS_REG_ENCAPSULATION_TYPE_EN
 * register layout - confirm against the register file.
 */
struct prs_reg_encapsulation_type_en {
        u8 flags;
/* bit 0: Enable bit for Ethernet-over-GRE (L2 GRE) encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK     0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT    0
/* bit 1: Enable bit for IP-over-GRE (IP GRE) encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK      0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT     1
/* bit 2: Enable bit for VXLAN encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK            0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT           2
/* bit 3: Enable bit for T-Tag encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK            0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT           3
/* bit 4: Enable bit for Ethernet-over-GENEVE (L2 GENEVE) encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK  0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
/* bit 5: Enable bit for IP-over-GENEVE (IP GENEVE) encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK   0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT  5
/* bits 6-7: reserved */
#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK                0x3
#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT               6
};
2415
2416
/* TPH (TLP Processing Hint) steering-tag hint values used by the PXP block;
 * each value describes the expected access pattern for the target memory.
 */
enum pxp_tph_st_hint {
        TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
        TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
/* Device Write and Host Read, or Host Write and Device Read */
        TPH_ST_HINT_TARGET,
/* Device Write and Host Read, or Host Write and Device Read - with temporal
 * reuse
 */
        TPH_ST_HINT_TARGET_PRIO,
        MAX_PXP_TPH_ST_HINT
};
2428
2429
/*
 * QM hardware structure of enable bypass credit mask. Each bit, when set,
 * bypasses the credit check named by the field (per field name: line VOQ,
 * PF WFQ, VPORT WFQ, PF RL, VPORT QCN RL, FW pause - confirm against QM doc).
 */
struct qm_rf_bypass_mask {
        u8 flags;
/* bit 0 */
#define QM_RF_BYPASS_MASK_LINEVOQ_MASK    0x1
#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT   0
/* bit 1: reserved */
#define QM_RF_BYPASS_MASK_RESERVED0_MASK  0x1
#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
/* bit 2 */
#define QM_RF_BYPASS_MASK_PFWFQ_MASK      0x1
#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT     2
/* bit 3 */
#define QM_RF_BYPASS_MASK_VPWFQ_MASK      0x1
#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT     3
/* bit 4 */
#define QM_RF_BYPASS_MASK_PFRL_MASK       0x1
#define QM_RF_BYPASS_MASK_PFRL_SHIFT      4
/* bit 5 */
#define QM_RF_BYPASS_MASK_VPQCNRL_MASK    0x1
#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT   5
/* bit 6 */
#define QM_RF_BYPASS_MASK_FWPAUSE_MASK    0x1
#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT   6
/* bit 7: reserved */
#define QM_RF_BYPASS_MASK_RESERVED1_MASK  0x1
#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
};
2452
2453
/*
 * QM hardware structure of opportunistic credit mask. Same credit sources as
 * qm_rf_bypass_mask plus byte VOQ and queue-empty, packed into 16 bits.
 */
struct qm_rf_opportunistic_mask {
        __le16 flags;
/* bit 0 */
#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK     0x1
#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT    0
/* bit 1 */
#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK     0x1
#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT    1
/* bit 2 */
#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK       0x1
#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT      2
/* bit 3 */
#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK       0x1
#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT      3
/* bit 4 */
#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK        0x1
#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT       4
/* bit 5 */
#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK     0x1
#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT    5
/* bit 6 */
#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK     0x1
#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT    6
/* bit 7: reserved */
#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK   0x1
#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT  7
/* bit 8 */
#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK  0x1
#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
/* bits 9-15: reserved */
#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK   0x7F
#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT  9
};
2480
2481
/*
 * QM hardware structure of QM map memory (one entry per physical queue,
 * packed into a single little-endian dword).
 */
struct qm_rf_pq_map {
        __le32 reg;
/* bit 0: PQ active */
#define QM_RF_PQ_MAP_PQ_VALID_MASK          0x1 /* PQ active */
#define QM_RF_PQ_MAP_PQ_VALID_SHIFT         0
/* bits 1-8: RL ID */
#define QM_RF_PQ_MAP_RL_ID_MASK             0xFF /* RL ID */
#define QM_RF_PQ_MAP_RL_ID_SHIFT            1
/* bits 9-17: the first PQ associated with the VPORT and VOQ of this PQ */
#define QM_RF_PQ_MAP_VP_PQ_ID_MASK          0x1FF
#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT         9
/* bits 18-22: VOQ */
#define QM_RF_PQ_MAP_VOQ_MASK               0x1F /* VOQ */
#define QM_RF_PQ_MAP_VOQ_SHIFT              18
/* bits 23-24: WRR weight group */
#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK  0x3 /* WRR weight */
#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
/* bit 25: RL active */
#define QM_RF_PQ_MAP_RL_VALID_MASK          0x1 /* RL active */
#define QM_RF_PQ_MAP_RL_VALID_SHIFT         25
/* bits 26-31: reserved */
#define QM_RF_PQ_MAP_RESERVED_MASK          0x3F
#define QM_RF_PQ_MAP_RESERVED_SHIFT         26
};
2503
2504
/*
 * Completion params for aggregated interrupt completion
 */
struct sdm_agg_int_comp_params {
        __le16 params;
/* bits 0-5: the number of aggregated interrupt, 0-31.
 * NOTE(review): the stated range 0-31 fits 5 bits but the field is 6 bits
 * wide (mask 0x3F) - confirm the real upper bound against the firmware spec.
 */
#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK      0x3F
#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT     0
/* bit 6: 1 - set a bit in aggregated vector, 0 - don't set */
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK  0x1
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
/* bits 7-15: Number of bit in the aggregated vector, 0-279 (TBD) */
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK     0x1FF
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT    7
};
2520
2521
/*
 * SDM operation gen command (generate aggregative interrupt). The inline
 * bit-range comments refer to positions within the single command dword.
 */
struct sdm_op_gen {
        __le32 command;
/* completion parameters, bits 0-15 (see sdm_agg_int_comp_params) */
#define SDM_OP_GEN_COMP_PARAM_MASK  0xFFFF
#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
#define SDM_OP_GEN_COMP_TYPE_MASK   0xF /* completion type 16-19 */
#define SDM_OP_GEN_COMP_TYPE_SHIFT  16
#define SDM_OP_GEN_RESERVED_MASK    0xFFF /* reserved 20-31 */
#define SDM_OP_GEN_RESERVED_SHIFT   20
};
2535
/*
 * Ystorm aggregative context for core connections. Same flag layout as
 * mstorm_core_conn_ag_ctx plus additional scratch words/registers; field
 * names are the generic firmware-generated ones.
 */
struct ystorm_core_conn_ag_ctx {
        u8 byte0 /* cdu_validation */;
        u8 byte1 /* state */;
        u8 flags0;
#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
        u8 flags1;
#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
        u8 byte2 /* byte2 */;
        u8 byte3 /* byte3 */;
        __le16 word0 /* word0 */;
        __le32 reg0 /* reg0 */;
        __le32 reg1 /* reg1 */;
        __le16 word1 /* word1 */;
        __le16 word2 /* word2 */;
        __le16 word3 /* word3 */;
        __le16 word4 /* word4 */;
        __le32 reg2 /* reg2 */;
        __le32 reg3 /* reg3 */;
};
2579
/*********/
/* DEBUG */
/*********/

/* Magic value identifying a valid MFW trace buffer (see mcp_trace.signature) */
#define MFW_TRACE_SIGNATURE     0x25071946

/* The trace in the buffer: each entry packs the event id (bits 0-15) and the
 * parameter size (bits 16-19).
 */
#define MFW_TRACE_EVENTID_MASK          0x00ffff
#define MFW_TRACE_PRM_SIZE_MASK         0x0f0000
#define MFW_TRACE_PRM_SIZE_OFFSET       16
/* Size of a trace entry header; units presumably bytes - TODO confirm */
#define MFW_TRACE_ENTRY_SIZE            3
2591
/* Header of the MFW trace ring buffer kept in the scratchpad (see
 * static_init.trace / trace_buffer). Producer/consumer offsets are relative
 * to the trace buffer.
 */
struct mcp_trace {
        u32     signature;      /* Help to identify that the trace is valid
                                 * (expected MFW_TRACE_SIGNATURE)
                                 */
        u32     size;           /* the size of the trace buffer in bytes */
        u32     curr_level;     /* 2 - all will be written to the buffer
                                 * 1 - debug trace will not be written
                                 * 0 - just errors will be written to the buffer
                                 */
        /* a bit per module, 1 means mask it off, 0 means add it to the trace
         * buffer
         */
        u32     modules_mask[2];

        /* Warning: the following pointers are assumed to be 32bits as they are
         * used only in the MFW
         */
        /* The next trace will be written to this offset */
        u32     trace_prod;
        /* The oldest valid trace starts at this offset (usually very close
         * after the current producer)
         */
        u32     trace_oldest;
};
2614
/* Indices into static_init.sections[] - the sections of the MFW scratchpad */
enum spad_sections {
        SPAD_SECTION_TRACE,
        SPAD_SECTION_NVM_CFG,
        SPAD_SECTION_PUBLIC,
        SPAD_SECTION_PRIVATE,
        SPAD_SECTION_MAX
};
2622
2623#define MCP_TRACE_SIZE          2048    /* 2kb */
2624
2625/* This section is located at a fixed location in the beginning of the
2626 * scratchpad, to ensure that the MCP trace is not run over during MFW upgrade.
2627 * All the rest of data has a floating location which differs from version to
2628 * version, and is pointed by the mcp_meta_data below.
2629 * Moreover, the spad_layout section is part of the MFW firmware, and is loaded
2630 * with it from nvram in order to clear this portion.
2631 */
/* Fixed layout at the start of the MFW scratchpad (see the comment block
 * above). The interleaved accessor macros (SECTION, MCP_TRACE_P, ...) rely on
 * STRUCT_OFFSET, defined elsewhere, and are presumably used only on the MFW
 * side - TODO confirm.
 */
struct static_init {
        u32 num_sections;       /* number of valid entries in sections[] */
        offsize_t sections[SPAD_SECTION_MAX];
#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_]))))

        struct mcp_trace trace; /* trace ring-buffer header */
#define MCP_TRACE_P ((struct mcp_trace *)(STRUCT_OFFSET(trace)))
        u8 trace_buffer[MCP_TRACE_SIZE];
#define MCP_TRACE_BUF ((u8 *)(STRUCT_OFFSET(trace_buffer)))
        /* running_mfw has the same definition as in nvm_map.h.
         * This bit indicate both the running dir, and the running bundle.
         * It is set once when the LIM is loaded.
         */
        u32 running_mfw;
#define RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(running_mfw))))
        u32 build_time;
#define MFW_BUILD_TIME (*((u32 *)(STRUCT_OFFSET(build_time))))
        u32 reset_type;
#define RESET_TYPE (*((u32 *)(STRUCT_OFFSET(reset_type))))
        u32 mfw_secure_mode;
#define MFW_SECURE_MODE (*((u32 *)(STRUCT_OFFSET(mfw_secure_mode))))
        u16 pme_status_pf_bitmap;       /* one PME status bit per PF */
#define PME_STATUS_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_status_pf_bitmap))))
        u16 pme_enable_pf_bitmap;       /* one PME enable bit per PF */
#define PME_ENABLE_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_enable_pf_bitmap))))
        u32 mim_nvm_addr;
        u32 mim_start_addr;
        /* PCIe link parameters, packed per the field masks below */
        u32 ah_pcie_link_params;
#define AH_PCIE_LINK_PARAMS_LINK_SPEED_MASK     (0x000000ff)
#define AH_PCIE_LINK_PARAMS_LINK_SPEED_SHIFT    (0)
#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_MASK     (0x0000ff00)
#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_SHIFT    (8)
#define AH_PCIE_LINK_PARAMS_ASPM_MODE_MASK      (0x00ff0000)
#define AH_PCIE_LINK_PARAMS_ASPM_MODE_SHIFT     (16)
#define AH_PCIE_LINK_PARAMS_ASPM_CAP_MASK       (0xff000000)
#define AH_PCIE_LINK_PARAMS_ASPM_CAP_SHIFT      (24)
#define AH_PCIE_LINK_PARAMS (*((u32 *)(STRUCT_OFFSET(ah_pcie_link_params))))

        u32 rsrv_persist[5];    /* Persist reserved for MFW upgrades */
};
2672
/* Magic value marking a valid NVM image - TODO confirm exact usage */
#define NVM_MAGIC_VALUE         0x669955aa

/* NVM image/partition type identifiers. Values are explicit and sparse
 * (e.g. 0x1e and 0x2d-0x2f are unassigned).
 * NOTE(review): NVM_TYPE_MAX evaluates to 0x33 (it follows the 0x32 entry)
 * and therefore does NOT bound NVM_TYPE_ROM_TEST (0xf0), which is declared
 * earlier in the list - do not use NVM_TYPE_MAX as an array size/upper bound
 * without accounting for that.
 */
enum nvm_image_type {
        NVM_TYPE_TIM1 = 0x01,
        NVM_TYPE_TIM2 = 0x02,
        NVM_TYPE_MIM1 = 0x03,
        NVM_TYPE_MIM2 = 0x04,
        NVM_TYPE_MBA = 0x05,
        NVM_TYPE_MODULES_PN = 0x06,
        NVM_TYPE_VPD = 0x07,
        NVM_TYPE_MFW_TRACE1 = 0x08,
        NVM_TYPE_MFW_TRACE2 = 0x09,
        NVM_TYPE_NVM_CFG1 = 0x0a,
        NVM_TYPE_L2B = 0x0b,
        NVM_TYPE_DIR1 = 0x0c,
        NVM_TYPE_EAGLE_FW1 = 0x0d,
        NVM_TYPE_FALCON_FW1 = 0x0e,
        NVM_TYPE_PCIE_FW1 = 0x0f,
        NVM_TYPE_HW_SET = 0x10,
        NVM_TYPE_LIM = 0x11,
        NVM_TYPE_AVS_FW1 = 0x12,
        NVM_TYPE_DIR2 = 0x13,
        NVM_TYPE_CCM = 0x14,
        NVM_TYPE_EAGLE_FW2 = 0x15,
        NVM_TYPE_FALCON_FW2 = 0x16,
        NVM_TYPE_PCIE_FW2 = 0x17,
        NVM_TYPE_AVS_FW2 = 0x18,
        NVM_TYPE_INIT_HW = 0x19,
        NVM_TYPE_DEFAULT_CFG = 0x1a,
        NVM_TYPE_MDUMP = 0x1b,
        NVM_TYPE_META = 0x1c,
        NVM_TYPE_ISCSI_CFG = 0x1d,
        NVM_TYPE_FCOE_CFG = 0x1f,
        NVM_TYPE_ETH_PHY_FW1 = 0x20,
        NVM_TYPE_ETH_PHY_FW2 = 0x21,
        NVM_TYPE_BDN = 0x22,
        NVM_TYPE_8485X_PHY_FW = 0x23,
        NVM_TYPE_PUB_KEY = 0x24,
        NVM_TYPE_RECOVERY = 0x25,
        NVM_TYPE_PLDM = 0x26,
        NVM_TYPE_UPK1 = 0x27,
        NVM_TYPE_UPK2 = 0x28,
        NVM_TYPE_MASTER_KC = 0x29,
        NVM_TYPE_BACKUP_KC = 0x2a,
        NVM_TYPE_HW_DUMP = 0x2b,
        NVM_TYPE_HW_DUMP_OUT = 0x2c,
        NVM_TYPE_BIN_NVM_META = 0x30,
        NVM_TYPE_ROM_TEST = 0xf0,
        NVM_TYPE_88X33X0_PHY_FW = 0x31,
        NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32,
        NVM_TYPE_MAX,
};
2725
2726#define DIR_ID_1    (0)
2727
2728#endif /* __ECORE_HSI_COMMON__ */
2729