uboot/arch/arm/include/asm/arch-octeontx2/csrs/csrs-cgx.h
   1/* SPDX-License-Identifier:    GPL-2.0
   2 *
   3 * Copyright (C) 2020 Marvell International Ltd.
   4 *
   5 * https://spdx.org/licenses
   6 */
   7#ifndef __CSRS_CGX_H__
   8#define __CSRS_CGX_H__
   9
  10/**
  11 * @file
  12 *
  13 * Configuration and status register (CSR) address and type definitions for
  14 * CGX.
  15 *
  16 * This file is auto generated.  Do not edit.
  17 *
  18 */
  19
  20/**
  21 * Enumeration cgx_bar_e
  22 *
  23 * CGX Base Address Register Enumeration Enumerates the base address
  24 * registers.
  25 */
  26#define CGX_BAR_E_CGXX_PF_BAR0(a) (0x87e0e0000000ll + 0x1000000ll * (a))
  27#define CGX_BAR_E_CGXX_PF_BAR0_SIZE 0x100000ull
  28#define CGX_BAR_E_CGXX_PF_BAR4(a) (0x87e0e0400000ll + 0x1000000ll * (a))
  29#define CGX_BAR_E_CGXX_PF_BAR4_SIZE 0x100000ull
  30
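/*
 * Illustrative sketch, not part of the auto-generated definitions: a CGX
 * register's physical address is the instance's BAR0 base plus the
 * per-register byte offset returned by the CGXX_*() helpers later in this
 * file. The helper name below is hypothetical.
 */
static inline u64 cgx_bar0_reg_addr(u64 cgx, u64 offset)
{
        /* e.g. CGX_BAR_E_CGXX_PF_BAR0(1) == 0x87e0e1000000ull */
        return CGX_BAR_E_CGXX_PF_BAR0(cgx) + offset;
}
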
  31/**
  32 * Enumeration cgx_int_vec_e
  33 *
   34 * CGX MSI-X Vector Enumeration Enumerates the MSI-X interrupt vectors.
  35 */
  36#define CGX_INT_VEC_E_CMRX_INT(a) (0 + 9 * (a))
  37#define CGX_INT_VEC_E_CMRX_SW(a) (0x26 + (a))
  38#define CGX_INT_VEC_E_CMR_MEM_INT (0x24)
  39#define CGX_INT_VEC_E_GMPX_GMI_RX_INT(a) (5 + 9 * (a))
  40#define CGX_INT_VEC_E_GMPX_GMI_TX_INT(a) (6 + 9 * (a))
  41#define CGX_INT_VEC_E_GMPX_GMI_WOL_INT(a) (7 + 9 * (a))
  42#define CGX_INT_VEC_E_GMPX_PCS_INT(a) (4 + 9 * (a))
  43#define CGX_INT_VEC_E_SMUX_RX_INT(a) (2 + 9 * (a))
  44#define CGX_INT_VEC_E_SMUX_RX_WOL_INT(a) (8 + 9 * (a))
  45#define CGX_INT_VEC_E_SMUX_TX_INT(a) (3 + 9 * (a))
  46#define CGX_INT_VEC_E_SPUX_INT(a) (1 + 9 * (a))
  47#define CGX_INT_VEC_E_SW (0x25)
  48
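/*
 * Illustrative sketch, not part of the auto-generated definitions: the
 * per-LMAC vectors are spaced nine apart, so the vector number for a given
 * LMAC follows directly from the macros above. The helper name below is
 * hypothetical.
 */
static inline u64 cgx_spu_msix_vec(u64 lmac)
{
        /* e.g. LMAC 2 -> vector 1 + 9 * 2 = 19 */
        return CGX_INT_VEC_E_SPUX_INT(lmac);
}
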
  49/**
  50 * Enumeration cgx_lmac_types_e
  51 *
  52 * CGX LMAC Type Enumeration Enumerates the LMAC Types that CGX supports.
  53 */
  54#define CGX_LMAC_TYPES_E_FIFTYG_R (8)
  55#define CGX_LMAC_TYPES_E_FORTYG_R (4)
  56#define CGX_LMAC_TYPES_E_HUNDREDG_R (9)
  57#define CGX_LMAC_TYPES_E_QSGMII (6)
  58#define CGX_LMAC_TYPES_E_RGMII (5)
  59#define CGX_LMAC_TYPES_E_RXAUI (2)
  60#define CGX_LMAC_TYPES_E_SGMII (0)
  61#define CGX_LMAC_TYPES_E_TENG_R (3)
  62#define CGX_LMAC_TYPES_E_TWENTYFIVEG_R (7)
  63#define CGX_LMAC_TYPES_E_USXGMII (0xa)
  64#define CGX_LMAC_TYPES_E_XAUI (1)
  65
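/*
 * Illustrative sketch, not part of the auto-generated definitions: a name
 * table for the LMAC type codes above, useful when decoding
 * CGX()_CMR()_CONFIG[LMAC_TYPE] in debug output. The helper name and the
 * short names are this sketch's own.
 */
static inline const char *cgx_lmac_type_name(u64 lmac_type)
{
        static const char * const names[] = {
                [CGX_LMAC_TYPES_E_SGMII]         = "SGMII",
                [CGX_LMAC_TYPES_E_XAUI]          = "XAUI",
                [CGX_LMAC_TYPES_E_RXAUI]         = "RXAUI",
                [CGX_LMAC_TYPES_E_TENG_R]        = "10G_R",
                [CGX_LMAC_TYPES_E_FORTYG_R]      = "40G_R",
                [CGX_LMAC_TYPES_E_RGMII]         = "RGMII",
                [CGX_LMAC_TYPES_E_QSGMII]        = "QSGMII",
                [CGX_LMAC_TYPES_E_TWENTYFIVEG_R] = "25G_R",
                [CGX_LMAC_TYPES_E_FIFTYG_R]      = "50G_R",
                [CGX_LMAC_TYPES_E_HUNDREDG_R]    = "100G_R",
                [CGX_LMAC_TYPES_E_USXGMII]       = "USXGMII",
        };

        if (lmac_type >= sizeof(names) / sizeof(names[0]) || !names[lmac_type])
                return "UNKNOWN";
        return names[lmac_type];
}
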
  66/**
  67 * Enumeration cgx_opcode_e
  68 *
  69 * INTERNAL: CGX Error Opcode Enumeration  Enumerates the error opcodes
  70 * created by CGX and presented to NCSI/NIX.
  71 */
  72#define CGX_OPCODE_E_RE_FCS (7)
  73#define CGX_OPCODE_E_RE_FCS_RCV (8)
  74#define CGX_OPCODE_E_RE_JABBER (2)
  75#define CGX_OPCODE_E_RE_NONE (0)
  76#define CGX_OPCODE_E_RE_PARTIAL (1)
  77#define CGX_OPCODE_E_RE_RX_CTL (0xb)
  78#define CGX_OPCODE_E_RE_SKIP (0xc)
  79#define CGX_OPCODE_E_RE_TERMINATE (9)
  80
  81/**
  82 * Enumeration cgx_spu_br_train_cst_e
  83 *
  84 * INTERNAL: CGX Training Coefficient Status Enumeration  2-bit status
  85 * for each coefficient as defined in IEEE 802.3, Table 72-5.
  86 */
  87#define CGX_SPU_BR_TRAIN_CST_E_MAXIMUM (3)
  88#define CGX_SPU_BR_TRAIN_CST_E_MINIMUM (2)
  89#define CGX_SPU_BR_TRAIN_CST_E_NOT_UPDATED (0)
  90#define CGX_SPU_BR_TRAIN_CST_E_UPDATED (1)
  91
  92/**
  93 * Enumeration cgx_spu_br_train_cup_e
  94 *
   95 * INTERNAL: CGX Training Coefficient Enumeration  2-bit command for each
  96 * coefficient as defined in IEEE 802.3, Table 72-4.
  97 */
  98#define CGX_SPU_BR_TRAIN_CUP_E_DECREMENT (1)
  99#define CGX_SPU_BR_TRAIN_CUP_E_HOLD (0)
 100#define CGX_SPU_BR_TRAIN_CUP_E_INCREMENT (2)
 101#define CGX_SPU_BR_TRAIN_CUP_E_RSV_CMD (3)
 102
 103/**
 104 * Enumeration cgx_usxgmii_rate_e
 105 *
 106 * CGX USXGMII Rate Enumeration Enumerates the USXGMII sub-port type
 107 * rate, CGX()_SPU()_CONTROL1[USXGMII_RATE].  Selecting a rate higher
 108 * than the maximum allowed for a given port sub-type (specified by
 109 * CGX()_SPU()_CONTROL1[USXGMII_TYPE]), e.g., selecting ::RATE_2HG (2.5
 110 * Gbps) for CGX_USXGMII_TYPE_E::SXGMII_2G, will cause unpredictable
 111 * behavior. USXGMII hardware-based autonegotiation may change this
 112 * setting.
 113 */
 114#define CGX_USXGMII_RATE_E_RATE_100M (1)
 115#define CGX_USXGMII_RATE_E_RATE_10G (5)
 116#define CGX_USXGMII_RATE_E_RATE_10M (0)
 117#define CGX_USXGMII_RATE_E_RATE_1G (2)
 118#define CGX_USXGMII_RATE_E_RATE_20G (6)
 119#define CGX_USXGMII_RATE_E_RATE_2HG (3)
 120#define CGX_USXGMII_RATE_E_RATE_5G (4)
 121#define CGX_USXGMII_RATE_E_RSV_RATE (7)
 122
 123/**
 124 * Enumeration cgx_usxgmii_type_e
 125 *
 126 * CGX USXGMII Port Sub-Type Enumeration Enumerates the USXGMII sub-port
 127 * type, CGX()_SPU()_CONTROL1[USXGMII_TYPE].  The description indicates
 128 * the maximum rate and the maximum number of ports (LMACs) for each sub-
 129 * type. The minimum rate for any port is 10M. The rate selection for
 130 * each LMAC is made using CGX()_SPU()_CONTROL1[USXGMII_RATE] and the
 131 * number of active ports/LMACs is implicitly determined by the value
 132 * given to CGX()_CMR()_CONFIG[ENABLE] for each LMAC.  Selecting a rate
 133 * higher than the maximum allowed for a given port sub-type or enabling
 134 * more LMACs than the maximum allowed for a given port sub-type will
 135 * cause unpredictable behavior.
 136 */
 137#define CGX_USXGMII_TYPE_E_DXGMII_10G (3)
 138#define CGX_USXGMII_TYPE_E_DXGMII_20G (5)
 139#define CGX_USXGMII_TYPE_E_DXGMII_5G (4)
 140#define CGX_USXGMII_TYPE_E_QXGMII_10G (7)
 141#define CGX_USXGMII_TYPE_E_QXGMII_20G (6)
 142#define CGX_USXGMII_TYPE_E_SXGMII_10G (0)
 143#define CGX_USXGMII_TYPE_E_SXGMII_2G (2)
 144#define CGX_USXGMII_TYPE_E_SXGMII_5G (1)
 145
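/*
 * Illustrative sketch, not part of the auto-generated definitions: the
 * RATE_* codes above increase monotonically with link speed, so guarding
 * against the "unpredictable behavior" case described for
 * CGX()_SPU()_CONTROL1[USXGMII_RATE] reduces to a comparison against the
 * maximum rate code allowed by the configured USXGMII_TYPE, supplied by
 * the caller here. The helper name is hypothetical.
 */
static inline int cgx_usxgmii_rate_is_valid(u64 rate, u64 max_rate_for_type)
{
        return rate != CGX_USXGMII_RATE_E_RSV_RATE && rate <= max_rate_for_type;
}
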
 146/**
 147 * Structure cgx_spu_br_lane_train_status_s
 148 *
  149 * INTERNAL: CGX Lane Training Status Structure  This is the group of lane
 150 * status bits for a single lane in the BASE-R PMD status register (MDIO
 151 * address 1.151) as defined in IEEE 802.3ba-2010, Table 45-55.
 152 */
 153union cgx_spu_br_lane_train_status_s {
 154        u32 u;
 155        struct cgx_spu_br_lane_train_status_s_s {
 156                u32 rx_trained                       : 1;
 157                u32 frame_lock                       : 1;
 158                u32 training                         : 1;
 159                u32 training_failure                 : 1;
 160                u32 reserved_4_31                    : 28;
 161        } s;
 162        /* struct cgx_spu_br_lane_train_status_s_s cn; */
 163};
 164
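/*
 * Illustrative sketch, not part of the auto-generated definitions:
 * decoding a per-lane training status word through the union above. The
 * helper name is hypothetical.
 */
static inline int cgx_br_lane_trained(u32 status_word)
{
        union cgx_spu_br_lane_train_status_s st = { .u = status_word };

        /* trained and no failure reported for this lane */
        return st.s.rx_trained && !st.s.training_failure;
}
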
 165/**
 166 * Structure cgx_spu_br_train_cup_s
 167 *
  168 * INTERNAL: CGX Lane Training Coefficient Structure  This is the
 169 * coefficient update field of the BASE-R link training packet as defined
 170 * in IEEE 802.3, Table 72-4.
 171 */
 172union cgx_spu_br_train_cup_s {
 173        u32 u;
 174        struct cgx_spu_br_train_cup_s_s {
 175                u32 pre_cup                          : 2;
 176                u32 main_cup                         : 2;
 177                u32 post_cup                         : 2;
 178                u32 reserved_6_11                    : 6;
 179                u32 init                             : 1;
 180                u32 preset                           : 1;
 181                u32 reserved_14_31                   : 18;
 182        } s;
 183        struct cgx_spu_br_train_cup_s_cn {
 184                u32 pre_cup                          : 2;
 185                u32 main_cup                         : 2;
 186                u32 post_cup                         : 2;
 187                u32 reserved_6_11                    : 6;
 188                u32 init                             : 1;
 189                u32 preset                           : 1;
 190                u32 reserved_14_15                   : 2;
 191                u32 reserved_16_31                   : 16;
 192        } cn;
 193};
 194
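/*
 * Illustrative sketch, not part of the auto-generated definitions:
 * composing a BASE-R coefficient update word with the union above, here
 * requesting PRESET with all coefficients held (IEEE 802.3 Table 72-4).
 * The helper name is hypothetical.
 */
static inline u32 cgx_spu_br_train_cup_preset(void)
{
        union cgx_spu_br_train_cup_s cup = { .u = 0 };

        cup.s.pre_cup  = CGX_SPU_BR_TRAIN_CUP_E_HOLD;
        cup.s.main_cup = CGX_SPU_BR_TRAIN_CUP_E_HOLD;
        cup.s.post_cup = CGX_SPU_BR_TRAIN_CUP_E_HOLD;
        cup.s.preset   = 1;

        return cup.u;
}
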
 195/**
 196 * Structure cgx_spu_br_train_rep_s
 197 *
  198 * INTERNAL: CGX Training Report Structure  This is the status report
 199 * field of the BASE-R link training packet as defined in IEEE 802.3,
 200 * Table 72-5.
 201 */
 202union cgx_spu_br_train_rep_s {
 203        u32 u;
 204        struct cgx_spu_br_train_rep_s_s {
 205                u32 pre_cst                          : 2;
 206                u32 main_cst                         : 2;
 207                u32 post_cst                         : 2;
 208                u32 reserved_6_14                    : 9;
 209                u32 rx_ready                         : 1;
 210                u32 reserved_16_31                   : 16;
 211        } s;
 212        /* struct cgx_spu_br_train_rep_s_s cn; */
 213};
 214
 215/**
 216 * Structure cgx_spu_sds_cu_s
 217 *
  218 * INTERNAL: CGX Training Coefficient Structure  This structure is
 219 * similar to CGX_SPU_BR_TRAIN_CUP_S format, but with reserved fields
 220 * removed and [RCVR_READY] field added.
 221 */
 222union cgx_spu_sds_cu_s {
 223        u32 u;
 224        struct cgx_spu_sds_cu_s_s {
 225                u32 pre_cu                           : 2;
 226                u32 main_cu                          : 2;
 227                u32 post_cu                          : 2;
 228                u32 initialize                       : 1;
 229                u32 preset                           : 1;
 230                u32 rcvr_ready                       : 1;
 231                u32 reserved_9_31                    : 23;
 232        } s;
 233        /* struct cgx_spu_sds_cu_s_s cn; */
 234};
 235
 236/**
 237 * Structure cgx_spu_sds_skew_status_s
 238 *
 239 * CGX Skew Status Structure Provides receive skew information detected
 240 * for a physical SerDes lane when it is assigned to a multilane
 241 * LMAC/LPCS. Contents are valid when RX deskew is done for the
 242 * associated LMAC/LPCS.
 243 */
 244union cgx_spu_sds_skew_status_s {
 245        u32 u;
 246        struct cgx_spu_sds_skew_status_s_s {
 247                u32 am_timestamp                     : 12;
 248                u32 reserved_12_15                   : 4;
 249                u32 am_lane_id                       : 5;
 250                u32 reserved_21_22                   : 2;
 251                u32 lane_skew                        : 7;
 252                u32 reserved_30_31                   : 2;
 253        } s;
 254        /* struct cgx_spu_sds_skew_status_s_s cn; */
 255};
 256
 257/**
 258 * Structure cgx_spu_sds_sr_s
 259 *
 260 * INTERNAL: CGX Lane Training Coefficient Structure  Similar to
 261 * CGX_SPU_BR_TRAIN_REP_S format, but with reserved and RX ready fields
 262 * removed.
 263 */
 264union cgx_spu_sds_sr_s {
 265        u32 u;
 266        struct cgx_spu_sds_sr_s_s {
 267                u32 pre_status                       : 2;
 268                u32 main_status                      : 2;
 269                u32 post_status                      : 2;
 270                u32 reserved_6_31                    : 26;
 271        } s;
 272        /* struct cgx_spu_sds_sr_s_s cn; */
 273};
 274
 275/**
 276 * Register (RSL) cgx#_active_pc
 277 *
 278 * CGX ACTIVE PC Register This register counts the conditional clocks for
 279 * power management.
 280 */
 281union cgxx_active_pc {
 282        u64 u;
 283        struct cgxx_active_pc_s {
 284                u64 cnt                              : 64;
 285        } s;
 286        /* struct cgxx_active_pc_s cn; */
 287};
 288
 289static inline u64 CGXX_ACTIVE_PC(void)
 290        __attribute__ ((pure, always_inline));
 291static inline u64 CGXX_ACTIVE_PC(void)
 292{
 293        return 0x2010;
 294}
 295
 296/**
 297 * Register (RSL) cgx#_cmr#_activity
 298 *
 299 * CGX CMR Activity Registers
 300 */
 301union cgxx_cmrx_activity {
 302        u64 u;
 303        struct cgxx_cmrx_activity_s {
 304                u64 act_tx_lo                        : 1;
 305                u64 act_tx_hi                        : 1;
 306                u64 pause_tx                         : 1;
 307                u64 act_rx_lo                        : 1;
 308                u64 act_rx_hi                        : 1;
 309                u64 pause_rx                         : 1;
 310                u64 reserved_6_63                    : 58;
 311        } s;
 312        /* struct cgxx_cmrx_activity_s cn; */
 313};
 314
 315static inline u64 CGXX_CMRX_ACTIVITY(u64 a)
 316        __attribute__ ((pure, always_inline));
 317static inline u64 CGXX_CMRX_ACTIVITY(u64 a)
 318{
 319        return 0x5f8 + 0x40000 * a;
 320}
 321
 322/**
 323 * Register (RSL) cgx#_cmr#_config
 324 *
 325 * CGX CMR Configuration Registers Logical MAC/PCS configuration
 326 * registers; one per LMAC. The maximum number of LMACs (and maximum LMAC
 327 * ID) that can be enabled by these registers is limited by
 328 * CGX()_CMR_RX_LMACS[LMACS] and CGX()_CMR_TX_LMACS[LMACS].  Internal:
 * \<pre\>
 * Example configurations:
 *   -----------------------------------------------------------------------
 *   Configuration            LMACS  Register           [ENABLE]  [LMAC_TYPE]
 *   -----------------------------------------------------------------------
 *   1x50G+1x25G+1xSGMII      4      CGXn_CMR0_CONFIG   1         8
 *                                   CGXn_CMR1_CONFIG   0         --
 *                                   CGXn_CMR2_CONFIG   1         7
 *                                   CGXn_CMR3_CONFIG   1         0
 *   -----------------------------------------------------------------------
 *   USXGMII                  1-4    CGXn_CMR0_CONFIG   1         a
 *                                   CGXn_CMR1_CONFIG   1         a
 *                                   CGXn_CMR2_CONFIG   1         a
 *                                   CGXn_CMR3_CONFIG   1         a
 *   -----------------------------------------------------------------------
 *   1x100GBASE-R4            1      CGXn_CMR0_CONFIG   1         9
 *                                   CGXn_CMR1_CONFIG   0         --
 *                                   CGXn_CMR2_CONFIG   0         --
 *                                   CGXn_CMR3_CONFIG   0         --
 *   -----------------------------------------------------------------------
 *   2x50GBASE-R2             2      CGXn_CMR0_CONFIG   1         8
 *                                   CGXn_CMR1_CONFIG   1         8
 *                                   CGXn_CMR2_CONFIG   0         --
 *                                   CGXn_CMR3_CONFIG   0         --
 *   -----------------------------------------------------------------------
 *   4x25GBASE-R              4      CGXn_CMR0_CONFIG   1         7
 *                                   CGXn_CMR1_CONFIG   1         7
 *                                   CGXn_CMR2_CONFIG   1         7
 *                                   CGXn_CMR3_CONFIG   1         7
 *   -----------------------------------------------------------------------
 *   QSGMII                   4      CGXn_CMR0_CONFIG   1         6
 *                                   CGXn_CMR1_CONFIG   1         6
 *                                   CGXn_CMR2_CONFIG   1         6
 *                                   CGXn_CMR3_CONFIG   1         6
 *   -----------------------------------------------------------------------
 *   1x40GBASE-R4             1      CGXn_CMR0_CONFIG   1         4
 *                                   CGXn_CMR1_CONFIG   0         --
 *                                   CGXn_CMR2_CONFIG   0         --
 *                                   CGXn_CMR3_CONFIG   0         --
 *   -----------------------------------------------------------------------
 *   4x10GBASE-R              4      CGXn_CMR0_CONFIG   1         3
 *                                   CGXn_CMR1_CONFIG   1         3
 *                                   CGXn_CMR2_CONFIG   1         3
 *                                   CGXn_CMR3_CONFIG   1         3
 *   -----------------------------------------------------------------------
 *   2xRXAUI                  2      CGXn_CMR0_CONFIG   1         2
 *                                   CGXn_CMR1_CONFIG   1         2
 *                                   CGXn_CMR2_CONFIG   0         --
 *                                   CGXn_CMR3_CONFIG   0         --
 *   -----------------------------------------------------------------------
 *   1x10GBASE-X/XAUI/DXAUI   1      CGXn_CMR0_CONFIG   1         1
 *                                   CGXn_CMR1_CONFIG   0         --
 *                                   CGXn_CMR2_CONFIG   0         --
 *                                   CGXn_CMR3_CONFIG   0         --
 *   -----------------------------------------------------------------------
 *   4xSGMII/1000BASE-X       4      CGXn_CMR0_CONFIG   1         0
 *                                   CGXn_CMR1_CONFIG   1         0
 *                                   CGXn_CMR2_CONFIG   1         0
 *                                   CGXn_CMR3_CONFIG   1         0
 *   -----------------------------------------------------------------------
 * \</pre\>
 388 */
 389union cgxx_cmrx_config {
 390        u64 u;
 391        struct cgxx_cmrx_config_s {
 392                u64 lane_to_sds                      : 8;
 393                u64 reserved_8_39                    : 32;
 394                u64 lmac_type                        : 4;
 395                u64 unused                           : 8;
 396                u64 int_beat_gen                     : 1;
 397                u64 data_pkt_tx_en                   : 1;
 398                u64 data_pkt_rx_en                   : 1;
 399                u64 enable                           : 1;
 400                u64 x2p_select                       : 3;
 401                u64 p2x_select                       : 3;
 402                u64 reserved_62_63                   : 2;
 403        } s;
 404        /* struct cgxx_cmrx_config_s cn; */
 405};
 406
 407static inline u64 CGXX_CMRX_CONFIG(u64 a)
 408        __attribute__ ((pure, always_inline));
 409static inline u64 CGXX_CMRX_CONFIG(u64 a)
 410{
 411        return 0 + 0x40000 * a;
 412}
 413
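/*
 * Illustrative sketch, not part of the auto-generated definitions: a
 * read-modify-write of CGX()_CMR()_CONFIG through the union above.
 * Assumes readq()/writeq() MMIO accessors (e.g. from asm/io.h) and
 * cgx_base pointing at the mapped BAR0 of the CGX instance; the helper
 * name is hypothetical.
 */
static inline void cgx_lmac_enable(void *cgx_base, u64 lmac, int enable)
{
        union cgxx_cmrx_config cfg;

        cfg.u = readq(cgx_base + CGXX_CMRX_CONFIG(lmac));
        cfg.s.enable = enable ? 1 : 0;
        cfg.s.data_pkt_rx_en = enable ? 1 : 0;
        cfg.s.data_pkt_tx_en = enable ? 1 : 0;
        writeq(cfg.u, cgx_base + CGXX_CMRX_CONFIG(lmac));
}
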
 414/**
 415 * Register (RSL) cgx#_cmr#_int
 416 *
 417 * CGX CMR Interrupt Register
 418 */
 419union cgxx_cmrx_int {
 420        u64 u;
 421        struct cgxx_cmrx_int_s {
 422                u64 pause_drp                        : 1;
 423                u64 overflw                          : 1;
 424                u64 nic_nxc                          : 1;
 425                u64 nix0_nxc                         : 1;
 426                u64 nix1_nxc                         : 1;
 427                u64 nix0_e_nxc                       : 1;
 428                u64 nix1_e_nxc                       : 1;
 429                u64 reserved_7_63                    : 57;
 430        } s;
 431        /* struct cgxx_cmrx_int_s cn; */
 432};
 433
 434static inline u64 CGXX_CMRX_INT(u64 a)
 435        __attribute__ ((pure, always_inline));
 436static inline u64 CGXX_CMRX_INT(u64 a)
 437{
 438        return 0x40 + 0x40000 * a;
 439}
 440
 441/**
 442 * Register (RSL) cgx#_cmr#_int_ena_w1c
 443 *
 444 * CGX CMR Interrupt Enable Clear Register This register clears interrupt
 445 * enable bits.
 446 */
 447union cgxx_cmrx_int_ena_w1c {
 448        u64 u;
 449        struct cgxx_cmrx_int_ena_w1c_s {
 450                u64 pause_drp                        : 1;
 451                u64 overflw                          : 1;
 452                u64 nic_nxc                          : 1;
 453                u64 nix0_nxc                         : 1;
 454                u64 nix1_nxc                         : 1;
 455                u64 nix0_e_nxc                       : 1;
 456                u64 nix1_e_nxc                       : 1;
 457                u64 reserved_7_63                    : 57;
 458        } s;
 459        /* struct cgxx_cmrx_int_ena_w1c_s cn; */
 460};
 461
 462static inline u64 CGXX_CMRX_INT_ENA_W1C(u64 a)
 463        __attribute__ ((pure, always_inline));
 464static inline u64 CGXX_CMRX_INT_ENA_W1C(u64 a)
 465{
 466        return 0x50 + 0x40000 * a;
 467}
 468
 469/**
 470 * Register (RSL) cgx#_cmr#_int_ena_w1s
 471 *
 472 * CGX CMR Interrupt Enable Set Register This register sets interrupt
 473 * enable bits.
 474 */
 475union cgxx_cmrx_int_ena_w1s {
 476        u64 u;
 477        struct cgxx_cmrx_int_ena_w1s_s {
 478                u64 pause_drp                        : 1;
 479                u64 overflw                          : 1;
 480                u64 nic_nxc                          : 1;
 481                u64 nix0_nxc                         : 1;
 482                u64 nix1_nxc                         : 1;
 483                u64 nix0_e_nxc                       : 1;
 484                u64 nix1_e_nxc                       : 1;
 485                u64 reserved_7_63                    : 57;
 486        } s;
 487        /* struct cgxx_cmrx_int_ena_w1s_s cn; */
 488};
 489
 490static inline u64 CGXX_CMRX_INT_ENA_W1S(u64 a)
 491        __attribute__ ((pure, always_inline));
 492static inline u64 CGXX_CMRX_INT_ENA_W1S(u64 a)
 493{
 494        return 0x58 + 0x40000 * a;
 495}
 496
 497/**
 498 * Register (RSL) cgx#_cmr#_int_w1s
 499 *
 500 * CGX CMR Interrupt Set Register This register sets interrupt bits.
 501 */
 502union cgxx_cmrx_int_w1s {
 503        u64 u;
 504        struct cgxx_cmrx_int_w1s_s {
 505                u64 pause_drp                        : 1;
 506                u64 overflw                          : 1;
 507                u64 nic_nxc                          : 1;
 508                u64 nix0_nxc                         : 1;
 509                u64 nix1_nxc                         : 1;
 510                u64 nix0_e_nxc                       : 1;
 511                u64 nix1_e_nxc                       : 1;
 512                u64 reserved_7_63                    : 57;
 513        } s;
 514        /* struct cgxx_cmrx_int_w1s_s cn; */
 515};
 516
 517static inline u64 CGXX_CMRX_INT_W1S(u64 a)
 518        __attribute__ ((pure, always_inline));
 519static inline u64 CGXX_CMRX_INT_W1S(u64 a)
 520{
 521        return 0x48 + 0x40000 * a;
 522}
 523
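/*
 * Illustrative sketch, not part of the auto-generated definitions: the
 * usual RSL interrupt pattern with the registers above, reading the
 * latched bits and writing them back to clear (W1C), with the matching
 * ENA_W1S/ENA_W1C registers controlling delivery. Assumes
 * readq()/writeq() from asm/io.h and cgx_base pointing at the mapped
 * BAR0; the helper name is hypothetical.
 */
static inline u64 cgx_cmr_int_ack(void *cgx_base, u64 lmac)
{
        u64 pending = readq(cgx_base + CGXX_CMRX_INT(lmac));

        /* write 1s back to clear the bits that were set */
        writeq(pending, cgx_base + CGXX_CMRX_INT(lmac));
        return pending;
}
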
 524/**
 525 * Register (RSL) cgx#_cmr#_led_timing
 526 *
 527 * CGX MAC LED Activity Timing Registers
 528 */
 529union cgxx_cmrx_led_timing {
 530        u64 u;
 531        struct cgxx_cmrx_led_timing_s {
 532                u64 extension                        : 8;
 533                u64 reserved_8_63                    : 56;
 534        } s;
 535        /* struct cgxx_cmrx_led_timing_s cn; */
 536};
 537
 538static inline u64 CGXX_CMRX_LED_TIMING(u64 a)
 539        __attribute__ ((pure, always_inline));
 540static inline u64 CGXX_CMRX_LED_TIMING(u64 a)
 541{
 542        return 0x5f0 + 0x40000 * a;
 543}
 544
 545/**
 546 * Register (RSL) cgx#_cmr#_prt_cbfc_ctl
 547 *
 548 * CGX CMR LMAC PFC Control Registers See CGX()_CMR()_RX_LOGL_XOFF[XOFF].
 549 */
 550union cgxx_cmrx_prt_cbfc_ctl {
 551        u64 u;
 552        struct cgxx_cmrx_prt_cbfc_ctl_s {
 553                u64 reserved_0_15                    : 16;
 554                u64 phys_bp                          : 16;
 555                u64 reserved_32_63                   : 32;
 556        } s;
 557        /* struct cgxx_cmrx_prt_cbfc_ctl_s cn; */
 558};
 559
 560static inline u64 CGXX_CMRX_PRT_CBFC_CTL(u64 a)
 561        __attribute__ ((pure, always_inline));
 562static inline u64 CGXX_CMRX_PRT_CBFC_CTL(u64 a)
 563{
 564        return 0x608 + 0x40000 * a;
 565}
 566
 567/**
 568 * Register (RSL) cgx#_cmr#_rx_bp_drop
 569 *
 570 * CGX Receive Backpressure Drop Register
 571 */
 572union cgxx_cmrx_rx_bp_drop {
 573        u64 u;
 574        struct cgxx_cmrx_rx_bp_drop_s {
 575                u64 mark                             : 7;
 576                u64 reserved_7_63                    : 57;
 577        } s;
 578        /* struct cgxx_cmrx_rx_bp_drop_s cn; */
 579};
 580
 581static inline u64 CGXX_CMRX_RX_BP_DROP(u64 a)
 582        __attribute__ ((pure, always_inline));
 583static inline u64 CGXX_CMRX_RX_BP_DROP(u64 a)
 584{
 585        return 0xd8 + 0x40000 * a;
 586}
 587
 588/**
 589 * Register (RSL) cgx#_cmr#_rx_bp_off
 590 *
 591 * CGX Receive Backpressure Off Register
 592 */
 593union cgxx_cmrx_rx_bp_off {
 594        u64 u;
 595        struct cgxx_cmrx_rx_bp_off_s {
 596                u64 mark                             : 7;
 597                u64 reserved_7_63                    : 57;
 598        } s;
 599        /* struct cgxx_cmrx_rx_bp_off_s cn; */
 600};
 601
 602static inline u64 CGXX_CMRX_RX_BP_OFF(u64 a)
 603        __attribute__ ((pure, always_inline));
 604static inline u64 CGXX_CMRX_RX_BP_OFF(u64 a)
 605{
 606        return 0xe8 + 0x40000 * a;
 607}
 608
 609/**
 610 * Register (RSL) cgx#_cmr#_rx_bp_on
 611 *
 612 * CGX Receive Backpressure On Register
 613 */
 614union cgxx_cmrx_rx_bp_on {
 615        u64 u;
 616        struct cgxx_cmrx_rx_bp_on_s {
 617                u64 mark                             : 13;
 618                u64 reserved_13_63                   : 51;
 619        } s;
 620        /* struct cgxx_cmrx_rx_bp_on_s cn; */
 621};
 622
 623static inline u64 CGXX_CMRX_RX_BP_ON(u64 a)
 624        __attribute__ ((pure, always_inline));
 625static inline u64 CGXX_CMRX_RX_BP_ON(u64 a)
 626{
 627        return 0xe0 + 0x40000 * a;
 628}
 629
 630/**
 631 * Register (RSL) cgx#_cmr#_rx_bp_status
 632 *
 633 * CGX CMR Receive Backpressure Status Registers
 634 */
 635union cgxx_cmrx_rx_bp_status {
 636        u64 u;
 637        struct cgxx_cmrx_rx_bp_status_s {
 638                u64 bp                               : 1;
 639                u64 reserved_1_63                    : 63;
 640        } s;
 641        /* struct cgxx_cmrx_rx_bp_status_s cn; */
 642};
 643
 644static inline u64 CGXX_CMRX_RX_BP_STATUS(u64 a)
 645        __attribute__ ((pure, always_inline));
 646static inline u64 CGXX_CMRX_RX_BP_STATUS(u64 a)
 647{
 648        return 0xf0 + 0x40000 * a;
 649}
 650
 651/**
 652 * Register (RSL) cgx#_cmr#_rx_dmac_ctl0
 653 *
 654 * CGX CMR Receive DMAC Address-Control0 Register DMAC CAM control
 655 * register for use by X2P/NIX bound traffic. Received packets are only
 656 * passed to X2P/NIX when the DMAC0 filter result is ACCEPT and STEERING0
 657 * filter result is PASS. See also CGX()_CMR_RX_DMAC()_CAM0 and
 * CGX()_CMR_RX_STEERING0().
 *
 * Internal:
 * ALGORITHM: Here is some pseudo code that represents the address filter
 * behavior.
 * \<pre\>
 * dmac_addr_filter(uint8 prt, uint48 dmac) {
 *   for (lmac = 0; lmac \< 4; lmac++) {
 *     if (is_bcst(dmac))          // broadcast accept
 *       return (CGX()_CMR(lmac)_RX_DMAC_CTL0[BCST_ACCEPT] ? ACCEPT : REJECT);
 *     if (is_mcst(dmac) && CGX()_CMR(lmac)_RX_DMAC_CTL0[MCST_MODE] == 0)
 *       return REJECT;            // multicast reject
 *     if (is_mcst(dmac) && CGX()_CMR(lmac)_RX_DMAC_CTL0[MCST_MODE] == 1)
 *       return ACCEPT;            // multicast accept
 *
 *     // otherwise, DMAC CAM filter
 *     cam_hit = 0;
 *     for (i = 0; i \< 32; i++) {
 *       cam = CGX()_CMR_RX_DMAC(i)_CAM0;
 *       if (cam[EN] && cam[ID] == lmac && cam[ADR] == dmac) {
 *         cam_hit = 1;
 *         break;
 *       }
 *     }
 *     if (cam_hit)
 *       return (CGX()_CMR(lmac)_RX_DMAC_CTL0[CAM_ACCEPT] ? ACCEPT : REJECT);
 *     else
 *       return (CGX()_CMR(lmac)_RX_DMAC_CTL0[CAM_ACCEPT] ? REJECT : ACCEPT);
 *   }
 * }
 * \</pre\>
 674 */
 675union cgxx_cmrx_rx_dmac_ctl0 {
 676        u64 u;
 677        struct cgxx_cmrx_rx_dmac_ctl0_s {
 678                u64 bcst_accept                      : 1;
 679                u64 mcst_mode                        : 2;
 680                u64 cam_accept                       : 1;
 681                u64 reserved_4_63                    : 60;
 682        } s;
 683        /* struct cgxx_cmrx_rx_dmac_ctl0_s cn; */
 684};
 685
 686static inline u64 CGXX_CMRX_RX_DMAC_CTL0(u64 a)
 687        __attribute__ ((pure, always_inline));
 688static inline u64 CGXX_CMRX_RX_DMAC_CTL0(u64 a)
 689{
 690        return 0x1f8 + 0x40000 * a;
 691}
 692
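/*
 * Illustrative sketch, not part of the auto-generated definitions:
 * opening the X2P/NIX-bound DMAC filter to accept broadcast and all
 * multicast while keeping CAM hits as accepts, following the filter
 * algorithm in the comment above. Assumes readq()/writeq() from asm/io.h
 * and cgx_base pointing at the mapped BAR0; the helper name is
 * hypothetical.
 */
static inline void cgx_rx_accept_bcast_mcast(void *cgx_base, u64 lmac)
{
        union cgxx_cmrx_rx_dmac_ctl0 ctl;

        ctl.u = readq(cgx_base + CGXX_CMRX_RX_DMAC_CTL0(lmac));
        ctl.s.bcst_accept = 1;  /* broadcast DMACs accepted */
        ctl.s.mcst_mode = 1;    /* MCST_MODE == 1: accept all multicast */
        ctl.s.cam_accept = 1;   /* CAM hit means accept */
        writeq(ctl.u, cgx_base + CGXX_CMRX_RX_DMAC_CTL0(lmac));
}
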
 693/**
 694 * Register (RSL) cgx#_cmr#_rx_dmac_ctl1
 695 *
 696 * CGX CMR Receive DMAC Address-Control1 Register DMAC CAM control
 697 * register for use by NCSI bound traffic. Received packets are only
 698 * passed to NCSI when the DMAC1 filter result is ACCEPT and STEERING1
 699 * filter result is PASS. See also CGX()_CMR_RX_DMAC()_CAM1 and
 700 * CGX()_CMR_RX_STEERING1(). For use with the LMAC associated with NCSI;
 701 * see CGX()_CMR_GLOBAL_CONFIG[NCSI_LMAC_ID].  Internal: ALGORITHM: See
 702 * CGX()_CMR()_RX_DMAC_CTL0.
 703 */
 704union cgxx_cmrx_rx_dmac_ctl1 {
 705        u64 u;
 706        struct cgxx_cmrx_rx_dmac_ctl1_s {
 707                u64 bcst_accept                      : 1;
 708                u64 mcst_mode                        : 2;
 709                u64 cam_accept                       : 1;
 710                u64 reserved_4_63                    : 60;
 711        } s;
 712        /* struct cgxx_cmrx_rx_dmac_ctl1_s cn; */
 713};
 714
 715static inline u64 CGXX_CMRX_RX_DMAC_CTL1(u64 a)
 716        __attribute__ ((pure, always_inline));
 717static inline u64 CGXX_CMRX_RX_DMAC_CTL1(u64 a)
 718{
 719        return 0x3f8 + 0x40000 * a;
 720}
 721
 722/**
 723 * Register (RSL) cgx#_cmr#_rx_fifo_len
 724 *
 725 * CGX CMR Receive Fifo Length Registers
 726 */
 727union cgxx_cmrx_rx_fifo_len {
 728        u64 u;
 729        struct cgxx_cmrx_rx_fifo_len_s {
 730                u64 fifo_len                         : 14;
 731                u64 busy                             : 1;
 732                u64 fifo_len_e                       : 14;
 733                u64 busy_e                           : 1;
 734                u64 reserved_30_63                   : 34;
 735        } s;
 736        /* struct cgxx_cmrx_rx_fifo_len_s cn; */
 737};
 738
 739static inline u64 CGXX_CMRX_RX_FIFO_LEN(u64 a)
 740        __attribute__ ((pure, always_inline));
 741static inline u64 CGXX_CMRX_RX_FIFO_LEN(u64 a)
 742{
 743        return 0x108 + 0x40000 * a;
 744}
 745
 746/**
 747 * Register (RSL) cgx#_cmr#_rx_id_map
 748 *
 749 * CGX CMR Receive ID Map Register These registers set the RX LMAC ID
 750 * mapping for X2P/NIX.
 751 */
 752union cgxx_cmrx_rx_id_map {
 753        u64 u;
 754        struct cgxx_cmrx_rx_id_map_s {
 755                u64 pknd                             : 6;
 756                u64 unused                           : 2;
 757                u64 rid                              : 7;
 758                u64 reserved_15_63                   : 49;
 759        } s;
 760        /* struct cgxx_cmrx_rx_id_map_s cn; */
 761};
 762
 763static inline u64 CGXX_CMRX_RX_ID_MAP(u64 a)
 764        __attribute__ ((pure, always_inline));
 765static inline u64 CGXX_CMRX_RX_ID_MAP(u64 a)
 766{
 767        return 0x60 + 0x40000 * a;
 768}
 769
 770/**
 771 * Register (RSL) cgx#_cmr#_rx_logl_xoff
 772 *
 773 * CGX CMR Receive Logical XOFF Registers
 774 */
 775union cgxx_cmrx_rx_logl_xoff {
 776        u64 u;
 777        struct cgxx_cmrx_rx_logl_xoff_s {
 778                u64 xoff                             : 16;
 779                u64 reserved_16_63                   : 48;
 780        } s;
 781        /* struct cgxx_cmrx_rx_logl_xoff_s cn; */
 782};
 783
 784static inline u64 CGXX_CMRX_RX_LOGL_XOFF(u64 a)
 785        __attribute__ ((pure, always_inline));
 786static inline u64 CGXX_CMRX_RX_LOGL_XOFF(u64 a)
 787{
 788        return 0xf8 + 0x40000 * a;
 789}
 790
 791/**
 792 * Register (RSL) cgx#_cmr#_rx_logl_xon
 793 *
 794 * CGX CMR Receive Logical XON Registers
 795 */
 796union cgxx_cmrx_rx_logl_xon {
 797        u64 u;
 798        struct cgxx_cmrx_rx_logl_xon_s {
 799                u64 xon                              : 16;
 800                u64 reserved_16_63                   : 48;
 801        } s;
 802        /* struct cgxx_cmrx_rx_logl_xon_s cn; */
 803};
 804
 805static inline u64 CGXX_CMRX_RX_LOGL_XON(u64 a)
 806        __attribute__ ((pure, always_inline));
 807static inline u64 CGXX_CMRX_RX_LOGL_XON(u64 a)
 808{
 809        return 0x100 + 0x40000 * a;
 810}
 811
 812/**
 813 * Register (RSL) cgx#_cmr#_rx_merge_stat0
 814 *
 815 * CGX RX Preemption Status Register 0
 816 */
 817union cgxx_cmrx_rx_merge_stat0 {
 818        u64 u;
 819        struct cgxx_cmrx_rx_merge_stat0_s {
 820                u64 fa_err_cnt                       : 48;
 821                u64 reserved_48_63                   : 16;
 822        } s;
 823        /* struct cgxx_cmrx_rx_merge_stat0_s cn; */
 824};
 825
 826static inline u64 CGXX_CMRX_RX_MERGE_STAT0(u64 a)
 827        __attribute__ ((pure, always_inline));
 828static inline u64 CGXX_CMRX_RX_MERGE_STAT0(u64 a)
 829{
 830        return 0x138 + 0x40000 * a;
 831}
 832
 833/**
 834 * Register (RSL) cgx#_cmr#_rx_merge_stat1
 835 *
 836 * CGX RX Preemption Status Register 1
 837 */
 838union cgxx_cmrx_rx_merge_stat1 {
 839        u64 u;
 840        struct cgxx_cmrx_rx_merge_stat1_s {
 841                u64 fs_err_cnt                       : 48;
 842                u64 reserved_48_63                   : 16;
 843        } s;
 844        /* struct cgxx_cmrx_rx_merge_stat1_s cn; */
 845};
 846
 847static inline u64 CGXX_CMRX_RX_MERGE_STAT1(u64 a)
 848        __attribute__ ((pure, always_inline));
 849static inline u64 CGXX_CMRX_RX_MERGE_STAT1(u64 a)
 850{
 851        return 0x140 + 0x40000 * a;
 852}
 853
 854/**
 855 * Register (RSL) cgx#_cmr#_rx_merge_stat2
 856 *
 857 * CGX RX Preemption Status Register 2
 858 */
 859union cgxx_cmrx_rx_merge_stat2 {
 860        u64 u;
 861        struct cgxx_cmrx_rx_merge_stat2_s {
 862                u64 fa_ok_cnt                        : 48;
 863                u64 reserved_48_63                   : 16;
 864        } s;
 865        /* struct cgxx_cmrx_rx_merge_stat2_s cn; */
 866};
 867
 868static inline u64 CGXX_CMRX_RX_MERGE_STAT2(u64 a)
 869        __attribute__ ((pure, always_inline));
 870static inline u64 CGXX_CMRX_RX_MERGE_STAT2(u64 a)
 871{
 872        return 0x148 + 0x40000 * a;
 873}
 874
 875/**
 876 * Register (RSL) cgx#_cmr#_rx_merge_stat3
 877 *
 878 * CGX RX Preemption Status Register 3
 879 */
 880union cgxx_cmrx_rx_merge_stat3 {
 881        u64 u;
 882        struct cgxx_cmrx_rx_merge_stat3_s {
 883                u64 ff_cnt                           : 48;
 884                u64 reserved_48_63                   : 16;
 885        } s;
 886        /* struct cgxx_cmrx_rx_merge_stat3_s cn; */
 887};
 888
 889static inline u64 CGXX_CMRX_RX_MERGE_STAT3(u64 a)
 890        __attribute__ ((pure, always_inline));
 891static inline u64 CGXX_CMRX_RX_MERGE_STAT3(u64 a)
 892{
 893        return 0x150 + 0x40000 * a;
 894}
 895
 896/**
 897 * Register (RSL) cgx#_cmr#_rx_merge_stat4
 898 *
 899 * CGX RX Preemption Status Register 4
 900 */
 901union cgxx_cmrx_rx_merge_stat4 {
 902        u64 u;
 903        struct cgxx_cmrx_rx_merge_stat4_s {
 904                u64 cnt                              : 48;
 905                u64 reserved_48_63                   : 16;
 906        } s;
 907        /* struct cgxx_cmrx_rx_merge_stat4_s cn; */
 908};
 909
 910static inline u64 CGXX_CMRX_RX_MERGE_STAT4(u64 a)
 911        __attribute__ ((pure, always_inline));
 912static inline u64 CGXX_CMRX_RX_MERGE_STAT4(u64 a)
 913{
 914        return 0x158 + 0x40000 * a;
 915}
 916
 917/**
 918 * Register (RSL) cgx#_cmr#_rx_pause_drop_time
 919 *
 920 * CGX CMR Receive Pause Drop-Time Register
 921 */
 922union cgxx_cmrx_rx_pause_drop_time {
 923        u64 u;
 924        struct cgxx_cmrx_rx_pause_drop_time_s {
 925                u64 pause_time                       : 16;
 926                u64 pause_time_e                     : 16;
 927                u64 reserved_32_63                   : 32;
 928        } s;
 929        /* struct cgxx_cmrx_rx_pause_drop_time_s cn; */
 930};
 931
 932static inline u64 CGXX_CMRX_RX_PAUSE_DROP_TIME(u64 a)
 933        __attribute__ ((pure, always_inline));
 934static inline u64 CGXX_CMRX_RX_PAUSE_DROP_TIME(u64 a)
 935{
 936        return 0x68 + 0x40000 * a;
 937}
 938
 939/**
 940 * Register (RSL) cgx#_cmr#_rx_stat0
 941 *
 942 * CGX Receive Status Register 0 These registers provide a count of
 * received packets that meet the following conditions:
 * * are not recognized as ERROR packets (any OPCODE).
 * * are not recognized as PAUSE packets.
 * * are not dropped due to FIFO full status.
 * * are not dropped due to DMAC0 or STEERING0 filtering.
 *
 * Internal:
 * This pseudo code represents the RX STAT0 through STAT8 accounting:
 * \<pre\>
 * if (errored)
 *   incr RX_STAT8
 * else if (ctrl packet, i.e. Pause/PFC)
 *   incr RX_STAT2,3
 * else if (fifo full drop)
 *   incr RX_STAT6,7
 * else if (DMAC0/VLAN0 filter drop)
 *   incr RX_STAT4,5 if not a filter+decision
 * else
 *   incr RX_STAT0,1
 * end
 * \</pre\>
 952 */
 953union cgxx_cmrx_rx_stat0 {
 954        u64 u;
 955        struct cgxx_cmrx_rx_stat0_s {
 956                u64 cnt                              : 48;
 957                u64 reserved_48_63                   : 16;
 958        } s;
 959        /* struct cgxx_cmrx_rx_stat0_s cn; */
 960};
 961
 962static inline u64 CGXX_CMRX_RX_STAT0(u64 a)
 963        __attribute__ ((pure, always_inline));
 964static inline u64 CGXX_CMRX_RX_STAT0(u64 a)
 965{
 966        return 0x70 + 0x40000 * a;
 967}
 968
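/*
 * Illustrative sketch, not part of the auto-generated definitions:
 * reading the 48-bit RX packet counter through the union so the reserved
 * upper bits never leak into the result. Assumes readq() from asm/io.h
 * and cgx_base pointing at the mapped BAR0; the helper name is
 * hypothetical.
 */
static inline u64 cgx_rx_good_pkts(void *cgx_base, u64 lmac)
{
        union cgxx_cmrx_rx_stat0 stat;

        stat.u = readq(cgx_base + CGXX_CMRX_RX_STAT0(lmac));
        return stat.s.cnt;      /* non-error, non-PAUSE, non-dropped packets */
}
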
 969/**
 970 * Register (RSL) cgx#_cmr#_rx_stat1
 971 *
 972 * CGX Receive Status Register 1 These registers provide a count of
 973 * octets of received packets.
 974 */
 975union cgxx_cmrx_rx_stat1 {
 976        u64 u;
 977        struct cgxx_cmrx_rx_stat1_s {
 978                u64 cnt                              : 48;
 979                u64 reserved_48_63                   : 16;
 980        } s;
 981        /* struct cgxx_cmrx_rx_stat1_s cn; */
 982};
 983
 984static inline u64 CGXX_CMRX_RX_STAT1(u64 a)
 985        __attribute__ ((pure, always_inline));
 986static inline u64 CGXX_CMRX_RX_STAT1(u64 a)
 987{
 988        return 0x78 + 0x40000 * a;
 989}
 990
 991/**
 992 * Register (RSL) cgx#_cmr#_rx_stat2
 993 *
 994 * CGX Receive Status Register 2 These registers provide a count of
 * received packets that meet the following conditions:
 * * are not recognized as ERROR packets (any OPCODE).
 * * are recognized as PAUSE packets.
 *
 * Pause packets can be optionally dropped or forwarded based
 998 * on
 999 * CGX()_SMU()_RX_FRM_CTL[CTL_DRP]/CGX()_GMP_GMI_RX()_FRM_CTL[CTL_DRP].
1000 * This count increments regardless of whether the packet is dropped.
1001 */
1002union cgxx_cmrx_rx_stat2 {
1003        u64 u;
1004        struct cgxx_cmrx_rx_stat2_s {
1005                u64 cnt                              : 48;
1006                u64 reserved_48_63                   : 16;
1007        } s;
1008        /* struct cgxx_cmrx_rx_stat2_s cn; */
1009};
1010
1011static inline u64 CGXX_CMRX_RX_STAT2(u64 a)
1012        __attribute__ ((pure, always_inline));
1013static inline u64 CGXX_CMRX_RX_STAT2(u64 a)
1014{
1015        return 0x80 + 0x40000 * a;
1016}
1017
1018/**
1019 * Register (RSL) cgx#_cmr#_rx_stat3
1020 *
1021 * CGX Receive Status Register 3 These registers provide a count of
1022 * octets of received PAUSE and control packets.
1023 */
1024union cgxx_cmrx_rx_stat3 {
1025        u64 u;
1026        struct cgxx_cmrx_rx_stat3_s {
1027                u64 cnt                              : 48;
1028                u64 reserved_48_63                   : 16;
1029        } s;
1030        /* struct cgxx_cmrx_rx_stat3_s cn; */
1031};
1032
1033static inline u64 CGXX_CMRX_RX_STAT3(u64 a)
1034        __attribute__ ((pure, always_inline));
1035static inline u64 CGXX_CMRX_RX_STAT3(u64 a)
1036{
1037        return 0x88 + 0x40000 * a;
1038}
1039
1040/**
1041 * Register (RSL) cgx#_cmr#_rx_stat4
1042 *
1043 * CGX Receive Status Register 4 These registers provide a count of
 * received packets that meet the following conditions:
 * * are not recognized as ERROR packets (any OPCODE).
 * * are not recognized as PAUSE packets.
 * * are not dropped due to FIFO full status.
 * * are dropped due to DMAC0 or STEERING0 filtering.
 *
 * 16B packets or smaller (20B in case of
1048 * FCS strip) as the result of truncation or other means are not dropped
1049 * by CGX (unless filter and decision is also asserted) and will never
1050 * appear in this count. Should the MAC signal to the CMR that the packet
1051 * be filtered upon decision before the end of packet, then STAT4 and
1052 * STAT5 will not be updated.
1053 */
1054union cgxx_cmrx_rx_stat4 {
1055        u64 u;
1056        struct cgxx_cmrx_rx_stat4_s {
1057                u64 cnt                              : 48;
1058                u64 reserved_48_63                   : 16;
1059        } s;
1060        /* struct cgxx_cmrx_rx_stat4_s cn; */
1061};
1062
1063static inline u64 CGXX_CMRX_RX_STAT4(u64 a)
1064        __attribute__ ((pure, always_inline));
1065static inline u64 CGXX_CMRX_RX_STAT4(u64 a)
1066{
1067        return 0x90 + 0x40000 * a;
1068}
1069
1070/**
1071 * Register (RSL) cgx#_cmr#_rx_stat5
1072 *
1073 * CGX Receive Status Register 5 These registers provide a count of
1074 * octets of filtered DMAC0 or VLAN STEERING0 packets.
1075 */
1076union cgxx_cmrx_rx_stat5 {
1077        u64 u;
1078        struct cgxx_cmrx_rx_stat5_s {
1079                u64 cnt                              : 48;
1080                u64 reserved_48_63                   : 16;
1081        } s;
1082        /* struct cgxx_cmrx_rx_stat5_s cn; */
1083};
1084
1085static inline u64 CGXX_CMRX_RX_STAT5(u64 a)
1086        __attribute__ ((pure, always_inline));
1087static inline u64 CGXX_CMRX_RX_STAT5(u64 a)
1088{
1089        return 0x98 + 0x40000 * a;
1090}
1091
1092/**
1093 * Register (RSL) cgx#_cmr#_rx_stat6
1094 *
1095 * CGX Receive Status Register 6 These registers provide a count of
 * received packets that meet the following conditions:
 * * are not recognized as ERROR packets (any OPCODE).
 * * are not recognized as PAUSE packets.
 * * are dropped due to FIFO full status.
 *
 * They do not count any
1099 * packet that is truncated at the point of overflow and sent on to the
1100 * NIX. The truncated packet will be marked with error and increment
1101 * STAT8. These registers count all entire packets dropped by the FIFO
1102 * for a given LMAC.
1103 */
1104union cgxx_cmrx_rx_stat6 {
1105        u64 u;
1106        struct cgxx_cmrx_rx_stat6_s {
1107                u64 cnt                              : 48;
1108                u64 reserved_48_63                   : 16;
1109        } s;
1110        /* struct cgxx_cmrx_rx_stat6_s cn; */
1111};
1112
1113static inline u64 CGXX_CMRX_RX_STAT6(u64 a)
1114        __attribute__ ((pure, always_inline));
1115static inline u64 CGXX_CMRX_RX_STAT6(u64 a)
1116{
1117        return 0xa0 + 0x40000 * a;
1118}
1119
1120/**
1121 * Register (RSL) cgx#_cmr#_rx_stat7
1122 *
1123 * CGX Receive Status Register 7 These registers provide a count of
1124 * octets of received packets that were dropped due to a full receive
1125 * FIFO.
1126 */
1127union cgxx_cmrx_rx_stat7 {
1128        u64 u;
1129        struct cgxx_cmrx_rx_stat7_s {
1130                u64 cnt                              : 48;
1131                u64 reserved_48_63                   : 16;
1132        } s;
1133        /* struct cgxx_cmrx_rx_stat7_s cn; */
1134};
1135
1136static inline u64 CGXX_CMRX_RX_STAT7(u64 a)
1137        __attribute__ ((pure, always_inline));
1138static inline u64 CGXX_CMRX_RX_STAT7(u64 a)
1139{
1140        return 0xa8 + 0x40000 * a;
1141}
1142
1143/**
1144 * Register (RSL) cgx#_cmr#_rx_stat8
1145 *
1146 * CGX Receive Status Register 8 These registers provide a count of
 * received packets that meet the following conditions:
 * * are recognized as ERROR packets (any OPCODE).
1149 */
1150union cgxx_cmrx_rx_stat8 {
1151        u64 u;
1152        struct cgxx_cmrx_rx_stat8_s {
1153                u64 cnt                              : 48;
1154                u64 reserved_48_63                   : 16;
1155        } s;
1156        /* struct cgxx_cmrx_rx_stat8_s cn; */
1157};
1158
1159static inline u64 CGXX_CMRX_RX_STAT8(u64 a)
1160        __attribute__ ((pure, always_inline));
1161static inline u64 CGXX_CMRX_RX_STAT8(u64 a)
1162{
1163        return 0xb0 + 0x40000 * a;
1164}
1165
1166/**
1167 * Register (RSL) cgx#_cmr#_rx_stat_pri#_xoff
1168 *
1169 * CGX CMR RX XON to XOFF transition Registers
1170 */
1171union cgxx_cmrx_rx_stat_prix_xoff {
1172        u64 u;
1173        struct cgxx_cmrx_rx_stat_prix_xoff_s {
1174                u64 cnt                              : 48;
1175                u64 reserved_48_63                   : 16;
1176        } s;
1177        /* struct cgxx_cmrx_rx_stat_prix_xoff_s cn; */
1178};
1179
1180static inline u64 CGXX_CMRX_RX_STAT_PRIX_XOFF(u64 a, u64 b)
1181        __attribute__ ((pure, always_inline));
1182static inline u64 CGXX_CMRX_RX_STAT_PRIX_XOFF(u64 a, u64 b)
1183{
1184        return 0x7c0 + 0x40000 * a + 8 * b;
1185}
1186
1187/**
1188 * Register (RSL) cgx#_cmr#_scratch#
1189 *
1190 * CGX CMR Scratch Registers
1191 */
1192union cgxx_cmrx_scratchx {
1193        u64 u;
1194        struct cgxx_cmrx_scratchx_s {
1195                u64 scratch                          : 64;
1196        } s;
1197        /* struct cgxx_cmrx_scratchx_s cn; */
1198};
1199
1200static inline u64 CGXX_CMRX_SCRATCHX(u64 a, u64 b)
1201        __attribute__ ((pure, always_inline));
1202static inline u64 CGXX_CMRX_SCRATCHX(u64 a, u64 b)
1203{
1204        return 0x1050 + 0x40000 * a + 8 * b;
1205}
1206
1207/**
1208 * Register (RSL) cgx#_cmr#_sw_int
1209 *
1210 * CGX CMR Interrupt Register
1211 */
1212union cgxx_cmrx_sw_int {
1213        u64 u;
1214        struct cgxx_cmrx_sw_int_s {
1215                u64 sw_set                           : 1;
1216                u64 reserved_1_63                    : 63;
1217        } s;
1218        /* struct cgxx_cmrx_sw_int_s cn; */
1219};
1220
1221static inline u64 CGXX_CMRX_SW_INT(u64 a)
1222        __attribute__ ((pure, always_inline));
1223static inline u64 CGXX_CMRX_SW_INT(u64 a)
1224{
1225        return 0x180 + 0x40000 * a;
1226}
1227
1228/**
1229 * Register (RSL) cgx#_cmr#_sw_int_ena_w1c
1230 *
1231 * CGX CMR Interrupt Enable Clear Register This register clears interrupt
1232 * enable bits.
1233 */
1234union cgxx_cmrx_sw_int_ena_w1c {
1235        u64 u;
1236        struct cgxx_cmrx_sw_int_ena_w1c_s {
1237                u64 sw_set                           : 1;
1238                u64 reserved_1_63                    : 63;
1239        } s;
1240        /* struct cgxx_cmrx_sw_int_ena_w1c_s cn; */
1241};
1242
1243static inline u64 CGXX_CMRX_SW_INT_ENA_W1C(u64 a)
1244        __attribute__ ((pure, always_inline));
1245static inline u64 CGXX_CMRX_SW_INT_ENA_W1C(u64 a)
1246{
1247        return 0x190 + 0x40000 * a;
1248}
1249
1250/**
1251 * Register (RSL) cgx#_cmr#_sw_int_ena_w1s
1252 *
1253 * CGX CMR Interrupt Enable Set Register This register sets interrupt
1254 * enable bits.
1255 */
1256union cgxx_cmrx_sw_int_ena_w1s {
1257        u64 u;
1258        struct cgxx_cmrx_sw_int_ena_w1s_s {
1259                u64 sw_set                           : 1;
1260                u64 reserved_1_63                    : 63;
1261        } s;
1262        /* struct cgxx_cmrx_sw_int_ena_w1s_s cn; */
1263};
1264
1265static inline u64 CGXX_CMRX_SW_INT_ENA_W1S(u64 a)
1266        __attribute__ ((pure, always_inline));
1267static inline u64 CGXX_CMRX_SW_INT_ENA_W1S(u64 a)
1268{
1269        return 0x198 + 0x40000 * a;
1270}
1271
1272/**
1273 * Register (RSL) cgx#_cmr#_sw_int_w1s
1274 *
1275 * CGX CMR Interrupt Set Register This register sets interrupt bits.
1276 */
1277union cgxx_cmrx_sw_int_w1s {
1278        u64 u;
1279        struct cgxx_cmrx_sw_int_w1s_s {
1280                u64 sw_set                           : 1;
1281                u64 reserved_1_63                    : 63;
1282        } s;
1283        /* struct cgxx_cmrx_sw_int_w1s_s cn; */
1284};
1285
1286static inline u64 CGXX_CMRX_SW_INT_W1S(u64 a)
1287        __attribute__ ((pure, always_inline));
1288static inline u64 CGXX_CMRX_SW_INT_W1S(u64 a)
1289{
1290        return 0x188 + 0x40000 * a;
1291}
1292
1293/**
1294 * Register (RSL) cgx#_cmr#_tx_channel
1295 *
1296 * CGX CMR Transmit-Channels Registers
1297 */
1298union cgxx_cmrx_tx_channel {
1299        u64 u;
1300        struct cgxx_cmrx_tx_channel_s {
1301                u64 msk                              : 16;
1302                u64 reserved_16_63                   : 48;
1303        } s;
1304        /* struct cgxx_cmrx_tx_channel_s cn; */
1305};
1306
1307static inline u64 CGXX_CMRX_TX_CHANNEL(u64 a)
1308        __attribute__ ((pure, always_inline));
1309static inline u64 CGXX_CMRX_TX_CHANNEL(u64 a)
1310{
1311        return 0x600 + 0x40000 * a;
1312}
1313
1314/**
1315 * Register (RSL) cgx#_cmr#_tx_fifo_len
1316 *
1317 * CGX CMR Transmit Fifo Length Registers
1318 */
1319union cgxx_cmrx_tx_fifo_len {
1320        u64 u;
1321        struct cgxx_cmrx_tx_fifo_len_s {
1322                u64 fifo_len                         : 14;
1323                u64 lmac_idle                        : 1;
1324                u64 fifo_e_len                       : 14;
1325                u64 lmac_e_idle                      : 1;
1326                u64 reserved_30_63                   : 34;
1327        } s;
1328        /* struct cgxx_cmrx_tx_fifo_len_s cn; */
1329};
1330
1331static inline u64 CGXX_CMRX_TX_FIFO_LEN(u64 a)
1332        __attribute__ ((pure, always_inline));
1333static inline u64 CGXX_CMRX_TX_FIFO_LEN(u64 a)
1334{
1335        return 0x618 + 0x40000 * a;
1336}
1337
1338/**
1339 * Register (RSL) cgx#_cmr#_tx_hg2_status
1340 *
1341 * CGX CMR Transmit HiGig2 Status Registers
1342 */
1343union cgxx_cmrx_tx_hg2_status {
1344        u64 u;
1345        struct cgxx_cmrx_tx_hg2_status_s {
1346                u64 lgtim2go                         : 16;
1347                u64 xof                              : 16;
1348                u64 reserved_32_63                   : 32;
1349        } s;
1350        /* struct cgxx_cmrx_tx_hg2_status_s cn; */
1351};
1352
1353static inline u64 CGXX_CMRX_TX_HG2_STATUS(u64 a)
1354        __attribute__ ((pure, always_inline));
1355static inline u64 CGXX_CMRX_TX_HG2_STATUS(u64 a)
1356{
1357        return 0x610 + 0x40000 * a;
1358}
1359
1360/**
1361 * Register (RSL) cgx#_cmr#_tx_merge_stat0
1362 *
1363 * CGX TX Preemption Status Register 0
1364 */
1365union cgxx_cmrx_tx_merge_stat0 {
1366        u64 u;
1367        struct cgxx_cmrx_tx_merge_stat0_s {
1368                u64 ff_cnt                           : 48;
1369                u64 reserved_48_63                   : 16;
1370        } s;
1371        /* struct cgxx_cmrx_tx_merge_stat0_s cn; */
1372};
1373
1374static inline u64 CGXX_CMRX_TX_MERGE_STAT0(u64 a)
1375        __attribute__ ((pure, always_inline));
1376static inline u64 CGXX_CMRX_TX_MERGE_STAT0(u64 a)
1377{
1378        return 0x160 + 0x40000 * a;
1379}
1380
1381/**
1382 * Register (RSL) cgx#_cmr#_tx_ovr_bp
1383 *
1384 * CGX CMR Transmit-Channels Backpressure Override Registers
1385 */
1386union cgxx_cmrx_tx_ovr_bp {
1387        u64 u;
1388        struct cgxx_cmrx_tx_ovr_bp_s {
1389                u64 tx_chan_bp                       : 16;
1390                u64 reserved_16_63                   : 48;
1391        } s;
1392        /* struct cgxx_cmrx_tx_ovr_bp_s cn; */
1393};
1394
1395static inline u64 CGXX_CMRX_TX_OVR_BP(u64 a)
1396        __attribute__ ((pure, always_inline));
1397static inline u64 CGXX_CMRX_TX_OVR_BP(u64 a)
1398{
1399        return 0x620 + 0x40000 * a;
1400}
1401
1402/**
1403 * Register (RSL) cgx#_cmr#_tx_stat0
1404 *
1405 * CGX CMR Transmit Statistics Registers 0
1406 */
1407union cgxx_cmrx_tx_stat0 {
1408        u64 u;
1409        struct cgxx_cmrx_tx_stat0_s {
1410                u64 xscol                            : 48;
1411                u64 reserved_48_63                   : 16;
1412        } s;
1413        /* struct cgxx_cmrx_tx_stat0_s cn; */
1414};
1415
1416static inline u64 CGXX_CMRX_TX_STAT0(u64 a)
1417        __attribute__ ((pure, always_inline));
1418static inline u64 CGXX_CMRX_TX_STAT0(u64 a)
1419{
1420        return 0x700 + 0x40000 * a;
1421}
1422
1423/**
1424 * Register (RSL) cgx#_cmr#_tx_stat1
1425 *
1426 * CGX CMR Transmit Statistics Registers 1
1427 */
1428union cgxx_cmrx_tx_stat1 {
1429        u64 u;
1430        struct cgxx_cmrx_tx_stat1_s {
1431                u64 xsdef                            : 48;
1432                u64 reserved_48_63                   : 16;
1433        } s;
1434        /* struct cgxx_cmrx_tx_stat1_s cn; */
1435};
1436
1437static inline u64 CGXX_CMRX_TX_STAT1(u64 a)
1438        __attribute__ ((pure, always_inline));
1439static inline u64 CGXX_CMRX_TX_STAT1(u64 a)
1440{
1441        return 0x708 + 0x40000 * a;
1442}
1443
1444/**
1445 * Register (RSL) cgx#_cmr#_tx_stat10
1446 *
1447 * CGX CMR Transmit Statistics Registers 10
1448 */
1449union cgxx_cmrx_tx_stat10 {
1450        u64 u;
1451        struct cgxx_cmrx_tx_stat10_s {
1452                u64 hist4                            : 48;
1453                u64 reserved_48_63                   : 16;
1454        } s;
1455        /* struct cgxx_cmrx_tx_stat10_s cn; */
1456};
1457
1458static inline u64 CGXX_CMRX_TX_STAT10(u64 a)
1459        __attribute__ ((pure, always_inline));
1460static inline u64 CGXX_CMRX_TX_STAT10(u64 a)
1461{
1462        return 0x750 + 0x40000 * a;
1463}
1464
1465/**
1466 * Register (RSL) cgx#_cmr#_tx_stat11
1467 *
1468 * CGX CMR Transmit Statistics Registers 11
1469 */
1470union cgxx_cmrx_tx_stat11 {
1471        u64 u;
1472        struct cgxx_cmrx_tx_stat11_s {
1473                u64 hist5                            : 48;
1474                u64 reserved_48_63                   : 16;
1475        } s;
1476        /* struct cgxx_cmrx_tx_stat11_s cn; */
1477};
1478
1479static inline u64 CGXX_CMRX_TX_STAT11(u64 a)
1480        __attribute__ ((pure, always_inline));
1481static inline u64 CGXX_CMRX_TX_STAT11(u64 a)
1482{
1483        return 0x758 + 0x40000 * a;
1484}
1485
1486/**
1487 * Register (RSL) cgx#_cmr#_tx_stat12
1488 *
1489 * CGX CMR Transmit Statistics Registers 12
1490 */
1491union cgxx_cmrx_tx_stat12 {
1492        u64 u;
1493        struct cgxx_cmrx_tx_stat12_s {
1494                u64 hist6                            : 48;
1495                u64 reserved_48_63                   : 16;
1496        } s;
1497        /* struct cgxx_cmrx_tx_stat12_s cn; */
1498};
1499
1500static inline u64 CGXX_CMRX_TX_STAT12(u64 a)
1501        __attribute__ ((pure, always_inline));
1502static inline u64 CGXX_CMRX_TX_STAT12(u64 a)
1503{
1504        return 0x760 + 0x40000 * a;
1505}
1506
1507/**
1508 * Register (RSL) cgx#_cmr#_tx_stat13
1509 *
1510 * CGX CMR Transmit Statistics Registers 13
1511 */
1512union cgxx_cmrx_tx_stat13 {
1513        u64 u;
1514        struct cgxx_cmrx_tx_stat13_s {
1515                u64 hist7                            : 48;
1516                u64 reserved_48_63                   : 16;
1517        } s;
1518        /* struct cgxx_cmrx_tx_stat13_s cn; */
1519};
1520
1521static inline u64 CGXX_CMRX_TX_STAT13(u64 a)
1522        __attribute__ ((pure, always_inline));
1523static inline u64 CGXX_CMRX_TX_STAT13(u64 a)
1524{
1525        return 0x768 + 0x40000 * a;
1526}
1527
1528/**
1529 * Register (RSL) cgx#_cmr#_tx_stat14
1530 *
1531 * CGX CMR Transmit Statistics Registers 14
1532 */
1533union cgxx_cmrx_tx_stat14 {
1534        u64 u;
1535        struct cgxx_cmrx_tx_stat14_s {
1536                u64 bcst                             : 48;
1537                u64 reserved_48_63                   : 16;
1538        } s;
1539        /* struct cgxx_cmrx_tx_stat14_s cn; */
1540};
1541
1542static inline u64 CGXX_CMRX_TX_STAT14(u64 a)
1543        __attribute__ ((pure, always_inline));
1544static inline u64 CGXX_CMRX_TX_STAT14(u64 a)
1545{
1546        return 0x770 + 0x40000 * a;
1547}
1548
1549/**
1550 * Register (RSL) cgx#_cmr#_tx_stat15
1551 *
1552 * CGX CMR Transmit Statistics Registers 15
1553 */
1554union cgxx_cmrx_tx_stat15 {
1555        u64 u;
1556        struct cgxx_cmrx_tx_stat15_s {
1557                u64 mcst                             : 48;
1558                u64 reserved_48_63                   : 16;
1559        } s;
1560        /* struct cgxx_cmrx_tx_stat15_s cn; */
1561};
1562
1563static inline u64 CGXX_CMRX_TX_STAT15(u64 a)
1564        __attribute__ ((pure, always_inline));
1565static inline u64 CGXX_CMRX_TX_STAT15(u64 a)
1566{
1567        return 0x778 + 0x40000 * a;
1568}
1569
1570/**
1571 * Register (RSL) cgx#_cmr#_tx_stat16
1572 *
1573 * CGX CMR Transmit Statistics Registers 16
1574 */
1575union cgxx_cmrx_tx_stat16 {
1576        u64 u;
1577        struct cgxx_cmrx_tx_stat16_s {
1578                u64 undflw                           : 48;
1579                u64 reserved_48_63                   : 16;
1580        } s;
1581        /* struct cgxx_cmrx_tx_stat16_s cn; */
1582};
1583
1584static inline u64 CGXX_CMRX_TX_STAT16(u64 a)
1585        __attribute__ ((pure, always_inline));
1586static inline u64 CGXX_CMRX_TX_STAT16(u64 a)
1587{
1588        return 0x780 + 0x40000 * a;
1589}
1590
1591/**
1592 * Register (RSL) cgx#_cmr#_tx_stat17
1593 *
1594 * CGX CMR Transmit Statistics Registers 17
1595 */
1596union cgxx_cmrx_tx_stat17 {
1597        u64 u;
1598        struct cgxx_cmrx_tx_stat17_s {
1599                u64 ctl                              : 48;
1600                u64 reserved_48_63                   : 16;
1601        } s;
1602        /* struct cgxx_cmrx_tx_stat17_s cn; */
1603};
1604
1605static inline u64 CGXX_CMRX_TX_STAT17(u64 a)
1606        __attribute__ ((pure, always_inline));
1607static inline u64 CGXX_CMRX_TX_STAT17(u64 a)
1608{
1609        return 0x788 + 0x40000 * a;
1610}
1611
1612/**
1613 * Register (RSL) cgx#_cmr#_tx_stat2
1614 *
1615 * CGX CMR Transmit Statistics Registers 2
1616 */
1617union cgxx_cmrx_tx_stat2 {
1618        u64 u;
1619        struct cgxx_cmrx_tx_stat2_s {
1620                u64 mcol                             : 48;
1621                u64 reserved_48_63                   : 16;
1622        } s;
1623        /* struct cgxx_cmrx_tx_stat2_s cn; */
1624};
1625
1626static inline u64 CGXX_CMRX_TX_STAT2(u64 a)
1627        __attribute__ ((pure, always_inline));
1628static inline u64 CGXX_CMRX_TX_STAT2(u64 a)
1629{
1630        return 0x710 + 0x40000 * a;
1631}
1632
1633/**
1634 * Register (RSL) cgx#_cmr#_tx_stat3
1635 *
1636 * CGX CMR Transmit Statistics Registers 3
1637 */
1638union cgxx_cmrx_tx_stat3 {
1639        u64 u;
1640        struct cgxx_cmrx_tx_stat3_s {
1641                u64 scol                             : 48;
1642                u64 reserved_48_63                   : 16;
1643        } s;
1644        /* struct cgxx_cmrx_tx_stat3_s cn; */
1645};
1646
1647static inline u64 CGXX_CMRX_TX_STAT3(u64 a)
1648        __attribute__ ((pure, always_inline));
1649static inline u64 CGXX_CMRX_TX_STAT3(u64 a)
1650{
1651        return 0x718 + 0x40000 * a;
1652}
1653
1654/**
1655 * Register (RSL) cgx#_cmr#_tx_stat4
1656 *
1657 * CGX CMR Transmit Statistics Registers 4
1658 */
1659union cgxx_cmrx_tx_stat4 {
1660        u64 u;
1661        struct cgxx_cmrx_tx_stat4_s {
1662                u64 octs                             : 48;
1663                u64 reserved_48_63                   : 16;
1664        } s;
1665        /* struct cgxx_cmrx_tx_stat4_s cn; */
1666};
1667
1668static inline u64 CGXX_CMRX_TX_STAT4(u64 a)
1669        __attribute__ ((pure, always_inline));
1670static inline u64 CGXX_CMRX_TX_STAT4(u64 a)
1671{
1672        return 0x720 + 0x40000 * a;
1673}
1674
1675/**
1676 * Register (RSL) cgx#_cmr#_tx_stat5
1677 *
1678 * CGX CMR Transmit Statistics Registers 5
1679 */
1680union cgxx_cmrx_tx_stat5 {
1681        u64 u;
1682        struct cgxx_cmrx_tx_stat5_s {
1683                u64 pkts                             : 48;
1684                u64 reserved_48_63                   : 16;
1685        } s;
1686        /* struct cgxx_cmrx_tx_stat5_s cn; */
1687};
1688
1689static inline u64 CGXX_CMRX_TX_STAT5(u64 a)
1690        __attribute__ ((pure, always_inline));
1691static inline u64 CGXX_CMRX_TX_STAT5(u64 a)
1692{
1693        return 0x728 + 0x40000 * a;
1694}
1695
1696/**
1697 * Register (RSL) cgx#_cmr#_tx_stat6
1698 *
1699 * CGX CMR Transmit Statistics Registers 6
1700 */
1701union cgxx_cmrx_tx_stat6 {
1702        u64 u;
1703        struct cgxx_cmrx_tx_stat6_s {
1704                u64 hist0                            : 48;
1705                u64 reserved_48_63                   : 16;
1706        } s;
1707        /* struct cgxx_cmrx_tx_stat6_s cn; */
1708};
1709
1710static inline u64 CGXX_CMRX_TX_STAT6(u64 a)
1711        __attribute__ ((pure, always_inline));
1712static inline u64 CGXX_CMRX_TX_STAT6(u64 a)
1713{
1714        return 0x730 + 0x40000 * a;
1715}
1716
1717/**
1718 * Register (RSL) cgx#_cmr#_tx_stat7
1719 *
1720 * CGX CMR Transmit Statistics Registers 7
1721 */
1722union cgxx_cmrx_tx_stat7 {
1723        u64 u;
1724        struct cgxx_cmrx_tx_stat7_s {
1725                u64 hist1                            : 48;
1726                u64 reserved_48_63                   : 16;
1727        } s;
1728        /* struct cgxx_cmrx_tx_stat7_s cn; */
1729};
1730
1731static inline u64 CGXX_CMRX_TX_STAT7(u64 a)
1732        __attribute__ ((pure, always_inline));
1733static inline u64 CGXX_CMRX_TX_STAT7(u64 a)
1734{
1735        return 0x738 + 0x40000 * a;
1736}
1737
1738/**
1739 * Register (RSL) cgx#_cmr#_tx_stat8
1740 *
1741 * CGX CMR Transmit Statistics Registers 8
1742 */
1743union cgxx_cmrx_tx_stat8 {
1744        u64 u;
1745        struct cgxx_cmrx_tx_stat8_s {
1746                u64 hist2                            : 48;
1747                u64 reserved_48_63                   : 16;
1748        } s;
1749        /* struct cgxx_cmrx_tx_stat8_s cn; */
1750};
1751
1752static inline u64 CGXX_CMRX_TX_STAT8(u64 a)
1753        __attribute__ ((pure, always_inline));
1754static inline u64 CGXX_CMRX_TX_STAT8(u64 a)
1755{
1756        return 0x740 + 0x40000 * a;
1757}
1758
1759/**
1760 * Register (RSL) cgx#_cmr#_tx_stat9
1761 *
1762 * CGX CMR Transmit Statistics Registers 9
1763 */
1764union cgxx_cmrx_tx_stat9 {
1765        u64 u;
1766        struct cgxx_cmrx_tx_stat9_s {
1767                u64 hist3                            : 48;
1768                u64 reserved_48_63                   : 16;
1769        } s;
1770        /* struct cgxx_cmrx_tx_stat9_s cn; */
1771};
1772
1773static inline u64 CGXX_CMRX_TX_STAT9(u64 a)
1774        __attribute__ ((pure, always_inline));
1775static inline u64 CGXX_CMRX_TX_STAT9(u64 a)
1776{
1777        return 0x748 + 0x40000 * a;
1778}
1779
1780/**
1781 * Register (RSL) cgx#_cmr#_tx_stat_pri#_xoff
1782 *
1783 * CGX CMR TX XON to XOFF transition Registers
1784 */
1785union cgxx_cmrx_tx_stat_prix_xoff {
1786        u64 u;
1787        struct cgxx_cmrx_tx_stat_prix_xoff_s {
1788                u64 cnt                              : 48;
1789                u64 reserved_48_63                   : 16;
1790        } s;
1791        /* struct cgxx_cmrx_tx_stat_prix_xoff_s cn; */
1792};
1793
1794static inline u64 CGXX_CMRX_TX_STAT_PRIX_XOFF(u64 a, u64 b)
1795        __attribute__ ((pure, always_inline));
1796static inline u64 CGXX_CMRX_TX_STAT_PRIX_XOFF(u64 a, u64 b)
1797{
1798        return 0x800 + 0x40000 * a + 8 * b;
1799}
1800
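/*
 * Usage sketch (illustrative, not part of the generated definitions):
 * the 48-bit CMR TX counters are normally read through their union
 * types so the [CNT] field is extracted cleanly.  readq() from
 * <asm/io.h>, the mapped CGX "base" address and the "lmac"/"prio"
 * indices are assumptions of this example.
 *
 *	union cgxx_cmrx_tx_stat_prix_xoff xoff;
 *
 *	xoff.u = readq(base + CGXX_CMRX_TX_STAT_PRIX_XOFF(lmac, prio));
 *	printf("LMAC %llu prio %llu XON->XOFF transitions: %llu\n",
 *	       (unsigned long long)lmac, (unsigned long long)prio,
 *	       (unsigned long long)xoff.s.cnt);
 */
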
1801/**
1802 * Register (RSL) cgx#_cmr_bad
1803 *
1804 * CGX CMR Bad Registers
1805 */
1806union cgxx_cmr_bad {
1807        u64 u;
1808        struct cgxx_cmr_bad_s {
1809                u64 rxb_nxl                          : 1;
1810                u64 reserved_1_63                    : 63;
1811        } s;
1812        /* struct cgxx_cmr_bad_s cn; */
1813};
1814
1815static inline u64 CGXX_CMR_BAD(void)
1816        __attribute__ ((pure, always_inline));
1817static inline u64 CGXX_CMR_BAD(void)
1818{
1819        return 0x1020;
1820}
1821
1822/**
1823 * Register (RSL) cgx#_cmr_chan_msk_and
1824 *
1825 * CGX CMR Backpressure Channel Mask AND Registers
1826 */
1827union cgxx_cmr_chan_msk_and {
1828        u64 u;
1829        struct cgxx_cmr_chan_msk_and_s {
1830                u64 msk_and                          : 64;
1831        } s;
1832        /* struct cgxx_cmr_chan_msk_and_s cn; */
1833};
1834
1835static inline u64 CGXX_CMR_CHAN_MSK_AND(void)
1836        __attribute__ ((pure, always_inline));
1837static inline u64 CGXX_CMR_CHAN_MSK_AND(void)
1838{
1839        return 0x110;
1840}
1841
1842/**
1843 * Register (RSL) cgx#_cmr_chan_msk_or
1844 *
1845 * CGX Backpressure Channel Mask OR Registers
1846 */
1847union cgxx_cmr_chan_msk_or {
1848        u64 u;
1849        struct cgxx_cmr_chan_msk_or_s {
1850                u64 msk_or                           : 64;
1851        } s;
1852        /* struct cgxx_cmr_chan_msk_or_s cn; */
1853};
1854
1855static inline u64 CGXX_CMR_CHAN_MSK_OR(void)
1856        __attribute__ ((pure, always_inline));
1857static inline u64 CGXX_CMR_CHAN_MSK_OR(void)
1858{
1859        return 0x118;
1860}
1861
1862/**
1863 * Register (RSL) cgx#_cmr_eco
1864 *
1865 * INTERNAL: CGX ECO Registers
1866 */
1867union cgxx_cmr_eco {
1868        u64 u;
1869        struct cgxx_cmr_eco_s {
1870                u64 eco_rw                           : 32;
1871                u64 eco_ro                           : 32;
1872        } s;
1873        /* struct cgxx_cmr_eco_s cn; */
1874};
1875
1876static inline u64 CGXX_CMR_ECO(void)
1877        __attribute__ ((pure, always_inline));
1878static inline u64 CGXX_CMR_ECO(void)
1879{
1880        return 0x1028;
1881}
1882
1883/**
1884 * Register (RSL) cgx#_cmr_global_config
1885 *
1886 * CGX CMR Global Configuration Register This register configures the
1887 * global CMR, PCS, and MAC.
1888 */
1889union cgxx_cmr_global_config {
1890        u64 u;
1891        struct cgxx_cmr_global_config_s {
1892                u64 pmux_sds_sel                     : 1;
1893                u64 cgx_clk_enable                   : 1;
1894                u64 cmr_x2p_reset                    : 3;
1895                u64 interleave_mode                  : 1;
1896                u64 fcs_strip                        : 1;
1897                u64 ncsi_lmac_id                     : 2;
1898                u64 cmr_ncsi_drop                    : 1;
1899                u64 cmr_ncsi_reset                   : 1;
1900                u64 cmr_ncsi_tag_cnt                 : 13;
1901                u64 cmr_clken_ovrd                   : 1;
1902                u64 reserved_25_63                   : 39;
1903        } s;
1904        /* struct cgxx_cmr_global_config_s cn; */
1905};
1906
1907static inline u64 CGXX_CMR_GLOBAL_CONFIG(void)
1908        __attribute__ ((pure, always_inline));
1909static inline u64 CGXX_CMR_GLOBAL_CONFIG(void)
1910{
1911        return 8;
1912}
1913
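/*
 * Usage sketch (illustrative): a read-modify-write of the global
 * configuration through the bit-field view of the union.  The mapped
 * CGX "base" address and readq()/writeq() from <asm/io.h> are assumed
 * to be provided by the caller.
 *
 *	union cgxx_cmr_global_config cfg;
 *
 *	cfg.u = readq(base + CGXX_CMR_GLOBAL_CONFIG());
 *	cfg.s.fcs_strip = 1;		// strip FCS from received frames
 *	cfg.s.ncsi_lmac_id = 0;		// LMAC carrying NCSI traffic
 *	writeq(cfg.u, base + CGXX_CMR_GLOBAL_CONFIG());
 */
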
1914/**
1915 * Register (RSL) cgx#_cmr_mem_int
1916 *
1917 * CGX CMR Memory Interrupt Register
1918 */
1919union cgxx_cmr_mem_int {
1920        u64 u;
1921        struct cgxx_cmr_mem_int_s {
1922                u64 gmp_in_overfl                    : 1;
1923                u64 smu_in_overfl                    : 1;
1924                u64 reserved_2_63                    : 62;
1925        } s;
1926        /* struct cgxx_cmr_mem_int_s cn; */
1927};
1928
1929static inline u64 CGXX_CMR_MEM_INT(void)
1930        __attribute__ ((pure, always_inline));
1931static inline u64 CGXX_CMR_MEM_INT(void)
1932{
1933        return 0x10;
1934}
1935
1936/**
1937 * Register (RSL) cgx#_cmr_mem_int_ena_w1c
1938 *
1939 * CGX CMR Memory Interrupt Enable Clear Register This register clears
1940 * interrupt enable bits.
1941 */
1942union cgxx_cmr_mem_int_ena_w1c {
1943        u64 u;
1944        struct cgxx_cmr_mem_int_ena_w1c_s {
1945                u64 gmp_in_overfl                    : 1;
1946                u64 smu_in_overfl                    : 1;
1947                u64 reserved_2_63                    : 62;
1948        } s;
1949        /* struct cgxx_cmr_mem_int_ena_w1c_s cn; */
1950};
1951
1952static inline u64 CGXX_CMR_MEM_INT_ENA_W1C(void)
1953        __attribute__ ((pure, always_inline));
1954static inline u64 CGXX_CMR_MEM_INT_ENA_W1C(void)
1955{
1956        return 0x20;
1957}
1958
1959/**
1960 * Register (RSL) cgx#_cmr_mem_int_ena_w1s
1961 *
1962 * CGX CMR Memory Interrupt Enable Set Register This register sets
1963 * interrupt enable bits.
1964 */
1965union cgxx_cmr_mem_int_ena_w1s {
1966        u64 u;
1967        struct cgxx_cmr_mem_int_ena_w1s_s {
1968                u64 gmp_in_overfl                    : 1;
1969                u64 smu_in_overfl                    : 1;
1970                u64 reserved_2_63                    : 62;
1971        } s;
1972        /* struct cgxx_cmr_mem_int_ena_w1s_s cn; */
1973};
1974
1975static inline u64 CGXX_CMR_MEM_INT_ENA_W1S(void)
1976        __attribute__ ((pure, always_inline));
1977static inline u64 CGXX_CMR_MEM_INT_ENA_W1S(void)
1978{
1979        return 0x28;
1980}
1981
1982/**
1983 * Register (RSL) cgx#_cmr_mem_int_w1s
1984 *
1985 * CGX CMR Memory Interrupt Set Register This register sets interrupt
1986 * bits.
1987 */
1988union cgxx_cmr_mem_int_w1s {
1989        u64 u;
1990        struct cgxx_cmr_mem_int_w1s_s {
1991                u64 gmp_in_overfl                    : 1;
1992                u64 smu_in_overfl                    : 1;
1993                u64 reserved_2_63                    : 62;
1994        } s;
1995        /* struct cgxx_cmr_mem_int_w1s_s cn; */
1996};
1997
1998static inline u64 CGXX_CMR_MEM_INT_W1S(void)
1999        __attribute__ ((pure, always_inline));
2000static inline u64 CGXX_CMR_MEM_INT_W1S(void)
2001{
2002        return 0x18;
2003}
2004
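/*
 * Usage sketch (illustrative): the W1S/W1C pairs above follow the usual
 * interrupt handshake - set enables through the ENA_W1S register, then
 * acknowledge a pending cause by writing it back (write-one-to-clear) to
 * the interrupt register.  BIT_ULL() from <linux/bitops.h>,
 * readq()/writeq() and the mapped "base" address are assumed.
 *
 *	union cgxx_cmr_mem_int mem;
 *
 *	writeq(BIT_ULL(0) | BIT_ULL(1), base + CGXX_CMR_MEM_INT_ENA_W1S());
 *	mem.u = readq(base + CGXX_CMR_MEM_INT());
 *	if (mem.s.gmp_in_overfl || mem.s.smu_in_overfl)
 *		writeq(mem.u, base + CGXX_CMR_MEM_INT());	// ack
 */
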
2005/**
2006 * Register (RSL) cgx#_cmr_nic_nxc_adr
2007 *
2008 * CGX CMR NIC NXC Exception Registers
2009 */
2010union cgxx_cmr_nic_nxc_adr {
2011        u64 u;
2012        struct cgxx_cmr_nic_nxc_adr_s {
2013                u64 channel                          : 12;
2014                u64 lmac_id                          : 4;
2015                u64 reserved_16_63                   : 48;
2016        } s;
2017        /* struct cgxx_cmr_nic_nxc_adr_s cn; */
2018};
2019
2020static inline u64 CGXX_CMR_NIC_NXC_ADR(void)
2021        __attribute__ ((pure, always_inline));
2022static inline u64 CGXX_CMR_NIC_NXC_ADR(void)
2023{
2024        return 0x1030;
2025}
2026
2027/**
2028 * Register (RSL) cgx#_cmr_nix0_nxc_adr
2029 *
2030 * CGX CMR NIX0 NXC Exception Registers
2031 */
2032union cgxx_cmr_nix0_nxc_adr {
2033        u64 u;
2034        struct cgxx_cmr_nix0_nxc_adr_s {
2035                u64 channel                          : 12;
2036                u64 lmac_id                          : 4;
2037                u64 channel_e                        : 12;
2038                u64 lmac_e_id                        : 4;
2039                u64 reserved_32_63                   : 32;
2040        } s;
2041        /* struct cgxx_cmr_nix0_nxc_adr_s cn; */
2042};
2043
2044static inline u64 CGXX_CMR_NIX0_NXC_ADR(void)
2045        __attribute__ ((pure, always_inline));
2046static inline u64 CGXX_CMR_NIX0_NXC_ADR(void)
2047{
2048        return 0x1038;
2049}
2050
2051/**
2052 * Register (RSL) cgx#_cmr_nix1_nxc_adr
2053 *
2054 * CGX CMR NIX1 NXC Exception Registers
2055 */
2056union cgxx_cmr_nix1_nxc_adr {
2057        u64 u;
2058        struct cgxx_cmr_nix1_nxc_adr_s {
2059                u64 channel                          : 12;
2060                u64 lmac_id                          : 4;
2061                u64 channel_e                        : 12;
2062                u64 lmac_e_id                        : 4;
2063                u64 reserved_32_63                   : 32;
2064        } s;
2065        /* struct cgxx_cmr_nix1_nxc_adr_s cn; */
2066};
2067
2068static inline u64 CGXX_CMR_NIX1_NXC_ADR(void)
2069        __attribute__ ((pure, always_inline));
2070static inline u64 CGXX_CMR_NIX1_NXC_ADR(void)
2071{
2072        return 0x1040;
2073}
2074
2075/**
2076 * Register (RSL) cgx#_cmr_p2x#_count
2077 *
2078 * CGX P2X Activity Register
2079 */
2080union cgxx_cmr_p2xx_count {
2081        u64 u;
2082        struct cgxx_cmr_p2xx_count_s {
2083                u64 p2x_cnt                          : 64;
2084        } s;
2085        /* struct cgxx_cmr_p2xx_count_s cn; */
2086};
2087
2088static inline u64 CGXX_CMR_P2XX_COUNT(u64 a)
2089        __attribute__ ((pure, always_inline));
2090static inline u64 CGXX_CMR_P2XX_COUNT(u64 a)
2091{
2092        return 0x168 + 0x1000 * a;
2093}
2094
2095/**
2096 * Register (RSL) cgx#_cmr_rx_dmac#_cam0
2097 *
2098 * CGX CMR Receive CAM Registers These registers provide access to the 32
2099 * DMAC CAM0 entries in CGX, for use by X2P/NIX bound traffic.
2100 */
2101union cgxx_cmr_rx_dmacx_cam0 {
2102        u64 u;
2103        struct cgxx_cmr_rx_dmacx_cam0_s {
2104                u64 adr                              : 48;
2105                u64 en                               : 1;
2106                u64 id                               : 2;
2107                u64 reserved_51_63                   : 13;
2108        } s;
2109        /* struct cgxx_cmr_rx_dmacx_cam0_s cn; */
2110};
2111
2112static inline u64 CGXX_CMR_RX_DMACX_CAM0(u64 a)
2113        __attribute__ ((pure, always_inline));
2114static inline u64 CGXX_CMR_RX_DMACX_CAM0(u64 a)
2115{
2116        return 0x200 + 8 * a;
2117}
2118
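/*
 * Usage sketch (illustrative): installing one unicast DMAC filter entry
 * for an LMAC.  "base", "lmac_id", "index" and the 48-bit "mac" value
 * are assumptions of this example; writeq() comes from <asm/io.h>.
 *
 *	union cgxx_cmr_rx_dmacx_cam0 cam;
 *
 *	cam.u = 0;
 *	cam.s.adr = mac;		// DMAC to accept (bits 47:0)
 *	cam.s.id  = lmac_id;		// LMAC this CAM entry belongs to
 *	cam.s.en  = 1;
 *	writeq(cam.u, base + CGXX_CMR_RX_DMACX_CAM0(index));
 */
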
2119/**
2120 * Register (RSL) cgx#_cmr_rx_dmac#_cam1
2121 *
2122 * CGX CMR Receive CAM Registers These registers provide access to the 32
2123 * DMAC CAM entries in CGX for use by NCSI bound traffic. See
2124 * CGX()_CMR_GLOBAL_CONFIG[NCSI_LMAC_ID] and CGX()_CMR_RX_STEERING1()
2125 * registers.
2126 */
2127union cgxx_cmr_rx_dmacx_cam1 {
2128        u64 u;
2129        struct cgxx_cmr_rx_dmacx_cam1_s {
2130                u64 adr                              : 48;
2131                u64 en                               : 1;
2132                u64 id                               : 2;
2133                u64 reserved_51_63                   : 13;
2134        } s;
2135        /* struct cgxx_cmr_rx_dmacx_cam1_s cn; */
2136};
2137
2138static inline u64 CGXX_CMR_RX_DMACX_CAM1(u64 a)
2139        __attribute__ ((pure, always_inline));
2140static inline u64 CGXX_CMR_RX_DMACX_CAM1(u64 a)
2141{
2142        return 0x400 + 8 * a;
2143}
2144
2145/**
2146 * Register (RSL) cgx#_cmr_rx_lmacs
2147 *
2148 * CGX CMR Receive Logical MACs Registers
2149 */
2150union cgxx_cmr_rx_lmacs {
2151        u64 u;
2152        struct cgxx_cmr_rx_lmacs_s {
2153                u64 lmacs                            : 3;
2154                u64 reserved_3_63                    : 61;
2155        } s;
2156        /* struct cgxx_cmr_rx_lmacs_s cn; */
2157};
2158
2159static inline u64 CGXX_CMR_RX_LMACS(void)
2160        __attribute__ ((pure, always_inline));
2161static inline u64 CGXX_CMR_RX_LMACS(void)
2162{
2163        return 0x128;
2164}
2165
2166/**
2167 * Register (RSL) cgx#_cmr_rx_ovr_bp
2168 *
2169 * CGX CMR Receive-Ports Backpressure Override Registers Per-LMAC
2170 * backpressure override register. For SMU, CGX()_CMR_RX_OVR_BP[EN]\<0\>
2171 * must be set to one and CGX()_CMR_RX_OVR_BP[BP]\<0\> must be cleared to
2172 * zero (to forcibly disable hardware-automatic 802.3 PAUSE packet
2173 * generation) with the HiGig2 Protocol when
2174 * CGX()_SMU()_HG2_CONTROL[HG2TX_EN]=0. (The HiGig2 protocol is indicated
2175 * by CGX()_SMU()_TX_CTL[HG_EN]=1 and CGX()_SMU()_RX_UDD_SKP[LEN]=16).
2176 * Hardware can only auto-generate backpressure through HiGig2 messages
2177 * (optionally, when CGX()_SMU()_HG2_CONTROL[HG2TX_EN]=1) with the HiGig2
2178 * protocol.
2179 */
2180union cgxx_cmr_rx_ovr_bp {
2181        u64 u;
2182        struct cgxx_cmr_rx_ovr_bp_s {
2183                u64 ign_fifo_bp                      : 4;
2184                u64 bp                               : 4;
2185                u64 en                               : 4;
2186                u64 reserved_12_63                   : 52;
2187        } s;
2188        /* struct cgxx_cmr_rx_ovr_bp_s cn; */
2189};
2190
2191static inline u64 CGXX_CMR_RX_OVR_BP(void)
2192        __attribute__ ((pure, always_inline));
2193static inline u64 CGXX_CMR_RX_OVR_BP(void)
2194{
2195        return 0x130;
2196}
2197
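/*
 * Usage sketch (illustrative): the override described above - take over
 * backpressure for LMAC 0 and force it deasserted.  BIT() from
 * <linux/bitops.h>, readq()/writeq() and the mapped "base" address are
 * assumed.
 *
 *	union cgxx_cmr_rx_ovr_bp ovr;
 *
 *	ovr.u = readq(base + CGXX_CMR_RX_OVR_BP());
 *	ovr.s.en |= BIT(0);		// override backpressure for LMAC 0
 *	ovr.s.bp &= ~BIT(0);		// ...and keep it deasserted
 *	writeq(ovr.u, base + CGXX_CMR_RX_OVR_BP());
 */
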
2198/**
2199 * Register (RSL) cgx#_cmr_rx_stat10
2200 *
2201 * CGX Receive Status Register 10 These registers provide a count of
2202 * octets of filtered DMAC1 or VLAN STEERING1 packets.
2203 */
2204union cgxx_cmr_rx_stat10 {
2205        u64 u;
2206        struct cgxx_cmr_rx_stat10_s {
2207                u64 cnt                              : 48;
2208                u64 reserved_48_63                   : 16;
2209        } s;
2210        /* struct cgxx_cmr_rx_stat10_s cn; */
2211};
2212
2213static inline u64 CGXX_CMR_RX_STAT10(void)
2214        __attribute__ ((pure, always_inline));
2215static inline u64 CGXX_CMR_RX_STAT10(void)
2216{
2217        return 0xc0;
2218}
2219
2220/**
2221 * Register (RSL) cgx#_cmr_rx_stat11
2222 *
2223 * CGX Receive Status Register 11 This register provides a count of
2224 * packets dropped at the NCSI interface. This includes drops due to
2225 * CGX()_CMR_GLOBAL_CONFIG[CMR_NCSI_DROP] or NCSI FIFO full. The count of
2226 * dropped NCSI packets is not accounted for in any other stats
2227 * registers.
2228 */
2229union cgxx_cmr_rx_stat11 {
2230        u64 u;
2231        struct cgxx_cmr_rx_stat11_s {
2232                u64 cnt                              : 48;
2233                u64 reserved_48_63                   : 16;
2234        } s;
2235        /* struct cgxx_cmr_rx_stat11_s cn; */
2236};
2237
2238static inline u64 CGXX_CMR_RX_STAT11(void)
2239        __attribute__ ((pure, always_inline));
2240static inline u64 CGXX_CMR_RX_STAT11(void)
2241{
2242        return 0xc8;
2243}
2244
2245/**
2246 * Register (RSL) cgx#_cmr_rx_stat12
2247 *
2248 * CGX Receive Status Register 12 This register provides a count of
2249 * octets dropped at the NCSI interface.
2250 */
2251union cgxx_cmr_rx_stat12 {
2252        u64 u;
2253        struct cgxx_cmr_rx_stat12_s {
2254                u64 cnt                              : 48;
2255                u64 reserved_48_63                   : 16;
2256        } s;
2257        /* struct cgxx_cmr_rx_stat12_s cn; */
2258};
2259
2260static inline u64 CGXX_CMR_RX_STAT12(void)
2261        __attribute__ ((pure, always_inline));
2262static inline u64 CGXX_CMR_RX_STAT12(void)
2263{
2264        return 0xd0;
2265}
2266
2267/**
2268 * Register (RSL) cgx#_cmr_rx_stat9
2269 *
2270 * CGX Receive Status Register 9 These registers provide a count of all
2271 * received packets that were dropped by the DMAC1 or VLAN STEERING1
2272 * filter. Packets that are dropped by the DMAC1 or VLAN STEERING1
2273 * filters are counted here regardless of whether they were ERR packets;
2274 * the count does not include those reported in CGX()_CMR()_RX_STAT6. 16B
2275 * packets or smaller (20B in case of FCS strip) as the result of
2276 * truncation or other means are not dropped by CGX (unless filter and
2277 * decision is also asserted) and will never appear in this count. Should
2278 * the MAC signal to the CMR that the packet be filtered upon decision
2279 * before the end of packet, then STAT9 and STAT10 will not be updated.
2280 */
2281union cgxx_cmr_rx_stat9 {
2282        u64 u;
2283        struct cgxx_cmr_rx_stat9_s {
2284                u64 cnt                              : 48;
2285                u64 reserved_48_63                   : 16;
2286        } s;
2287        /* struct cgxx_cmr_rx_stat9_s cn; */
2288};
2289
2290static inline u64 CGXX_CMR_RX_STAT9(void)
2291        __attribute__ ((pure, always_inline));
2292static inline u64 CGXX_CMR_RX_STAT9(void)
2293{
2294        return 0xb8;
2295}
2296
2297/**
2298 * Register (RSL) cgx#_cmr_rx_steering0#
2299 *
2300 * CGX CMR Receive Steering0 Registers These registers, along with
2301 * CGX()_CMR_RX_STEERING_VETYPE0(), provide eight filters for identifying
2302 * and steering receive traffic to X2P/NIX. Received packets are only
2303 * passed to X2P/NIX when the DMAC0 filter result is ACCEPT and STEERING0
2304 * filter result is PASS. See also CGX()_CMR()_RX_DMAC_CTL0.  Internal:
2305 * "* ALGORITHM \<pre\> rx_steering(uint48 pkt_dmac, uint16 pkt_etype,
2306 * uint16 pkt_vlan_id) {    for (int i = 0; i \< 8; i++) {       steer =
2307 * CGX()_CMR_RX_STEERING0(i);       vetype =
2308 * CGX()_CMR_RX_STEERING_VETYPE0(i);       if (steer[MCST_EN] ||
2309 * steer[DMAC_EN] || vetype[VLAN_EN] || vetype[VLAN_TAG_EN]) {
2310 * // Filter is enabled.          if (   (!steer[MCST_EN] ||
2311 * is_mcst(pkt_dmac))              && (!steer[DMAC_EN] || pkt_dmac ==
2312 * steer[DMAC])              && (!vetype[VLAN_EN] || pkt_vlan_id ==
2313 * vetype[VLAN_ID])              && (!vetype[VLAN_TAG_EN] || pkt_etype ==
2314 * vetype[VLAN_ETYPE]) )          {             // Filter match (all
2315 * enabled matching criteria are met).             return steer[PASS];
2316 * }       }    }    return CGX()_CMR_RX_STEERING_DEFAULT0[PASS]; // No
2317 * match } \</pre\>"
2318 */
2319union cgxx_cmr_rx_steering0x {
2320        u64 u;
2321        struct cgxx_cmr_rx_steering0x_s {
2322                u64 dmac                             : 48;
2323                u64 dmac_en                          : 1;
2324                u64 mcst_en                          : 1;
2325                u64 pass                             : 1;
2326                u64 reserved_51_63                   : 13;
2327        } s;
2328        /* struct cgxx_cmr_rx_steering0x_s cn; */
2329};
2330
2331static inline u64 CGXX_CMR_RX_STEERING0X(u64 a)
2332        __attribute__ ((pure, always_inline));
2333static inline u64 CGXX_CMR_RX_STEERING0X(u64 a)
2334{
2335        return 0x300 + 8 * a;
2336}
2337
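/*
 * Usage sketch (illustrative): programming one of the eight steering
 * filters per the algorithm above - match a single DMAC and pass it to
 * X2P/NIX, with no VLAN criteria.  "base", "filter" and "mac" are
 * assumptions of this example; writeq() comes from <asm/io.h>.
 *
 *	union cgxx_cmr_rx_steering0x steer;
 *	union cgxx_cmr_rx_steering_vetype0x vetype;
 *
 *	steer.u = 0;
 *	steer.s.dmac = mac;		// match this destination MAC...
 *	steer.s.dmac_en = 1;
 *	steer.s.pass = 1;		// ...and pass it to X2P/NIX
 *	writeq(steer.u, base + CGXX_CMR_RX_STEERING0X(filter));
 *
 *	vetype.u = 0;			// no VLAN criteria for this filter
 *	writeq(vetype.u, base + CGXX_CMR_RX_STEERING_VETYPE0X(filter));
 */
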
2338/**
2339 * Register (RSL) cgx#_cmr_rx_steering1#
2340 *
2341 * CGX CMR Receive Steering1 Registers These registers, along with
2342 * CGX()_CMR_RX_STEERING_VETYPE1(), provide eight filters for identifying
2343 * and steering NCSI receive traffic. Received packets are only passed to
2344 * NCSI when the DMAC1 filter result is ACCEPT and STEERING1 filter
2345 * result is PASS. See also CGX()_CMR_RX_DMAC()_CAM1 and
2346 * CGX()_CMR_RX_STEERING1(). For use with the LMAC associated with NCSI.
2347 * See CGX()_CMR_GLOBAL_CONFIG[NCSI_LMAC_ID].  Internal: ALGORITHM: See
2348 * CGX()_CMR_RX_STEERING0().
2349 */
2350union cgxx_cmr_rx_steering1x {
2351        u64 u;
2352        struct cgxx_cmr_rx_steering1x_s {
2353                u64 dmac                             : 48;
2354                u64 dmac_en                          : 1;
2355                u64 mcst_en                          : 1;
2356                u64 pass                             : 1;
2357                u64 reserved_51_63                   : 13;
2358        } s;
2359        /* struct cgxx_cmr_rx_steering1x_s cn; */
2360};
2361
2362static inline u64 CGXX_CMR_RX_STEERING1X(u64 a)
2363        __attribute__ ((pure, always_inline));
2364static inline u64 CGXX_CMR_RX_STEERING1X(u64 a)
2365{
2366        return 0x500 + 8 * a;
2367}
2368
2369/**
2370 * Register (RSL) cgx#_cmr_rx_steering_default0
2371 *
2372 * CGX CMR Receive Steering Default0 Destination Register For determining
2373 * the destination of traffic that does not meet the matching algorithm
2374 * described in registers CGX()_CMR_RX_STEERING0() and
2375 * CGX()_CMR_RX_STEERING_VETYPE0(). All 16B packets or smaller (20B in
2376 * case of FCS strip) as the result of truncation will steer to the
2377 * default destination.
2378 */
2379union cgxx_cmr_rx_steering_default0 {
2380        u64 u;
2381        struct cgxx_cmr_rx_steering_default0_s {
2382                u64 pass                             : 1;
2383                u64 reserved_1_63                    : 63;
2384        } s;
2385        /* struct cgxx_cmr_rx_steering_default0_s cn; */
2386};
2387
2388static inline u64 CGXX_CMR_RX_STEERING_DEFAULT0(void)
2389        __attribute__ ((pure, always_inline));
2390static inline u64 CGXX_CMR_RX_STEERING_DEFAULT0(void)
2391{
2392        return 0x3f0;
2393}
2394
2395/**
2396 * Register (RSL) cgx#_cmr_rx_steering_default1
2397 *
2398 * CGX CMR Receive Steering Default1 Destination Register For use with
2399 * the lmac_id associated with NCSI. See
2400 * CGX()_CMR_GLOBAL_CONFIG[NCSI_LMAC_ID]. For determining the destination
2401 * of traffic that does not meet the matching algorithm described in
2402 * registers CGX()_CMR_RX_STEERING1() and CGX()_CMR_RX_STEERING_VETYPE1().
2403 * All 16B packets or smaller (20B in case of FCS strip) as the result of
2404 * truncation will steer to the default destination.
2405 */
2406union cgxx_cmr_rx_steering_default1 {
2407        u64 u;
2408        struct cgxx_cmr_rx_steering_default1_s {
2409                u64 pass                             : 1;
2410                u64 reserved_1_63                    : 63;
2411        } s;
2412        /* struct cgxx_cmr_rx_steering_default1_s cn; */
2413};
2414
2415static inline u64 CGXX_CMR_RX_STEERING_DEFAULT1(void)
2416        __attribute__ ((pure, always_inline));
2417static inline u64 CGXX_CMR_RX_STEERING_DEFAULT1(void)
2418{
2419        return 0x5e0;
2420}
2421
2422/**
2423 * Register (RSL) cgx#_cmr_rx_steering_vetype0#
2424 *
2425 * CGX CMR Receive VLAN Ethertype0 Registers These registers, along with
2426 * CGX()_CMR_RX_STEERING0(), provide eight filters for identifying and
2427 * steering X2P/NIX receive traffic.
2428 */
2429union cgxx_cmr_rx_steering_vetype0x {
2430        u64 u;
2431        struct cgxx_cmr_rx_steering_vetype0x_s {
2432                u64 vlan_etype                       : 16;
2433                u64 vlan_tag_en                      : 1;
2434                u64 vlan_id                          : 12;
2435                u64 vlan_en                          : 1;
2436                u64 reserved_30_63                   : 34;
2437        } s;
2438        /* struct cgxx_cmr_rx_steering_vetype0x_s cn; */
2439};
2440
2441static inline u64 CGXX_CMR_RX_STEERING_VETYPE0X(u64 a)
2442        __attribute__ ((pure, always_inline));
2443static inline u64 CGXX_CMR_RX_STEERING_VETYPE0X(u64 a)
2444{
2445        return 0x380 + 8 * a;
2446}
2447
2448/**
2449 * Register (RSL) cgx#_cmr_rx_steering_vetype1#
2450 *
2451 * CGX CMR Receive VLAN Ethertype1 Registers For use with the lmac_id
2452 * associated with NCSI. See CGX()_CMR_GLOBAL_CONFIG[NCSI_LMAC_ID]. These
2453 * registers, along with CGX()_CMR_RX_STEERING1(), provide eight filters
2454 * for identifying and steering NCSI receive traffic.
2455 */
2456union cgxx_cmr_rx_steering_vetype1x {
2457        u64 u;
2458        struct cgxx_cmr_rx_steering_vetype1x_s {
2459                u64 vlan_etype                       : 16;
2460                u64 vlan_tag_en                      : 1;
2461                u64 vlan_id                          : 12;
2462                u64 vlan_en                          : 1;
2463                u64 reserved_30_63                   : 34;
2464        } s;
2465        /* struct cgxx_cmr_rx_steering_vetype1x_s cn; */
2466};
2467
2468static inline u64 CGXX_CMR_RX_STEERING_VETYPE1X(u64 a)
2469        __attribute__ ((pure, always_inline));
2470static inline u64 CGXX_CMR_RX_STEERING_VETYPE1X(u64 a)
2471{
2472        return 0x580 + 8 * a;
2473}
2474
2475/**
2476 * Register (RSL) cgx#_cmr_tx_lmacs
2477 *
2478 * CGX CMR Transmit Logical MACs Registers This register sets the number
2479 * of LMACs allowed on the TX interface. The value is important for
2480 * defining the partitioning of the transmit FIFO.
2481 */
2482union cgxx_cmr_tx_lmacs {
2483        u64 u;
2484        struct cgxx_cmr_tx_lmacs_s {
2485                u64 lmacs                            : 3;
2486                u64 reserved_3_63                    : 61;
2487        } s;
2488        /* struct cgxx_cmr_tx_lmacs_s cn; */
2489};
2490
2491static inline u64 CGXX_CMR_TX_LMACS(void)
2492        __attribute__ ((pure, always_inline));
2493static inline u64 CGXX_CMR_TX_LMACS(void)
2494{
2495        return 0x1000;
2496}
2497
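/*
 * Usage sketch (illustrative): the RX and TX LMAC counts are normally
 * kept in step so both FIFOs are partitioned consistently.  "base" and
 * "nlmacs" are assumptions of this example.
 *
 *	union cgxx_cmr_tx_lmacs tx_lmacs;
 *	union cgxx_cmr_rx_lmacs rx_lmacs;
 *
 *	tx_lmacs.u = readq(base + CGXX_CMR_TX_LMACS());
 *	rx_lmacs.u = readq(base + CGXX_CMR_RX_LMACS());
 *	tx_lmacs.s.lmacs = nlmacs;	// partitions the transmit FIFO
 *	rx_lmacs.s.lmacs = nlmacs;
 *	writeq(tx_lmacs.u, base + CGXX_CMR_TX_LMACS());
 *	writeq(rx_lmacs.u, base + CGXX_CMR_RX_LMACS());
 */
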
2498/**
2499 * Register (RSL) cgx#_cmr_x2p#_count
2500 *
2501 * CGX X2P Activity Register
2502 */
2503union cgxx_cmr_x2px_count {
2504        u64 u;
2505        struct cgxx_cmr_x2px_count_s {
2506                u64 x2p_cnt                          : 64;
2507        } s;
2508        /* struct cgxx_cmr_x2px_count_s cn; */
2509};
2510
2511static inline u64 CGXX_CMR_X2PX_COUNT(u64 a)
2512        __attribute__ ((pure, always_inline));
2513static inline u64 CGXX_CMR_X2PX_COUNT(u64 a)
2514{
2515        return 0x170 + 0x1000 * a;
2516}
2517
2518/**
2519 * Register (RSL) cgx#_const
2520 *
2521 * CGX CONST Registers This register contains constants for software
2522 * discovery.
2523 */
2524union cgxx_const {
2525        u64 u;
2526        struct cgxx_const_s {
2527                u64 tx_fifosz                        : 24;
2528                u64 lmacs                            : 8;
2529                u64 rx_fifosz                        : 24;
2530                u64 reserved_56_63                   : 8;
2531        } s;
2532        /* struct cgxx_const_s cn; */
2533};
2534
2535static inline u64 CGXX_CONST(void)
2536        __attribute__ ((pure, always_inline));
2537static inline u64 CGXX_CONST(void)
2538{
2539        return 0x2000;
2540}
2541
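/*
 * Usage sketch (illustrative): software discovery of a CGX instance -
 * read the constants once at probe time.  Only the mapped "base" address
 * and printf() are assumed here.
 *
 *	union cgxx_const c;
 *
 *	c.u = readq(base + CGXX_CONST());
 *	printf("CGX: %u LMACs, TX FIFO %u bytes, RX FIFO %u bytes\n",
 *	       (unsigned int)c.s.lmacs, (unsigned int)c.s.tx_fifosz,
 *	       (unsigned int)c.s.rx_fifosz);
 */
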
2542/**
2543 * Register (RSL) cgx#_const1
2544 *
2545 * CGX CONST1 Registers This register contains constants for software
2546 * discovery.
2547 */
2548union cgxx_const1 {
2549        u64 u;
2550        struct cgxx_const1_s {
2551                u64 types                            : 11;
2552                u64 res_types                        : 21;
2553                u64 reserved_32_63                   : 32;
2554        } s;
2555        /* struct cgxx_const1_s cn; */
2556};
2557
2558static inline u64 CGXX_CONST1(void)
2559        __attribute__ ((pure, always_inline));
2560static inline u64 CGXX_CONST1(void)
2561{
2562        return 0x2008;
2563}
2564
2565/**
2566 * Register (RSL) cgx#_gmp_gmi#_rx_wol_ctrl0
2567 *
2568 * CGX GMP GMI RX Wake-on-LAN Control 0 Registers
2569 */
2570union cgxx_gmp_gmix_rx_wol_ctrl0 {
2571        u64 u;
2572        struct cgxx_gmp_gmix_rx_wol_ctrl0_s {
2573                u64 dmac                             : 48;
2574                u64 pswd_len                         : 4;
2575                u64 reserved_52_63                   : 12;
2576        } s;
2577        /* struct cgxx_gmp_gmix_rx_wol_ctrl0_s cn; */
2578};
2579
2580static inline u64 CGXX_GMP_GMIX_RX_WOL_CTRL0(u64 a)
2581        __attribute__ ((pure, always_inline));
2582static inline u64 CGXX_GMP_GMIX_RX_WOL_CTRL0(u64 a)
2583{
2584        return 0x38a00 + 0x40000 * a;
2585}
2586
2587/**
2588 * Register (RSL) cgx#_gmp_gmi#_rx_wol_ctrl1
2589 *
2590 * CGX GMP GMI RX Wake-on-LAN Control 1 Registers
2591 */
2592union cgxx_gmp_gmix_rx_wol_ctrl1 {
2593        u64 u;
2594        struct cgxx_gmp_gmix_rx_wol_ctrl1_s {
2595                u64 pswd                             : 64;
2596        } s;
2597        /* struct cgxx_gmp_gmix_rx_wol_ctrl1_s cn; */
2598};
2599
2600static inline u64 CGXX_GMP_GMIX_RX_WOL_CTRL1(u64 a)
2601        __attribute__ ((pure, always_inline));
2602static inline u64 CGXX_GMP_GMIX_RX_WOL_CTRL1(u64 a)
2603{
2604        return 0x38a08 + 0x40000 * a;
2605}
2606
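/*
 * Usage sketch (illustrative): arming Wake-on-LAN for one LMAC - the
 * wake DMAC and password length go in CTRL0, the password itself in
 * CTRL1.  "base", "lmac", "mac", "pswd" and "pswd_len" are assumptions
 * of this example.
 *
 *	union cgxx_gmp_gmix_rx_wol_ctrl0 ctrl0;
 *
 *	ctrl0.u = 0;
 *	ctrl0.s.dmac = mac;		// DMAC allowed to wake the system
 *	ctrl0.s.pswd_len = pswd_len;
 *	writeq(ctrl0.u, base + CGXX_GMP_GMIX_RX_WOL_CTRL0(lmac));
 *	writeq(pswd, base + CGXX_GMP_GMIX_RX_WOL_CTRL1(lmac));
 */
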
2607/**
2608 * Register (RSL) cgx#_gmp_gmi#_tx_eee
2609 *
2610 * INTERNAL: CGX GMP GMI TX EEE Configure Registers  Reserved. Internal:
2611 * These registers control when GMP GMI TX requests to enter or exit
2612 * LPI. These registers take effect only when EEE is supported and
2613 * enabled for a given LMAC.
2614 */
2615union cgxx_gmp_gmix_tx_eee {
2616        u64 u;
2617        struct cgxx_gmp_gmix_tx_eee_s {
2618                u64 idle_thresh                      : 28;
2619                u64 reserved_28                      : 1;
2620                u64 force_lpi                        : 1;
2621                u64 wakeup                           : 1;
2622                u64 auto_lpi                         : 1;
2623                u64 idle_cnt                         : 28;
2624                u64 tx_lpi                           : 1;
2625                u64 tx_lpi_wait                      : 1;
2626                u64 sync_status_lpi_enable           : 1;
2627                u64 reserved_63                      : 1;
2628        } s;
2629        /* struct cgxx_gmp_gmix_tx_eee_s cn; */
2630};
2631
2632static inline u64 CGXX_GMP_GMIX_TX_EEE(u64 a)
2633        __attribute__ ((pure, always_inline));
2634static inline u64 CGXX_GMP_GMIX_TX_EEE(u64 a)
2635{
2636        return 0x38800 + 0x40000 * a;
2637}
2638
2639/**
2640 * Register (RSL) cgx#_gmp_gmi#_tx_eee_cfg1
2641 *
2642 * INTERNAL: CGX GMP GMI TX EEE Configure More Configuration Registers
2643 * Reserved. Internal: Controls the GMP exiting of LPI and starting to
2644 * send data.
2645 */
2646union cgxx_gmp_gmix_tx_eee_cfg1 {
2647        u64 u;
2648        struct cgxx_gmp_gmix_tx_eee_cfg1_s {
2649                u64 wake2data_time                   : 24;
2650                u64 reserved_24_35                   : 12;
2651                u64 tx_eee_enable                    : 1;
2652                u64 reserved_37_39                   : 3;
2653                u64 sync2lpi_time                    : 21;
2654                u64 reserved_61_63                   : 3;
2655        } s;
2656        struct cgxx_gmp_gmix_tx_eee_cfg1_cn {
2657                u64 wake2data_time                   : 24;
2658                u64 reserved_24_31                   : 8;
2659                u64 reserved_32_35                   : 4;
2660                u64 tx_eee_enable                    : 1;
2661                u64 reserved_37_39                   : 3;
2662                u64 sync2lpi_time                    : 21;
2663                u64 reserved_61_63                   : 3;
2664        } cn;
2665};
2666
2667static inline u64 CGXX_GMP_GMIX_TX_EEE_CFG1(u64 a)
2668        __attribute__ ((pure, always_inline));
2669static inline u64 CGXX_GMP_GMIX_TX_EEE_CFG1(u64 a)
2670{
2671        return 0x38808 + 0x40000 * a;
2672}
2673
2674/**
2675 * Register (RSL) cgx#_gmp_gmi#_wol_int
2676 *
2677 * CGX GMP GMI RX WOL Interrupt Registers These registers allow WOL
2678 * interrupts to be sent to the control processor.
2679 */
2680union cgxx_gmp_gmix_wol_int {
2681        u64 u;
2682        struct cgxx_gmp_gmix_wol_int_s {
2683                u64 wol_rcvd                         : 1;
2684                u64 reserved_1_63                    : 63;
2685        } s;
2686        /* struct cgxx_gmp_gmix_wol_int_s cn; */
2687};
2688
2689static inline u64 CGXX_GMP_GMIX_WOL_INT(u64 a)
2690        __attribute__ ((pure, always_inline));
2691static inline u64 CGXX_GMP_GMIX_WOL_INT(u64 a)
2692{
2693        return 0x38a80 + 0x40000 * a;
2694}
2695
2696/**
2697 * Register (RSL) cgx#_gmp_gmi#_wol_int_ena_w1c
2698 *
2699 * CGX GMP GMI RX WOL Interrupt Enable Clear Registers This register
2700 * clears interrupt enable bits.
2701 */
2702union cgxx_gmp_gmix_wol_int_ena_w1c {
2703        u64 u;
2704        struct cgxx_gmp_gmix_wol_int_ena_w1c_s {
2705                u64 wol_rcvd                         : 1;
2706                u64 reserved_1_63                    : 63;
2707        } s;
2708        /* struct cgxx_gmp_gmix_wol_int_ena_w1c_s cn; */
2709};
2710
2711static inline u64 CGXX_GMP_GMIX_WOL_INT_ENA_W1C(u64 a)
2712        __attribute__ ((pure, always_inline));
2713static inline u64 CGXX_GMP_GMIX_WOL_INT_ENA_W1C(u64 a)
2714{
2715        return 0x38a90 + 0x40000 * a;
2716}
2717
2718/**
2719 * Register (RSL) cgx#_gmp_gmi#_wol_int_ena_w1s
2720 *
2721 * CGX GMP GMI RX WOL Interrupt Enable Set Registers This register sets
2722 * interrupt enable bits.
2723 */
2724union cgxx_gmp_gmix_wol_int_ena_w1s {
2725        u64 u;
2726        struct cgxx_gmp_gmix_wol_int_ena_w1s_s {
2727                u64 wol_rcvd                         : 1;
2728                u64 reserved_1_63                    : 63;
2729        } s;
2730        /* struct cgxx_gmp_gmix_wol_int_ena_w1s_s cn; */
2731};
2732
2733static inline u64 CGXX_GMP_GMIX_WOL_INT_ENA_W1S(u64 a)
2734        __attribute__ ((pure, always_inline));
2735static inline u64 CGXX_GMP_GMIX_WOL_INT_ENA_W1S(u64 a)
2736{
2737        return 0x38a98 + 0x40000 * a;
2738}
2739
2740/**
2741 * Register (RSL) cgx#_gmp_gmi#_wol_int_w1s
2742 *
2743 * CGX GMP GMI RX WOL Interrupt Set Registers This register sets
2744 * interrupt bits.
2745 */
2746union cgxx_gmp_gmix_wol_int_w1s {
2747        u64 u;
2748        struct cgxx_gmp_gmix_wol_int_w1s_s {
2749                u64 wol_rcvd                         : 1;
2750                u64 reserved_1_63                    : 63;
2751        } s;
2752        /* struct cgxx_gmp_gmix_wol_int_w1s_s cn; */
2753};
2754
2755static inline u64 CGXX_GMP_GMIX_WOL_INT_W1S(u64 a)
2756        __attribute__ ((pure, always_inline));
2757static inline u64 CGXX_GMP_GMIX_WOL_INT_W1S(u64 a)
2758{
2759        return 0x38a88 + 0x40000 * a;
2760}
2761
2762/**
2763 * Register (RSL) cgx#_gmp_gmi_prt#_cfg
2764 *
2765 * CGX GMP GMI LMAC Configuration Registers This register controls the
2766 * configuration of the LMAC.
2767 */
2768union cgxx_gmp_gmi_prtx_cfg {
2769        u64 u;
2770        struct cgxx_gmp_gmi_prtx_cfg_s {
2771                u64 reserved_0                       : 1;
2772                u64 speed                            : 1;
2773                u64 duplex                           : 1;
2774                u64 slottime                         : 1;
2775                u64 reserved_4_7                     : 4;
2776                u64 speed_msb                        : 1;
2777                u64 reserved_9_11                    : 3;
2778                u64 rx_idle                          : 1;
2779                u64 tx_idle                          : 1;
2780                u64 reserved_14_63                   : 50;
2781        } s;
2782        /* struct cgxx_gmp_gmi_prtx_cfg_s cn; */
2783};
2784
2785static inline u64 CGXX_GMP_GMI_PRTX_CFG(u64 a)
2786        __attribute__ ((pure, always_inline));
2787static inline u64 CGXX_GMP_GMI_PRTX_CFG(u64 a)
2788{
2789        return 0x38020 + 0x40000 * a;
2790}
2791
2792/**
2793 * Register (RSL) cgx#_gmp_gmi_rx#_decision
2794 *
2795 * CGX GMP Packet-Decision Registers This register specifies the byte
2796 * count used to determine when to accept or to filter a packet. As each
2797 * byte in a packet is received by GMI, the L2 byte count is compared
2798 * against [CNT]. In normal operation, the L2 header begins after the
2799 * PREAMBLE + SFD (CGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK] = 1) and any
2800 * optional UDD skip data (CGX()_GMP_GMI_RX()_UDD_SKP[LEN]).  Internal:
2801 * Notes: As each byte in a packet is received by GMI, the L2 byte count
2802 * is compared against the [CNT].  The L2 byte count is the number of
2803 * bytes from the beginning of the L2 header (DMAC).  In normal
2804 * operation, the L2 header begins after the PREAMBLE+SFD
2805 * (CGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK]=1) and any optional UDD skip data
2806 * (CGX()_GMP_GMI_RX()_UDD_SKP[LEN]). When
2807 * CGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK] is clear, PREAMBLE+SFD are
2808 * prepended to the packet and would require UDD skip length to account
2809 * for them.  Full Duplex: _   L2 Size \<  [CNT] - Accept packet. No
2810 * filtering is applied. _   L2 Size \>= [CNT] - Apply filter. Accept
2811 * packet based on PAUSE packet filter.  Half Duplex: _   L2 Size \<
2812 * [CNT] - Drop packet. Packet is unconditionally dropped. _   L2 Size
2813 * \>= [CNT] - Accept packet.  where L2_size = MAX(0, total_packet_size -
2814 * CGX()_GMP_GMI_RX()_UDD_SKP[LEN] -
2815 * ((CGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK]==1)*8)).
2816 */
2817union cgxx_gmp_gmi_rxx_decision {
2818        u64 u;
2819        struct cgxx_gmp_gmi_rxx_decision_s {
2820                u64 cnt                              : 5;
2821                u64 reserved_5_63                    : 59;
2822        } s;
2823        /* struct cgxx_gmp_gmi_rxx_decision_s cn; */
2824};
2825
2826static inline u64 CGXX_GMP_GMI_RXX_DECISION(u64 a)
2827        __attribute__ ((pure, always_inline));
2828static inline u64 CGXX_GMP_GMI_RXX_DECISION(u64 a)
2829{
2830        return 0x38040 + 0x40000 * a;
2831}
2832
2833/**
2834 * Register (RSL) cgx#_gmp_gmi_rx#_frm_chk
2835 *
2836 * CGX GMP Frame Check Registers
2837 */
2838union cgxx_gmp_gmi_rxx_frm_chk {
2839        u64 u;
2840        struct cgxx_gmp_gmi_rxx_frm_chk_s {
2841                u64 minerr                           : 1;
2842                u64 carext                           : 1;
2843                u64 reserved_2                       : 1;
2844                u64 jabber                           : 1;
2845                u64 fcserr                           : 1;
2846                u64 reserved_5_6                     : 2;
2847                u64 rcverr                           : 1;
2848                u64 skperr                           : 1;
2849                u64 reserved_9_63                    : 55;
2850        } s;
2851        /* struct cgxx_gmp_gmi_rxx_frm_chk_s cn; */
2852};
2853
2854static inline u64 CGXX_GMP_GMI_RXX_FRM_CHK(u64 a)
2855        __attribute__ ((pure, always_inline));
2856static inline u64 CGXX_GMP_GMI_RXX_FRM_CHK(u64 a)
2857{
2858        return 0x38030 + 0x40000 * a;
2859}
2860
2861/**
2862 * Register (RSL) cgx#_gmp_gmi_rx#_frm_ctl
2863 *
2864 * CGX GMP Frame Control Registers This register controls the handling of
2865 * the frames. The [CTL_BCK] and [CTL_DRP] bits control how the hardware
2866 * handles incoming PAUSE packets. The most common modes of operation: _
2867 * [CTL_BCK] = 1, [CTL_DRP] = 1: hardware handles everything. _ [CTL_BCK]
2868 * = 0, [CTL_DRP] = 0: software sees all PAUSE frames. _ [CTL_BCK] = 0,
2869 * [CTL_DRP] = 1: all PAUSE frames are completely ignored.  These control
2870 * bits should be set to [CTL_BCK] = 0, [CTL_DRP] = 0 in half-duplex
2871 * mode. Since PAUSE packets only apply to full duplex operation, any
2872 * PAUSE packet would constitute an exception which should be handled by
2873 * the processing cores. PAUSE packets should not be forwarded.
2874 * Internal: Notes: [PRE_STRP]: When [PRE_CHK] is set (indicating that
2875 * the PREAMBLE will be sent), [PRE_STRP] determines if the PREAMBLE+SFD
2876 * bytes are thrown away or sent to the Octane core as part of the
2877 * packet. In either mode, the PREAMBLE+SFD bytes are not counted toward
2878 * the packet size when checking against the MIN and MAX bounds.
2879 * Furthermore, the bytes are skipped when locating the start of the L2
2880 * header for DMAC and Control frame recognition.
2881 */
2882union cgxx_gmp_gmi_rxx_frm_ctl {
2883        u64 u;
2884        struct cgxx_gmp_gmi_rxx_frm_ctl_s {
2885                u64 pre_chk                          : 1;
2886                u64 pre_strp                         : 1;
2887                u64 ctl_drp                          : 1;
2888                u64 ctl_bck                          : 1;
2889                u64 ctl_mcst                         : 1;
2890                u64 ctl_smac                         : 1;
2891                u64 pre_free                         : 1;
2892                u64 reserved_7_8                     : 2;
2893                u64 pre_align                        : 1;
2894                u64 null_dis                         : 1;
2895                u64 reserved_11                      : 1;
2896                u64 ptp_mode                         : 1;
2897                u64 rx_fc_type                       : 1;
2898                u64 reserved_14_63                   : 50;
2899        } s;
2900        struct cgxx_gmp_gmi_rxx_frm_ctl_cn {
2901                u64 pre_chk                          : 1;
2902                u64 pre_strp                         : 1;
2903                u64 ctl_drp                          : 1;
2904                u64 ctl_bck                          : 1;
2905                u64 ctl_mcst                         : 1;
2906                u64 ctl_smac                         : 1;
2907                u64 pre_free                         : 1;
2908                u64 reserved_7                       : 1;
2909                u64 reserved_8                       : 1;
2910                u64 pre_align                        : 1;
2911                u64 null_dis                         : 1;
2912                u64 reserved_11                      : 1;
2913                u64 ptp_mode                         : 1;
2914                u64 rx_fc_type                       : 1;
2915                u64 reserved_14_63                   : 50;
2916        } cn;
2917};
2918
2919static inline u64 CGXX_GMP_GMI_RXX_FRM_CTL(u64 a)
2920        __attribute__ ((pure, always_inline));
2921static inline u64 CGXX_GMP_GMI_RXX_FRM_CTL(u64 a)
2922{
2923        return 0x38028 + 0x40000 * a;
2924}
2925
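/*
 * Usage sketch (illustrative): selecting the "hardware handles
 * everything" PAUSE mode described above ([CTL_BCK] = 1, [CTL_DRP] = 1)
 * for one LMAC.  "base" and "lmac" are assumptions of this example.
 *
 *	union cgxx_gmp_gmi_rxx_frm_ctl fctl;
 *
 *	fctl.u = readq(base + CGXX_GMP_GMI_RXX_FRM_CTL(lmac));
 *	fctl.s.ctl_bck = 1;		// hardware reacts to PAUSE frames
 *	fctl.s.ctl_drp = 1;		// ...and drops them before software
 *	writeq(fctl.u, base + CGXX_GMP_GMI_RXX_FRM_CTL(lmac));
 */
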
2926/**
2927 * Register (RSL) cgx#_gmp_gmi_rx#_ifg
2928 *
2929 * CGX GMI Minimum Interframe-Gap Cycles Registers This register
2930 * specifies the minimum number of interframe-gap (IFG) cycles between
2931 * packets.
2932 */
2933union cgxx_gmp_gmi_rxx_ifg {
2934        u64 u;
2935        struct cgxx_gmp_gmi_rxx_ifg_s {
2936                u64 ifg                              : 4;
2937                u64 reserved_4_63                    : 60;
2938        } s;
2939        /* struct cgxx_gmp_gmi_rxx_ifg_s cn; */
2940};
2941
2942static inline u64 CGXX_GMP_GMI_RXX_IFG(u64 a)
2943        __attribute__ ((pure, always_inline));
2944static inline u64 CGXX_GMP_GMI_RXX_IFG(u64 a)
2945{
2946        return 0x38058 + 0x40000 * a;
2947}
2948
2949/**
2950 * Register (RSL) cgx#_gmp_gmi_rx#_int
2951 *
2952 * CGX GMP GMI RX Interrupt Registers These registers allow interrupts to
2953 * be sent to the control processor. * Exception conditions \<10:0\> can
2954 * also set the rcv/opcode in the received packet's work-queue entry.
2955 * CGX()_GMP_GMI_RX()_FRM_CHK provides a bit mask for configuring which
2956 * conditions set the error. In half duplex operation, the expectation is
2957 * that collisions will appear as either MINERR or CAREXT errors.
2958 * Internal: Notes: (1) exception conditions 10:0 can also set the
2959 * rcv/opcode in the received packet's workQ entry.  The
2960 * CGX()_GMP_GMI_RX()_FRM_CHK register provides a bit mask for
2961 * configuring which conditions set the error.  (2) in half duplex
2962 * operation, the expectation is that collisions will appear as either
2963 * MINERR or CAREXT errors.  (3) JABBER An RX jabber error indicates
2964 * that a packet was received which is longer than the maximum allowed
2965 * packet as defined by the system.  GMI will truncate the packet at the
2966 * JABBER count. Failure to do so could lead to system instability.  (4)
2967 * NIBERR This error is illegal at 1000Mbs speeds
2968 * (CGX()_GMP_GMI_PRT()_CFG[SPEED]==0) and will never assert.  (5) MINERR
2969 * total frame DA+SA+TL+DATA+PAD+FCS \< 64  (6) ALNERR Indicates that the
2970 * packet received was not an integer number of bytes.  If FCS checking
2971 * is enabled, ALNERR will only assert if the FCS is bad.  If FCS
2972 * checking is disabled, ALNERR will assert in all non-integer frame
2973 * cases.  (7) Collisions Collisions can only occur in half-duplex mode.
2974 * A collision is assumed by the receiver when the slottime
2975 * (CGX()_GMP_GMI_PRT()_CFG[SLOTTIME]) is not satisfied.  In 10/100 mode,
2976 * this will result in a frame \< SLOTTIME.  In 1000 mode, it could
2977 * result either in frame \< SLOTTIME or a carrier extend error with the
2978 * SLOTTIME.  These conditions are visible by... . transfer ended before
2979 * slottime COLDET . carrier extend error           CAREXT  (A) LENERR
2980 * Length errors occur when the received packet does not match the length
2981 * field.  LENERR is only checked for packets between 64 and 1500 bytes.
2982 * For untagged frames, the length must match exactly.  For tagged frames
2983 * the length or length+4 must match.  (B) PCTERR checks that the frame
2984 * begins with a valid PREAMBLE sequence. Does not check the number of
2985 * PREAMBLE cycles.  (C) OVRERR *DON'T PUT IN HRM* OVRERR is an
2986 * architectural assertion check internal to GMI to make sure no
2987 * assumption was violated.  In a correctly operating system, this
2988 * interrupt can never fire. GMI has an internal arbiter which selects
2989 * which of four ports to buffer in the main RX FIFO.  If we normally
2990 * buffer eight bytes, then each port will typically push a tick every
2991 * eight cycles if the packet interface is going as fast as possible.  If
2992 * there are four ports, they push every two cycles.  So that's the
2993 * assumption.  That the inbound module will always be able to consume
2994 * the tick before another is produced.  If that doesn't happen that's
2995 * when OVRERR will assert.
2996 */
2997union cgxx_gmp_gmi_rxx_int {
2998        u64 u;
2999        struct cgxx_gmp_gmi_rxx_int_s {
3000                u64 minerr                           : 1;
3001                u64 carext                           : 1;
3002                u64 jabber                           : 1;
3003                u64 fcserr                           : 1;
3004                u64 rcverr                           : 1;
3005                u64 skperr                           : 1;
3006                u64 ovrerr                           : 1;
3007                u64 pcterr                           : 1;
3008                u64 rsverr                           : 1;
3009                u64 falerr                           : 1;
3010                u64 coldet                           : 1;
3011                u64 ifgerr                           : 1;
3012                u64 reserved_12_63                   : 52;
3013        } s;
3014        struct cgxx_gmp_gmi_rxx_int_cn {
3015                u64 minerr                           : 1;
3016                u64 carext                           : 1;
3017                u64 jabber                           : 1;
3018                u64 fcserr                           : 1;
3019                u64 rcverr                           : 1;
3020                u64 skperr                           : 1;
3021                u64 ovrerr                           : 1;
3022                u64 pcterr                           : 1;
3023                u64 rsverr                           : 1;
3024                u64 falerr                           : 1;
3025                u64 coldet                           : 1;
3026                u64 ifgerr                           : 1;
3027                u64 reserved_12_15                   : 4;
3028                u64 reserved_16_63                   : 48;
3029        } cn;
3030};
3031
3032static inline u64 CGXX_GMP_GMI_RXX_INT(u64 a)
3033        __attribute__ ((pure, always_inline));
3034static inline u64 CGXX_GMP_GMI_RXX_INT(u64 a)
3035{
3036        return 0x38000 + 0x40000 * a;
3037}
3038
3039/**
3040 * Register (RSL) cgx#_gmp_gmi_rx#_int_ena_w1c
3041 *
3042 * CGX GMP GMI RX Interrupt Enable Clear Registers This register clears
3043 * interrupt enable bits.
3044 */
3045union cgxx_gmp_gmi_rxx_int_ena_w1c {
3046        u64 u;
3047        struct cgxx_gmp_gmi_rxx_int_ena_w1c_s {
3048                u64 minerr                           : 1;
3049                u64 carext                           : 1;
3050                u64 jabber                           : 1;
3051                u64 fcserr                           : 1;
3052                u64 rcverr                           : 1;
3053                u64 skperr                           : 1;
3054                u64 ovrerr                           : 1;
3055                u64 pcterr                           : 1;
3056                u64 rsverr                           : 1;
3057                u64 falerr                           : 1;
3058                u64 coldet                           : 1;
3059                u64 ifgerr                           : 1;
3060                u64 reserved_12_63                   : 52;
3061        } s;
3062        struct cgxx_gmp_gmi_rxx_int_ena_w1c_cn {
3063                u64 minerr                           : 1;
3064                u64 carext                           : 1;
3065                u64 jabber                           : 1;
3066                u64 fcserr                           : 1;
3067                u64 rcverr                           : 1;
3068                u64 skperr                           : 1;
3069                u64 ovrerr                           : 1;
3070                u64 pcterr                           : 1;
3071                u64 rsverr                           : 1;
3072                u64 falerr                           : 1;
3073                u64 coldet                           : 1;
3074                u64 ifgerr                           : 1;
3075                u64 reserved_12_15                   : 4;
3076                u64 reserved_16_63                   : 48;
3077        } cn;
3078};
3079
3080static inline u64 CGXX_GMP_GMI_RXX_INT_ENA_W1C(u64 a)
3081        __attribute__ ((pure, always_inline));
3082static inline u64 CGXX_GMP_GMI_RXX_INT_ENA_W1C(u64 a)
3083{
3084        return 0x38010 + 0x40000 * a;
3085}
3086
3087/**
3088 * Register (RSL) cgx#_gmp_gmi_rx#_int_ena_w1s
3089 *
3090 * CGX GMP GMI RX Interrupt Enable Set Registers This register sets
3091 * interrupt enable bits.
3092 */
3093union cgxx_gmp_gmi_rxx_int_ena_w1s {
3094        u64 u;
3095        struct cgxx_gmp_gmi_rxx_int_ena_w1s_s {
3096                u64 minerr                           : 1;
3097                u64 carext                           : 1;
3098                u64 jabber                           : 1;
3099                u64 fcserr                           : 1;
3100                u64 rcverr                           : 1;
3101                u64 skperr                           : 1;
3102                u64 ovrerr                           : 1;
3103                u64 pcterr                           : 1;
3104                u64 rsverr                           : 1;
3105                u64 falerr                           : 1;
3106                u64 coldet                           : 1;
3107                u64 ifgerr                           : 1;
3108                u64 reserved_12_63                   : 52;
3109        } s;
3110        struct cgxx_gmp_gmi_rxx_int_ena_w1s_cn {
3111                u64 minerr                           : 1;
3112                u64 carext                           : 1;
3113                u64 jabber                           : 1;
3114                u64 fcserr                           : 1;
3115                u64 rcverr                           : 1;
3116                u64 skperr                           : 1;
3117                u64 ovrerr                           : 1;
3118                u64 pcterr                           : 1;
3119                u64 rsverr                           : 1;
3120                u64 falerr                           : 1;
3121                u64 coldet                           : 1;
3122                u64 ifgerr                           : 1;
3123                u64 reserved_12_15                   : 4;
3124                u64 reserved_16_63                   : 48;
3125        } cn;
3126};
3127
3128static inline u64 CGXX_GMP_GMI_RXX_INT_ENA_W1S(u64 a)
3129        __attribute__ ((pure, always_inline));
3130static inline u64 CGXX_GMP_GMI_RXX_INT_ENA_W1S(u64 a)
3131{
3132        return 0x38018 + 0x40000 * a;
3133}
3134
3135/**
3136 * Register (RSL) cgx#_gmp_gmi_rx#_int_w1s
3137 *
3138 * CGX GMP GMI RX Interrupt Set Registers This register sets interrupt
3139 * bits.
3140 */
3141union cgxx_gmp_gmi_rxx_int_w1s {
3142        u64 u;
3143        struct cgxx_gmp_gmi_rxx_int_w1s_s {
3144                u64 minerr                           : 1;
3145                u64 carext                           : 1;
3146                u64 jabber                           : 1;
3147                u64 fcserr                           : 1;
3148                u64 rcverr                           : 1;
3149                u64 skperr                           : 1;
3150                u64 ovrerr                           : 1;
3151                u64 pcterr                           : 1;
3152                u64 rsverr                           : 1;
3153                u64 falerr                           : 1;
3154                u64 coldet                           : 1;
3155                u64 ifgerr                           : 1;
3156                u64 reserved_12_63                   : 52;
3157        } s;
3158        struct cgxx_gmp_gmi_rxx_int_w1s_cn {
3159                u64 minerr                           : 1;
3160                u64 carext                           : 1;
3161                u64 jabber                           : 1;
3162                u64 fcserr                           : 1;
3163                u64 rcverr                           : 1;
3164                u64 skperr                           : 1;
3165                u64 ovrerr                           : 1;
3166                u64 pcterr                           : 1;
3167                u64 rsverr                           : 1;
3168                u64 falerr                           : 1;
3169                u64 coldet                           : 1;
3170                u64 ifgerr                           : 1;
3171                u64 reserved_12_15                   : 4;
3172                u64 reserved_16_63                   : 48;
3173        } cn;
3174};
3175
3176static inline u64 CGXX_GMP_GMI_RXX_INT_W1S(u64 a)
3177        __attribute__ ((pure, always_inline));
3178static inline u64 CGXX_GMP_GMI_RXX_INT_W1S(u64 a)
3179{
3180        return 0x38008 + 0x40000 * a;
3181}
3182
3183/**
3184 * Register (RSL) cgx#_gmp_gmi_rx#_jabber
3185 *
3186 * CGX GMP Maximum Packet-Size Registers This register specifies the
3187 * maximum size for packets, beyond which the GMI truncates.
3188 */
3189union cgxx_gmp_gmi_rxx_jabber {
3190        u64 u;
3191        struct cgxx_gmp_gmi_rxx_jabber_s {
3192                u64 cnt                              : 16;
3193                u64 reserved_16_63                   : 48;
3194        } s;
3195        /* struct cgxx_gmp_gmi_rxx_jabber_s cn; */
3196};
3197
3198static inline u64 CGXX_GMP_GMI_RXX_JABBER(u64 a)
3199        __attribute__ ((pure, always_inline));
3200static inline u64 CGXX_GMP_GMI_RXX_JABBER(u64 a)
3201{
3202        return 0x38038 + 0x40000 * a;
3203}
3204
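/*
 * Usage sketch (illustrative, not part of the generated definitions):
 * capping the receive packet size as described above.  Assumes this
 * header is included, writeq() from asm/io.h, and a mapped BAR0 pointer
 * (e.g. derived from CGX_BAR_E_CGXX_PF_BAR0()); "base" and "lmac" are
 * hypothetical names.
 *
 *	union cgxx_gmp_gmi_rxx_jabber jab = { .u = 0 };
 *
 *	jab.s.cnt = 1536;	// truncate frames longer than 1536 bytes
 *	writeq(jab.u, base + CGXX_GMP_GMI_RXX_JABBER(lmac));
 */
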
3205/**
3206 * Register (RSL) cgx#_gmp_gmi_rx#_udd_skp
3207 *
3208 * CGX GMP GMI User-Defined Data Skip Registers This register specifies
3209 * the amount of user-defined data (UDD) added before the start of the
3210 * L2C data.  Internal: Notes: (1) The skip bytes are part of the packet
3211 * and will be handled by NIX.  (2) The system can determine if the UDD
3212 * bytes are included in the FCS check by using the FCSSEL field - if the
 * FCS check is enabled.  (3) Assume that the preamble/SFD is always at
 * the start of the frame - even before the UDD bytes.  In most cases
 * there will be no preamble, since the packet interface communicates
 * directly with another packet interface (MAC to MAC) without a PHY
 * involved.  (4) Address filtering and control-packet filtering can
 * still be performed if the user desires.  (5)
3219 * CGX()_GMP_GMI_RX()_UDD_SKP[LEN] must be 0 in half-duplex operation
3220 * unless CGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK] is clear.  If
3221 * CGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK] is clear, then
3222 * CGX()_GMP_GMI_RX()_UDD_SKP[LEN] will normally be 8.  (6) In all cases,
3223 * the UDD bytes will be sent down the packet interface as part of the
3224 * packet.  The UDD bytes are never stripped from the actual packet.
3225 */
3226union cgxx_gmp_gmi_rxx_udd_skp {
3227        u64 u;
3228        struct cgxx_gmp_gmi_rxx_udd_skp_s {
3229                u64 len                              : 7;
3230                u64 reserved_7                       : 1;
3231                u64 fcssel                           : 1;
3232                u64 reserved_9_63                    : 55;
3233        } s;
3234        /* struct cgxx_gmp_gmi_rxx_udd_skp_s cn; */
3235};
3236
3237static inline u64 CGXX_GMP_GMI_RXX_UDD_SKP(u64 a)
3238        __attribute__ ((pure, always_inline));
3239static inline u64 CGXX_GMP_GMI_RXX_UDD_SKP(u64 a)
3240{
3241        return 0x38048 + 0x40000 * a;
3242}
3243
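/*
 * Usage sketch (illustrative only): programming the UDD skip length
 * described in the notes above.  Assumes writeq() from asm/io.h and a
 * mapped BAR0 pointer; "base", "lmac", "udd_bytes" and "fcs_sel" are
 * hypothetical names.
 *
 *	union cgxx_gmp_gmi_rxx_udd_skp skp = { .u = 0 };
 *
 *	skp.s.len = udd_bytes;	// UDD bytes preceding the L2 data
 *	skp.s.fcssel = fcs_sel;	// see note (2) for the FCSSEL meaning
 *	writeq(skp.u, base + CGXX_GMP_GMI_RXX_UDD_SKP(lmac));
 */
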
3244/**
3245 * Register (RSL) cgx#_gmp_gmi_smac#
3246 *
3247 * CGX GMI SMAC Registers
3248 */
3249union cgxx_gmp_gmi_smacx {
3250        u64 u;
3251        struct cgxx_gmp_gmi_smacx_s {
3252                u64 smac                             : 48;
3253                u64 reserved_48_63                   : 16;
3254        } s;
3255        /* struct cgxx_gmp_gmi_smacx_s cn; */
3256};
3257
3258static inline u64 CGXX_GMP_GMI_SMACX(u64 a)
3259        __attribute__ ((pure, always_inline));
3260static inline u64 CGXX_GMP_GMI_SMACX(u64 a)
3261{
3262        return 0x38230 + 0x40000 * a;
3263}
3264
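/*
 * Usage sketch (illustrative only): loading the 48-bit SMAC field, for
 * example with the LMAC's station MAC address.  Assumes writeq() from
 * asm/io.h and a mapped BAR0 pointer; "base" and "lmac" are hypothetical
 * names and the address shown is an arbitrary locally administered one.
 *
 *	union cgxx_gmp_gmi_smacx smac = { .u = 0 };
 *
 *	smac.s.smac = 0x020000abcdefull;	// 02:00:00:ab:cd:ef
 *	writeq(smac.u, base + CGXX_GMP_GMI_SMACX(lmac));
 */
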
3265/**
3266 * Register (RSL) cgx#_gmp_gmi_tx#_append
3267 *
3268 * CGX GMI TX Append Control Registers
3269 */
3270union cgxx_gmp_gmi_txx_append {
3271        u64 u;
3272        struct cgxx_gmp_gmi_txx_append_s {
3273                u64 preamble                         : 1;
3274                u64 pad                              : 1;
3275                u64 fcs                              : 1;
3276                u64 force_fcs                        : 1;
3277                u64 reserved_4_63                    : 60;
3278        } s;
3279        /* struct cgxx_gmp_gmi_txx_append_s cn; */
3280};
3281
3282static inline u64 CGXX_GMP_GMI_TXX_APPEND(u64 a)
3283        __attribute__ ((pure, always_inline));
3284static inline u64 CGXX_GMP_GMI_TXX_APPEND(u64 a)
3285{
3286        return 0x38218 + 0x40000 * a;
3287}
3288
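/*
 * Usage sketch (illustrative only): a common TX append configuration in
 * which hardware inserts the preamble, pads short frames and appends the
 * FCS.  Assumes writeq() and a mapped BAR0 pointer; "base" and "lmac"
 * are hypothetical names.
 *
 *	union cgxx_gmp_gmi_txx_append app = { .u = 0 };
 *
 *	app.s.preamble = 1;	// prepend preamble/SFD
 *	app.s.pad = 1;		// pad frames below the minimum size
 *	app.s.fcs = 1;		// append a hardware-computed FCS
 *	writeq(app.u, base + CGXX_GMP_GMI_TXX_APPEND(lmac));
 */
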
3289/**
3290 * Register (RSL) cgx#_gmp_gmi_tx#_burst
3291 *
3292 * CGX GMI TX Burst-Counter Registers
3293 */
3294union cgxx_gmp_gmi_txx_burst {
3295        u64 u;
3296        struct cgxx_gmp_gmi_txx_burst_s {
3297                u64 burst                            : 16;
3298                u64 reserved_16_63                   : 48;
3299        } s;
3300        /* struct cgxx_gmp_gmi_txx_burst_s cn; */
3301};
3302
3303static inline u64 CGXX_GMP_GMI_TXX_BURST(u64 a)
3304        __attribute__ ((pure, always_inline));
3305static inline u64 CGXX_GMP_GMI_TXX_BURST(u64 a)
3306{
3307        return 0x38228 + 0x40000 * a;
3308}
3309
3310/**
3311 * Register (RSL) cgx#_gmp_gmi_tx#_ctl
3312 *
3313 * CGX GMI Transmit Control Registers
3314 */
3315union cgxx_gmp_gmi_txx_ctl {
3316        u64 u;
3317        struct cgxx_gmp_gmi_txx_ctl_s {
3318                u64 xscol_en                         : 1;
3319                u64 xsdef_en                         : 1;
3320                u64 tx_fc_type                       : 1;
3321                u64 link_drain                       : 1;
3322                u64 reserved_4_63                    : 60;
3323        } s;
3324        /* struct cgxx_gmp_gmi_txx_ctl_s cn; */
3325};
3326
3327static inline u64 CGXX_GMP_GMI_TXX_CTL(u64 a)
3328        __attribute__ ((pure, always_inline));
3329static inline u64 CGXX_GMP_GMI_TXX_CTL(u64 a)
3330{
3331        return 0x38270 + 0x40000 * a;
3332}
3333
3334/**
3335 * Register (RSL) cgx#_gmp_gmi_tx#_int
3336 *
3337 * CGX GMI TX Interrupt Registers
3338 */
3339union cgxx_gmp_gmi_txx_int {
3340        u64 u;
3341        struct cgxx_gmp_gmi_txx_int_s {
3342                u64 undflw                           : 1;
3343                u64 xscol                            : 1;
3344                u64 xsdef                            : 1;
3345                u64 late_col                         : 1;
3346                u64 ptp_lost                         : 1;
3347                u64 reserved_5_63                    : 59;
3348        } s;
3349        struct cgxx_gmp_gmi_txx_int_cn {
3350                u64 undflw                           : 1;
3351                u64 xscol                            : 1;
3352                u64 xsdef                            : 1;
3353                u64 late_col                         : 1;
3354                u64 ptp_lost                         : 1;
3355                u64 reserved_5_7                     : 3;
3356                u64 reserved_8                       : 1;
3357                u64 reserved_9_63                    : 55;
3358        } cn;
3359};
3360
3361static inline u64 CGXX_GMP_GMI_TXX_INT(u64 a)
3362        __attribute__ ((pure, always_inline));
3363static inline u64 CGXX_GMP_GMI_TXX_INT(u64 a)
3364{
3365        return 0x38500 + 0x40000 * a;
3366}
3367
3368/**
3369 * Register (RSL) cgx#_gmp_gmi_tx#_int_ena_w1c
3370 *
3371 * CGX GMI TX Interrupt Enable Clear Registers This register clears
3372 * interrupt enable bits.
3373 */
3374union cgxx_gmp_gmi_txx_int_ena_w1c {
3375        u64 u;
3376        struct cgxx_gmp_gmi_txx_int_ena_w1c_s {
3377                u64 undflw                           : 1;
3378                u64 xscol                            : 1;
3379                u64 xsdef                            : 1;
3380                u64 late_col                         : 1;
3381                u64 ptp_lost                         : 1;
3382                u64 reserved_5_63                    : 59;
3383        } s;
3384        struct cgxx_gmp_gmi_txx_int_ena_w1c_cn {
3385                u64 undflw                           : 1;
3386                u64 xscol                            : 1;
3387                u64 xsdef                            : 1;
3388                u64 late_col                         : 1;
3389                u64 ptp_lost                         : 1;
3390                u64 reserved_5_7                     : 3;
3391                u64 reserved_8                       : 1;
3392                u64 reserved_9_63                    : 55;
3393        } cn;
3394};
3395
3396static inline u64 CGXX_GMP_GMI_TXX_INT_ENA_W1C(u64 a)
3397        __attribute__ ((pure, always_inline));
3398static inline u64 CGXX_GMP_GMI_TXX_INT_ENA_W1C(u64 a)
3399{
3400        return 0x38510 + 0x40000 * a;
3401}
3402
3403/**
3404 * Register (RSL) cgx#_gmp_gmi_tx#_int_ena_w1s
3405 *
3406 * CGX GMI TX Interrupt Enable Set Registers This register sets interrupt
3407 * enable bits.
3408 */
3409union cgxx_gmp_gmi_txx_int_ena_w1s {
3410        u64 u;
3411        struct cgxx_gmp_gmi_txx_int_ena_w1s_s {
3412                u64 undflw                           : 1;
3413                u64 xscol                            : 1;
3414                u64 xsdef                            : 1;
3415                u64 late_col                         : 1;
3416                u64 ptp_lost                         : 1;
3417                u64 reserved_5_63                    : 59;
3418        } s;
3419        struct cgxx_gmp_gmi_txx_int_ena_w1s_cn {
3420                u64 undflw                           : 1;
3421                u64 xscol                            : 1;
3422                u64 xsdef                            : 1;
3423                u64 late_col                         : 1;
3424                u64 ptp_lost                         : 1;
3425                u64 reserved_5_7                     : 3;
3426                u64 reserved_8                       : 1;
3427                u64 reserved_9_63                    : 55;
3428        } cn;
3429};
3430
3431static inline u64 CGXX_GMP_GMI_TXX_INT_ENA_W1S(u64 a)
3432        __attribute__ ((pure, always_inline));
3433static inline u64 CGXX_GMP_GMI_TXX_INT_ENA_W1S(u64 a)
3434{
3435        return 0x38518 + 0x40000 * a;
3436}
3437
3438/**
3439 * Register (RSL) cgx#_gmp_gmi_tx#_int_w1s
3440 *
3441 * CGX GMI TX Interrupt Set Registers This register sets interrupt bits.
3442 */
3443union cgxx_gmp_gmi_txx_int_w1s {
3444        u64 u;
3445        struct cgxx_gmp_gmi_txx_int_w1s_s {
3446                u64 undflw                           : 1;
3447                u64 xscol                            : 1;
3448                u64 xsdef                            : 1;
3449                u64 late_col                         : 1;
3450                u64 ptp_lost                         : 1;
3451                u64 reserved_5_63                    : 59;
3452        } s;
3453        struct cgxx_gmp_gmi_txx_int_w1s_cn {
3454                u64 undflw                           : 1;
3455                u64 xscol                            : 1;
3456                u64 xsdef                            : 1;
3457                u64 late_col                         : 1;
3458                u64 ptp_lost                         : 1;
3459                u64 reserved_5_7                     : 3;
3460                u64 reserved_8                       : 1;
3461                u64 reserved_9_63                    : 55;
3462        } cn;
3463};
3464
3465static inline u64 CGXX_GMP_GMI_TXX_INT_W1S(u64 a)
3466        __attribute__ ((pure, always_inline));
3467static inline u64 CGXX_GMP_GMI_TXX_INT_W1S(u64 a)
3468{
3469        return 0x38508 + 0x40000 * a;
3470}
3471
3472/**
3473 * Register (RSL) cgx#_gmp_gmi_tx#_min_pkt
3474 *
3475 * CGX GMI TX Minimum-Size-Packet Registers
3476 */
3477union cgxx_gmp_gmi_txx_min_pkt {
3478        u64 u;
3479        struct cgxx_gmp_gmi_txx_min_pkt_s {
3480                u64 min_size                         : 8;
3481                u64 reserved_8_63                    : 56;
3482        } s;
3483        /* struct cgxx_gmp_gmi_txx_min_pkt_s cn; */
3484};
3485
3486static inline u64 CGXX_GMP_GMI_TXX_MIN_PKT(u64 a)
3487        __attribute__ ((pure, always_inline));
3488static inline u64 CGXX_GMP_GMI_TXX_MIN_PKT(u64 a)
3489{
3490        return 0x38240 + 0x40000 * a;
3491}
3492
3493/**
3494 * Register (RSL) cgx#_gmp_gmi_tx#_pause_pkt_interval
3495 *
3496 * CGX GMI TX PAUSE-Packet Transmission-Interval Registers This register
3497 * specifies how often PAUSE packets are sent. Internal: Notes: Choosing
3498 * proper values of CGX()_GMP_GMI_TX()_PAUSE_PKT_TIME[PTIME] and
3499 * CGX()_GMP_GMI_TX()_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to
3500 * the system designer.  It is suggested that TIME be much greater than
3501 * INTERVAL and CGX()_GMP_GMI_TX()_PAUSE_ZERO[SEND] be set.  This allows
3502 * a periodic refresh of the PAUSE count and then when the backpressure
3503 * condition is lifted, a PAUSE packet with TIME==0 will be sent
3504 * indicating that Octane is ready for additional data.  If the system
3505 * chooses to not set CGX()_GMP_GMI_TX()_PAUSE_ZERO[SEND], then it is
3506 * suggested that TIME and INTERVAL are programmed such that they
 * satisfy the following rule:  _ INTERVAL \<= TIME - (largest_pkt_size
 * + IFG + pause_pkt_size)  where largest_pkt_size is the largest packet
3509 * that the system can send (normally 1518B), IFG is the interframe gap
3510 * and pause_pkt_size is the size of the PAUSE packet (normally 64B).
3511 */
3512union cgxx_gmp_gmi_txx_pause_pkt_interval {
3513        u64 u;
3514        struct cgxx_gmp_gmi_txx_pause_pkt_interval_s {
3515                u64 interval                         : 16;
3516                u64 reserved_16_63                   : 48;
3517        } s;
3518        /* struct cgxx_gmp_gmi_txx_pause_pkt_interval_s cn; */
3519};
3520
3521static inline u64 CGXX_GMP_GMI_TXX_PAUSE_PKT_INTERVAL(u64 a)
3522        __attribute__ ((pure, always_inline));
3523static inline u64 CGXX_GMP_GMI_TXX_PAUSE_PKT_INTERVAL(u64 a)
3524{
3525        return 0x38248 + 0x40000 * a;
3526}
3527
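/*
 * Usage sketch (illustrative only) of the refresh scheme suggested in
 * the note above: PTIME much greater than INTERVAL, with
 * CGX()_GMP_GMI_TX()_PAUSE_ZERO[SEND] set so a PAUSE packet with time 0
 * is sent once backpressure is released.  Assumes this header is
 * included (the PAUSE_PKT_TIME and PAUSE_ZERO unions are defined further
 * below), writeq() from asm/io.h and a mapped BAR0 pointer; "base" and
 * "lmac" are hypothetical names and the values shown are arbitrary.
 *
 *	union cgxx_gmp_gmi_txx_pause_pkt_time ptime = { .u = 0 };
 *	union cgxx_gmp_gmi_txx_pause_pkt_interval ival = { .u = 0 };
 *	union cgxx_gmp_gmi_txx_pause_zero pzero = { .u = 0 };
 *
 *	ptime.s.ptime = 0x8000;		// PAUSE time advertised to the partner
 *	ival.s.interval = 0x1000;	// refresh well before PTIME expires
 *	pzero.s.send = 1;		// send TIME==0 when backpressure lifts
 *	writeq(ptime.u, base + CGXX_GMP_GMI_TXX_PAUSE_PKT_TIME(lmac));
 *	writeq(ival.u, base + CGXX_GMP_GMI_TXX_PAUSE_PKT_INTERVAL(lmac));
 *	writeq(pzero.u, base + CGXX_GMP_GMI_TXX_PAUSE_ZERO(lmac));
 */
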
3528/**
3529 * Register (RSL) cgx#_gmp_gmi_tx#_pause_pkt_time
3530 *
3531 * CGX GMI TX PAUSE Packet PAUSE-Time Registers
3532 */
3533union cgxx_gmp_gmi_txx_pause_pkt_time {
3534        u64 u;
3535        struct cgxx_gmp_gmi_txx_pause_pkt_time_s {
3536                u64 ptime                            : 16;
3537                u64 reserved_16_63                   : 48;
3538        } s;
3539        /* struct cgxx_gmp_gmi_txx_pause_pkt_time_s cn; */
3540};
3541
3542static inline u64 CGXX_GMP_GMI_TXX_PAUSE_PKT_TIME(u64 a)
3543        __attribute__ ((pure, always_inline));
3544static inline u64 CGXX_GMP_GMI_TXX_PAUSE_PKT_TIME(u64 a)
3545{
3546        return 0x38238 + 0x40000 * a;
3547}
3548
3549/**
3550 * Register (RSL) cgx#_gmp_gmi_tx#_pause_togo
3551 *
3552 * CGX GMI TX Time-to-Backpressure Registers
3553 */
3554union cgxx_gmp_gmi_txx_pause_togo {
3555        u64 u;
3556        struct cgxx_gmp_gmi_txx_pause_togo_s {
3557                u64 ptime                            : 16;
3558                u64 reserved_16_63                   : 48;
3559        } s;
3560        /* struct cgxx_gmp_gmi_txx_pause_togo_s cn; */
3561};
3562
3563static inline u64 CGXX_GMP_GMI_TXX_PAUSE_TOGO(u64 a)
3564        __attribute__ ((pure, always_inline));
3565static inline u64 CGXX_GMP_GMI_TXX_PAUSE_TOGO(u64 a)
3566{
3567        return 0x38258 + 0x40000 * a;
3568}
3569
3570/**
3571 * Register (RSL) cgx#_gmp_gmi_tx#_pause_zero
3572 *
3573 * CGX GMI TX PAUSE-Zero-Enable Registers
3574 */
3575union cgxx_gmp_gmi_txx_pause_zero {
3576        u64 u;
3577        struct cgxx_gmp_gmi_txx_pause_zero_s {
3578                u64 send                             : 1;
3579                u64 reserved_1_63                    : 63;
3580        } s;
3581        /* struct cgxx_gmp_gmi_txx_pause_zero_s cn; */
3582};
3583
3584static inline u64 CGXX_GMP_GMI_TXX_PAUSE_ZERO(u64 a)
3585        __attribute__ ((pure, always_inline));
3586static inline u64 CGXX_GMP_GMI_TXX_PAUSE_ZERO(u64 a)
3587{
3588        return 0x38260 + 0x40000 * a;
3589}
3590
3591/**
3592 * Register (RSL) cgx#_gmp_gmi_tx#_sgmii_ctl
3593 *
3594 * CGX SGMII Control Registers
3595 */
3596union cgxx_gmp_gmi_txx_sgmii_ctl {
3597        u64 u;
3598        struct cgxx_gmp_gmi_txx_sgmii_ctl_s {
3599                u64 align                            : 1;
3600                u64 reserved_1_63                    : 63;
3601        } s;
3602        /* struct cgxx_gmp_gmi_txx_sgmii_ctl_s cn; */
3603};
3604
3605static inline u64 CGXX_GMP_GMI_TXX_SGMII_CTL(u64 a)
3606        __attribute__ ((pure, always_inline));
3607static inline u64 CGXX_GMP_GMI_TXX_SGMII_CTL(u64 a)
3608{
3609        return 0x38300 + 0x40000 * a;
3610}
3611
3612/**
3613 * Register (RSL) cgx#_gmp_gmi_tx#_slot
3614 *
3615 * CGX GMI TX Slottime Counter Registers
3616 */
3617union cgxx_gmp_gmi_txx_slot {
3618        u64 u;
3619        struct cgxx_gmp_gmi_txx_slot_s {
3620                u64 slot                             : 10;
3621                u64 reserved_10_63                   : 54;
3622        } s;
3623        /* struct cgxx_gmp_gmi_txx_slot_s cn; */
3624};
3625
3626static inline u64 CGXX_GMP_GMI_TXX_SLOT(u64 a)
3627        __attribute__ ((pure, always_inline));
3628static inline u64 CGXX_GMP_GMI_TXX_SLOT(u64 a)
3629{
3630        return 0x38220 + 0x40000 * a;
3631}
3632
3633/**
3634 * Register (RSL) cgx#_gmp_gmi_tx#_soft_pause
3635 *
3636 * CGX GMI TX Software PAUSE Registers
3637 */
3638union cgxx_gmp_gmi_txx_soft_pause {
3639        u64 u;
3640        struct cgxx_gmp_gmi_txx_soft_pause_s {
3641                u64 ptime                            : 16;
3642                u64 reserved_16_63                   : 48;
3643        } s;
3644        /* struct cgxx_gmp_gmi_txx_soft_pause_s cn; */
3645};
3646
3647static inline u64 CGXX_GMP_GMI_TXX_SOFT_PAUSE(u64 a)
3648        __attribute__ ((pure, always_inline));
3649static inline u64 CGXX_GMP_GMI_TXX_SOFT_PAUSE(u64 a)
3650{
3651        return 0x38250 + 0x40000 * a;
3652}
3653
3654/**
3655 * Register (RSL) cgx#_gmp_gmi_tx#_thresh
3656 *
3657 * CGX GMI TX Threshold Registers
3658 */
3659union cgxx_gmp_gmi_txx_thresh {
3660        u64 u;
3661        struct cgxx_gmp_gmi_txx_thresh_s {
3662                u64 cnt                              : 11;
3663                u64 reserved_11_63                   : 53;
3664        } s;
3665        /* struct cgxx_gmp_gmi_txx_thresh_s cn; */
3666};
3667
3668static inline u64 CGXX_GMP_GMI_TXX_THRESH(u64 a)
3669        __attribute__ ((pure, always_inline));
3670static inline u64 CGXX_GMP_GMI_TXX_THRESH(u64 a)
3671{
3672        return 0x38210 + 0x40000 * a;
3673}
3674
3675/**
3676 * Register (RSL) cgx#_gmp_gmi_tx_col_attempt
3677 *
3678 * CGX TX Collision Attempts Before Dropping Frame Registers
3679 */
3680union cgxx_gmp_gmi_tx_col_attempt {
3681        u64 u;
3682        struct cgxx_gmp_gmi_tx_col_attempt_s {
3683                u64 limit                            : 5;
3684                u64 reserved_5_63                    : 59;
3685        } s;
3686        /* struct cgxx_gmp_gmi_tx_col_attempt_s cn; */
3687};
3688
3689static inline u64 CGXX_GMP_GMI_TX_COL_ATTEMPT(void)
3690        __attribute__ ((pure, always_inline));
3691static inline u64 CGXX_GMP_GMI_TX_COL_ATTEMPT(void)
3692{
3693        return 0x39010;
3694}
3695
3696/**
3697 * Register (RSL) cgx#_gmp_gmi_tx_ifg
3698 *
3699 * CGX GMI TX Interframe-Gap Cycles Registers Consider the following when
3700 * programming IFG1 and IFG2: * For 10/100/1000 Mb/s half-duplex systems
 * that require IEEE 802.3 compatibility, [IFG1] must be in the range of
 * 1-8, [IFG2] must be in the range of 4-12, and the [IFG1] + [IFG2] sum
 * must be 12. * For 10/100/1000 Mb/s full-duplex systems that require
 * IEEE 802.3 compatibility, [IFG1] must be in the range of 1-11, [IFG2]
 * must be in the range of 1-11, and the [IFG1] + [IFG2] sum must be 12.
 * For all other systems, [IFG1] and [IFG2] can be any value in the
 * range of 1-15, allowing for a total possible IFG sum of 2-30.
3708 */
3709union cgxx_gmp_gmi_tx_ifg {
3710        u64 u;
3711        struct cgxx_gmp_gmi_tx_ifg_s {
3712                u64 ifg1                             : 4;
3713                u64 ifg2                             : 4;
3714                u64 reserved_8_63                    : 56;
3715        } s;
3716        /* struct cgxx_gmp_gmi_tx_ifg_s cn; */
3717};
3718
3719static inline u64 CGXX_GMP_GMI_TX_IFG(void)
3720        __attribute__ ((pure, always_inline));
3721static inline u64 CGXX_GMP_GMI_TX_IFG(void)
3722{
3723        return 0x39000;
3724}
3725
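/*
 * Usage sketch (illustrative only): an IEEE 802.3-compatible setting in
 * which [IFG1] + [IFG2] = 12 as required above (8 + 4 also satisfies the
 * half-duplex ranges).  This register is per CGX, not per LMAC.  Assumes
 * writeq() and a mapped BAR0 pointer; "base" is a hypothetical name.
 *
 *	union cgxx_gmp_gmi_tx_ifg ifg = { .u = 0 };
 *
 *	ifg.s.ifg1 = 8;		// first portion of the interframe gap
 *	ifg.s.ifg2 = 4;		// second portion; sum is 12
 *	writeq(ifg.u, base + CGXX_GMP_GMI_TX_IFG());
 */
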
3726/**
3727 * Register (RSL) cgx#_gmp_gmi_tx_jam
3728 *
3729 * CGX GMI TX JAM Pattern Registers This register provides the pattern
3730 * used in JAM bytes.
3731 */
3732union cgxx_gmp_gmi_tx_jam {
3733        u64 u;
3734        struct cgxx_gmp_gmi_tx_jam_s {
3735                u64 jam                              : 8;
3736                u64 reserved_8_63                    : 56;
3737        } s;
3738        /* struct cgxx_gmp_gmi_tx_jam_s cn; */
3739};
3740
3741static inline u64 CGXX_GMP_GMI_TX_JAM(void)
3742        __attribute__ ((pure, always_inline));
3743static inline u64 CGXX_GMP_GMI_TX_JAM(void)
3744{
3745        return 0x39008;
3746}
3747
3748/**
3749 * Register (RSL) cgx#_gmp_gmi_tx_lfsr
3750 *
3751 * CGX GMI TX LFSR Registers This register shows the contents of the
3752 * linear feedback shift register (LFSR), which is used to implement
3753 * truncated binary exponential backoff.
3754 */
3755union cgxx_gmp_gmi_tx_lfsr {
3756        u64 u;
3757        struct cgxx_gmp_gmi_tx_lfsr_s {
3758                u64 lfsr                             : 16;
3759                u64 reserved_16_63                   : 48;
3760        } s;
3761        /* struct cgxx_gmp_gmi_tx_lfsr_s cn; */
3762};
3763
3764static inline u64 CGXX_GMP_GMI_TX_LFSR(void)
3765        __attribute__ ((pure, always_inline));
3766static inline u64 CGXX_GMP_GMI_TX_LFSR(void)
3767{
3768        return 0x39028;
3769}
3770
3771/**
3772 * Register (RSL) cgx#_gmp_gmi_tx_pause_pkt_dmac
3773 *
3774 * CGX TX PAUSE-Packet DMAC-Field Registers
3775 */
3776union cgxx_gmp_gmi_tx_pause_pkt_dmac {
3777        u64 u;
3778        struct cgxx_gmp_gmi_tx_pause_pkt_dmac_s {
3779                u64 dmac                             : 48;
3780                u64 reserved_48_63                   : 16;
3781        } s;
3782        /* struct cgxx_gmp_gmi_tx_pause_pkt_dmac_s cn; */
3783};
3784
3785static inline u64 CGXX_GMP_GMI_TX_PAUSE_PKT_DMAC(void)
3786        __attribute__ ((pure, always_inline));
3787static inline u64 CGXX_GMP_GMI_TX_PAUSE_PKT_DMAC(void)
3788{
3789        return 0x39018;
3790}
3791
3792/**
3793 * Register (RSL) cgx#_gmp_gmi_tx_pause_pkt_type
3794 *
3795 * CGX GMI TX PAUSE-Packet-PTYPE Field Registers This register provides
3796 * the PTYPE field that is placed in outbound PAUSE packets.
3797 */
3798union cgxx_gmp_gmi_tx_pause_pkt_type {
3799        u64 u;
3800        struct cgxx_gmp_gmi_tx_pause_pkt_type_s {
3801                u64 ptype                            : 16;
3802                u64 reserved_16_63                   : 48;
3803        } s;
3804        /* struct cgxx_gmp_gmi_tx_pause_pkt_type_s cn; */
3805};
3806
3807static inline u64 CGXX_GMP_GMI_TX_PAUSE_PKT_TYPE(void)
3808        __attribute__ ((pure, always_inline));
3809static inline u64 CGXX_GMP_GMI_TX_PAUSE_PKT_TYPE(void)
3810{
3811        return 0x39020;
3812}
3813
3814/**
3815 * Register (RSL) cgx#_gmp_misc#_cfg
3816 *
3817 * CGX GMP PCS Miscellaneous Control Registers This register contains
3818 * general configuration that should not need to be changed from reset
3819 * settings.  Internal: Per lmac diagnostic and chicken bits.
3820 */
3821union cgxx_gmp_miscx_cfg {
3822        u64 u;
3823        struct cgxx_gmp_miscx_cfg_s {
3824                u64 tx_eee_quiet_credit_mode         : 1;
3825                u64 tx_eee_wait_gmi_fast_idle        : 1;
3826                u64 tx_qsgmii_port0_init             : 1;
3827                u64 tx_eee_rx_sync_status_enable     : 1;
3828                u64 pcs_alt_an                       : 1;
3829                u64 reserved_5_7                     : 3;
3830                u64 rx_pcs_sync_signal_detect        : 1;
3831                u64 rx_pcs_sync_timeout              : 1;
3832                u64 rx_pcs_eee_mode_enable           : 1;
3833                u64 rx_pcs_lpi_enable                : 1;
3834                u64 rx_pcs_802_rx_k                  : 1;
3835                u64 rx_pcs_alt_qlb2i                 : 1;
3836                u64 reserved_14_15                   : 2;
3837                u64 rx_cgp_gser_throttle             : 1;
3838                u64 rx_cgp_edet_filter               : 1;
3839                u64 rx_cgp_edet_qlm_val              : 1;
3840                u64 reserved_19_63                   : 45;
3841        } s;
3842        /* struct cgxx_gmp_miscx_cfg_s cn; */
3843};
3844
3845static inline u64 CGXX_GMP_MISCX_CFG(u64 a)
3846        __attribute__ ((pure, always_inline));
3847static inline u64 CGXX_GMP_MISCX_CFG(u64 a)
3848{
3849        return 0x34000 + 0x40000 * a;
3850}
3851
3852/**
3853 * Register (RSL) cgx#_gmp_pcs#_an_expansion
3854 *
 * CGX GMP PCS AN Expansion Register 6 AN status
3856 */
3857union cgxx_gmp_pcsx_an_expansion {
3858        u64 u;
3859        struct cgxx_gmp_pcsx_an_expansion_s {
3860                u64 reserved_0                       : 1;
3861                u64 page_received                    : 1;
3862                u64 next_page_able                   : 1;
3863                u64 reserved_3_63                    : 61;
3864        } s;
3865        /* struct cgxx_gmp_pcsx_an_expansion_s cn; */
3866};
3867
3868static inline u64 CGXX_GMP_PCSX_AN_EXPANSION(u64 a)
3869        __attribute__ ((pure, always_inline));
3870static inline u64 CGXX_GMP_PCSX_AN_EXPANSION(u64 a)
3871{
3872        return 0x30a60 + 0x40000 * a;
3873}
3874
3875/**
3876 * Register (RSL) cgx#_gmp_pcs#_an_lp_abil_np
3877 *
3878 * CGX GMP PCS AN Link Partner Ability Next Page Register 8 This register
3879 * contains the advertised ability of the link partners Next Page. The
3880 * definition for this register is provided in 32.5.4.2 for changes to
3881 * 28.2.4.1.4.
3882 */
3883union cgxx_gmp_pcsx_an_lp_abil_np {
3884        u64 u;
3885        struct cgxx_gmp_pcsx_an_lp_abil_np_s {
3886                u64 m_u                              : 11;
3887                u64 toggle                           : 1;
3888                u64 ack2                             : 1;
3889                u64 mp                               : 1;
3890                u64 ack                              : 1;
3891                u64 np                               : 1;
3892                u64 reserved_16_63                   : 48;
3893        } s;
3894        /* struct cgxx_gmp_pcsx_an_lp_abil_np_s cn; */
3895};
3896
3897static inline u64 CGXX_GMP_PCSX_AN_LP_ABIL_NP(u64 a)
3898        __attribute__ ((pure, always_inline));
3899static inline u64 CGXX_GMP_PCSX_AN_LP_ABIL_NP(u64 a)
3900{
3901        return 0x30a80 + 0x40000 * a;
3902}
3903
3904/**
3905 * Register (RSL) cgx#_gmp_pcs#_an_np_tx
3906 *
3907 * CGX GMP PCS AN Next Page Transmit Register 7 Software programs this
3908 * register with the contents of the AN message next page or unformatted
3909 * next page link code word to be transmitted during autonegotiation.
3910 * Next page exchange occurs after the base link code words have been
3911 * exchanged if either end of the link segment sets the NP bit to 1,
3912 * indicating that it has at least one next page to send. Once initiated,
3913 * next page exchange continues until both ends of the link segment set
3914 * their NP bits to 0. Both sides must be NP capable to use NP exchanges.
3915 */
3916union cgxx_gmp_pcsx_an_np_tx {
3917        u64 u;
3918        struct cgxx_gmp_pcsx_an_np_tx_s {
3919                u64 m_u                              : 11;
3920                u64 toggle                           : 1;
3921                u64 ack2                             : 1;
3922                u64 mp                               : 1;
3923                u64 ack                              : 1;
3924                u64 np                               : 1;
3925                u64 reserved_16_63                   : 48;
3926        } s;
3927        /* struct cgxx_gmp_pcsx_an_np_tx_s cn; */
3928};
3929
3930static inline u64 CGXX_GMP_PCSX_AN_NP_TX(u64 a)
3931        __attribute__ ((pure, always_inline));
3932static inline u64 CGXX_GMP_PCSX_AN_NP_TX(u64 a)
3933{
3934        return 0x30a70 + 0x40000 * a;
3935}
3936
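/*
 * Usage sketch (illustrative only): queueing a single message next page
 * for transmission as described above.  Assumes writeq() and a mapped
 * BAR0 pointer; "base" and "lmac" are hypothetical names and the message
 * code is only an example.
 *
 *	union cgxx_gmp_pcsx_an_np_tx np = { .u = 0 };
 *
 *	np.s.m_u = 0x1;		// message/unformatted code field
 *	np.s.mp = 1;		// message page (not unformatted)
 *	np.s.np = 0;		// no further next pages to send
 *	writeq(np.u, base + CGXX_GMP_PCSX_AN_NP_TX(lmac));
 */
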
3937/**
3938 * Register (RSL) cgx#_gmp_pcs#_dbg_control
3939 *
3940 * CGX PCS Debug Control Registers
3941 */
3942union cgxx_gmp_pcsx_dbg_control {
3943        u64 u;
3944        struct cgxx_gmp_pcsx_dbg_control_s {
3945                u64 us_clk_period                    : 7;
3946                u64 reserved_7_63                    : 57;
3947        } s;
3948        /* struct cgxx_gmp_pcsx_dbg_control_s cn; */
3949};
3950
3951static inline u64 CGXX_GMP_PCSX_DBG_CONTROL(u64 a)
3952        __attribute__ ((pure, always_inline));
3953static inline u64 CGXX_GMP_PCSX_DBG_CONTROL(u64 a)
3954{
3955        return 0x31000 + 0x40000 * a;
3956}
3957
3958/**
3959 * Register (RSL) cgx#_gmp_pcs#_rx_eee_wake
3960 *
3961 * INTERNAL: CGX GMP PCS  RX EEE Wake Error Counter  Registers  Reserved.
3962 * Internal: This register is used by PHY types that support EEE to count
3963 * wake time faults where the PHY fails to complete its normal wake
3964 * sequence within the time required for the specific PHY type. The
3965 * definition of the fault event to be counted is defined for each PHY
3966 * and may occur during a refresh or a wake-up as defined by the PHY.
3967 * This 16-bit counter shall be reset to all zeros upon execution of the
3968 * PCS reset. This counter shall be held at all ones in the case of
3969 * overflow.
3970 */
3971union cgxx_gmp_pcsx_rx_eee_wake {
3972        u64 u;
3973        struct cgxx_gmp_pcsx_rx_eee_wake_s {
3974                u64 error_counter                    : 16;
3975                u64 reserved_16_63                   : 48;
3976        } s;
3977        /* struct cgxx_gmp_pcsx_rx_eee_wake_s cn; */
3978};
3979
3980static inline u64 CGXX_GMP_PCSX_RX_EEE_WAKE(u64 a)
3981        __attribute__ ((pure, always_inline));
3982static inline u64 CGXX_GMP_PCSX_RX_EEE_WAKE(u64 a)
3983{
3984        return 0x30910 + 0x40000 * a;
3985}
3986
3987/**
3988 * Register (RSL) cgx#_gmp_pcs#_rx_lpi_timing
3989 *
3990 * INTERNAL: CGX GMP PCS  RX EEE LPI Timing Parameters Registers
3991 * Reserved. Internal: Receiver LPI timing parameters Tqr, Twr and Twtf.
3992 */
3993union cgxx_gmp_pcsx_rx_lpi_timing {
3994        u64 u;
3995        struct cgxx_gmp_pcsx_rx_lpi_timing_s {
3996                u64 twtf                             : 18;
3997                u64 reserved_18_19                   : 2;
3998                u64 twr                              : 12;
3999                u64 tqr                              : 20;
4000                u64 reserved_52_63                   : 12;
4001        } s;
4002        /* struct cgxx_gmp_pcsx_rx_lpi_timing_s cn; */
4003};
4004
4005static inline u64 CGXX_GMP_PCSX_RX_LPI_TIMING(u64 a)
4006        __attribute__ ((pure, always_inline));
4007static inline u64 CGXX_GMP_PCSX_RX_LPI_TIMING(u64 a)
4008{
4009        return 0x30900 + 0x40000 * a;
4010}
4011
4012/**
4013 * Register (RSL) cgx#_gmp_pcs#_status1
4014 *
4015 * CGX GMP PCS Status 1 Register PCS LPI Status, Link OK.  Register 3.1
4016 */
4017union cgxx_gmp_pcsx_status1 {
4018        u64 u;
4019        struct cgxx_gmp_pcsx_status1_s {
4020                u64 reserved_0_1                     : 2;
4021                u64 receive_link_status              : 1;
4022                u64 reserved_3_7                     : 5;
4023                u64 rx_lpi_indication                : 1;
4024                u64 tx_lpi_indication                : 1;
4025                u64 rx_lpi_received                  : 1;
4026                u64 tx_lpi_received                  : 1;
4027                u64 reserved_12_63                   : 52;
4028        } s;
4029        /* struct cgxx_gmp_pcsx_status1_s cn; */
4030};
4031
4032static inline u64 CGXX_GMP_PCSX_STATUS1(u64 a)
4033        __attribute__ ((pure, always_inline));
4034static inline u64 CGXX_GMP_PCSX_STATUS1(u64 a)
4035{
4036        return 0x30880 + 0x40000 * a;
4037}
4038
4039/**
4040 * Register (RSL) cgx#_gmp_pcs#_tx_lpi_timing
4041 *
4042 * INTERNAL: CGX GMP GMI  TX EEE LPI Timing Parameters Registers
4043 * Reserved. Internal: Transmitter LPI timing parameters Tsl, Tql and
4044 * Tul.
4045 */
4046union cgxx_gmp_pcsx_tx_lpi_timing {
4047        u64 u;
4048        struct cgxx_gmp_pcsx_tx_lpi_timing_s {
4049                u64 tql                              : 19;
4050                u64 reserved_19_31                   : 13;
4051                u64 tul                              : 12;
4052                u64 reserved_44_47                   : 4;
4053                u64 tsl                              : 12;
4054                u64 reserved_60_63                   : 4;
4055        } s;
4056        /* struct cgxx_gmp_pcsx_tx_lpi_timing_s cn; */
4057};
4058
4059static inline u64 CGXX_GMP_PCSX_TX_LPI_TIMING(u64 a)
4060        __attribute__ ((pure, always_inline));
4061static inline u64 CGXX_GMP_PCSX_TX_LPI_TIMING(u64 a)
4062{
4063        return 0x30800 + 0x40000 * a;
4064}
4065
4066/**
4067 * Register (RSL) cgx#_gmp_pcs_an#_adv
4068 *
4069 * CGX GMP PCS Autonegotiation Advertisement Registers
4070 */
4071union cgxx_gmp_pcs_anx_adv {
4072        u64 u;
4073        struct cgxx_gmp_pcs_anx_adv_s {
4074                u64 reserved_0_4                     : 5;
4075                u64 fd                               : 1;
4076                u64 hfd                              : 1;
4077                u64 pause                            : 2;
4078                u64 reserved_9_11                    : 3;
4079                u64 rem_flt                          : 2;
4080                u64 reserved_14                      : 1;
4081                u64 np                               : 1;
4082                u64 reserved_16_63                   : 48;
4083        } s;
4084        /* struct cgxx_gmp_pcs_anx_adv_s cn; */
4085};
4086
4087static inline u64 CGXX_GMP_PCS_ANX_ADV(u64 a)
4088        __attribute__ ((pure, always_inline));
4089static inline u64 CGXX_GMP_PCS_ANX_ADV(u64 a)
4090{
4091        return 0x30010 + 0x40000 * a;
4092}
4093
4094/**
4095 * Register (RSL) cgx#_gmp_pcs_an#_ext_st
4096 *
 * CGX GMP PCS Autonegotiation Extended Status Registers
4098 */
4099union cgxx_gmp_pcs_anx_ext_st {
4100        u64 u;
4101        struct cgxx_gmp_pcs_anx_ext_st_s {
4102                u64 reserved_0_11                    : 12;
4103                u64 thou_thd                         : 1;
4104                u64 thou_tfd                         : 1;
4105                u64 thou_xhd                         : 1;
4106                u64 thou_xfd                         : 1;
4107                u64 reserved_16_63                   : 48;
4108        } s;
4109        /* struct cgxx_gmp_pcs_anx_ext_st_s cn; */
4110};
4111
4112static inline u64 CGXX_GMP_PCS_ANX_EXT_ST(u64 a)
4113        __attribute__ ((pure, always_inline));
4114static inline u64 CGXX_GMP_PCS_ANX_EXT_ST(u64 a)
4115{
4116        return 0x30028 + 0x40000 * a;
4117}
4118
4119/**
4120 * Register (RSL) cgx#_gmp_pcs_an#_lp_abil
4121 *
4122 * CGX GMP PCS Autonegotiation Link Partner Ability Registers This is the
4123 * autonegotiation link partner ability register 5 as per IEEE 802.3,
4124 * Clause 37.
4125 */
4126union cgxx_gmp_pcs_anx_lp_abil {
4127        u64 u;
4128        struct cgxx_gmp_pcs_anx_lp_abil_s {
4129                u64 reserved_0_4                     : 5;
4130                u64 fd                               : 1;
4131                u64 hfd                              : 1;
4132                u64 pause                            : 2;
4133                u64 reserved_9_11                    : 3;
4134                u64 rem_flt                          : 2;
4135                u64 ack                              : 1;
4136                u64 np                               : 1;
4137                u64 reserved_16_63                   : 48;
4138        } s;
4139        /* struct cgxx_gmp_pcs_anx_lp_abil_s cn; */
4140};
4141
4142static inline u64 CGXX_GMP_PCS_ANX_LP_ABIL(u64 a)
4143        __attribute__ ((pure, always_inline));
4144static inline u64 CGXX_GMP_PCS_ANX_LP_ABIL(u64 a)
4145{
4146        return 0x30018 + 0x40000 * a;
4147}
4148
4149/**
4150 * Register (RSL) cgx#_gmp_pcs_an#_results
4151 *
4152 * CGX GMP PCS Autonegotiation Results Registers This register is not
4153 * valid when CGX()_GMP_PCS_MISC()_CTL[AN_OVRD] is set to 1. If
4154 * CGX()_GMP_PCS_MISC()_CTL[AN_OVRD] is set to 0 and
4155 * CGX()_GMP_PCS_AN()_RESULTS[AN_CPT] is set to 1, this register is
4156 * valid.
4157 */
4158union cgxx_gmp_pcs_anx_results {
4159        u64 u;
4160        struct cgxx_gmp_pcs_anx_results_s {
4161                u64 link_ok                          : 1;
4162                u64 dup                              : 1;
4163                u64 an_cpt                           : 1;
4164                u64 spd                              : 2;
4165                u64 pause                            : 2;
4166                u64 reserved_7_63                    : 57;
4167        } s;
4168        /* struct cgxx_gmp_pcs_anx_results_s cn; */
4169};
4170
4171static inline u64 CGXX_GMP_PCS_ANX_RESULTS(u64 a)
4172        __attribute__ ((pure, always_inline));
4173static inline u64 CGXX_GMP_PCS_ANX_RESULTS(u64 a)
4174{
4175        return 0x30020 + 0x40000 * a;
4176}
4177
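/*
 * Usage sketch (illustrative only): reading the negotiated link state
 * while honoring the validity rule above ([AN_OVRD] clear and [AN_CPT]
 * set).  Assumes readq() from asm/io.h and a mapped BAR0 pointer; "base"
 * and "lmac" are hypothetical names.
 *
 *	union cgxx_gmp_pcs_anx_results res;
 *
 *	res.u = readq(base + CGXX_GMP_PCS_ANX_RESULTS(lmac));
 *	if (res.s.an_cpt && res.s.link_ok) {
 *		// res.s.spd holds the negotiated speed, res.s.dup the
 *		// duplex and res.s.pause the PAUSE ability
 *	}
 */
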
4178/**
4179 * Register (RSL) cgx#_gmp_pcs_int#
4180 *
4181 * CGX GMP PCS Interrupt Registers
4182 */
4183union cgxx_gmp_pcs_intx {
4184        u64 u;
4185        struct cgxx_gmp_pcs_intx_s {
4186                u64 lnkspd                           : 1;
4187                u64 xmit                             : 1;
4188                u64 an_err                           : 1;
4189                u64 txfifu                           : 1;
4190                u64 txfifo                           : 1;
4191                u64 txbad                            : 1;
4192                u64 rxerr                            : 1;
4193                u64 rxbad                            : 1;
4194                u64 rxlock                           : 1;
4195                u64 an_bad                           : 1;
4196                u64 sync_bad                         : 1;
4197                u64 dup                              : 1;
4198                u64 dbg_sync                         : 1;
4199                u64 reserved_13_15                   : 3;
4200                u64 an_page_received                 : 1;
4201                u64 an_complete                      : 1;
4202                u64 reserved_18_19                   : 2;
4203                u64 eee_tx_change                    : 1;
4204                u64 eee_rx_change                    : 1;
4205                u64 eee_rx_link_fail                 : 1;
4206                u64 reserved_23_63                   : 41;
4207        } s;
4208        /* struct cgxx_gmp_pcs_intx_s cn; */
4209};
4210
4211static inline u64 CGXX_GMP_PCS_INTX(u64 a)
4212        __attribute__ ((pure, always_inline));
4213static inline u64 CGXX_GMP_PCS_INTX(u64 a)
4214{
4215        return 0x30080 + 0x40000 * a;
4216}
4217
4218/**
4219 * Register (RSL) cgx#_gmp_pcs_int#_ena_w1c
4220 *
4221 * CGX GMP PCS Interrupt Enable Clear Registers This register clears
4222 * interrupt enable bits.
4223 */
4224union cgxx_gmp_pcs_intx_ena_w1c {
4225        u64 u;
4226        struct cgxx_gmp_pcs_intx_ena_w1c_s {
4227                u64 lnkspd                           : 1;
4228                u64 xmit                             : 1;
4229                u64 an_err                           : 1;
4230                u64 txfifu                           : 1;
4231                u64 txfifo                           : 1;
4232                u64 txbad                            : 1;
4233                u64 rxerr                            : 1;
4234                u64 rxbad                            : 1;
4235                u64 rxlock                           : 1;
4236                u64 an_bad                           : 1;
4237                u64 sync_bad                         : 1;
4238                u64 dup                              : 1;
4239                u64 dbg_sync                         : 1;
4240                u64 reserved_13_15                   : 3;
4241                u64 an_page_received                 : 1;
4242                u64 an_complete                      : 1;
4243                u64 reserved_18_19                   : 2;
4244                u64 eee_tx_change                    : 1;
4245                u64 eee_rx_change                    : 1;
4246                u64 eee_rx_link_fail                 : 1;
4247                u64 reserved_23_63                   : 41;
4248        } s;
4249        /* struct cgxx_gmp_pcs_intx_ena_w1c_s cn; */
4250};
4251
4252static inline u64 CGXX_GMP_PCS_INTX_ENA_W1C(u64 a)
4253        __attribute__ ((pure, always_inline));
4254static inline u64 CGXX_GMP_PCS_INTX_ENA_W1C(u64 a)
4255{
4256        return 0x30090 + 0x40000 * a;
4257}
4258
4259/**
4260 * Register (RSL) cgx#_gmp_pcs_int#_ena_w1s
4261 *
4262 * CGX GMP PCS Interrupt Enable Set Registers This register sets
4263 * interrupt enable bits.
4264 */
4265union cgxx_gmp_pcs_intx_ena_w1s {
4266        u64 u;
4267        struct cgxx_gmp_pcs_intx_ena_w1s_s {
4268                u64 lnkspd                           : 1;
4269                u64 xmit                             : 1;
4270                u64 an_err                           : 1;
4271                u64 txfifu                           : 1;
4272                u64 txfifo                           : 1;
4273                u64 txbad                            : 1;
4274                u64 rxerr                            : 1;
4275                u64 rxbad                            : 1;
4276                u64 rxlock                           : 1;
4277                u64 an_bad                           : 1;
4278                u64 sync_bad                         : 1;
4279                u64 dup                              : 1;
4280                u64 dbg_sync                         : 1;
4281                u64 reserved_13_15                   : 3;
4282                u64 an_page_received                 : 1;
4283                u64 an_complete                      : 1;
4284                u64 reserved_18_19                   : 2;
4285                u64 eee_tx_change                    : 1;
4286                u64 eee_rx_change                    : 1;
4287                u64 eee_rx_link_fail                 : 1;
4288                u64 reserved_23_63                   : 41;
4289        } s;
4290        /* struct cgxx_gmp_pcs_intx_ena_w1s_s cn; */
4291};
4292
4293static inline u64 CGXX_GMP_PCS_INTX_ENA_W1S(u64 a)
4294        __attribute__ ((pure, always_inline));
4295static inline u64 CGXX_GMP_PCS_INTX_ENA_W1S(u64 a)
4296{
4297        return 0x30098 + 0x40000 * a;
4298}
4299
4300/**
4301 * Register (RSL) cgx#_gmp_pcs_int#_w1s
4302 *
4303 * CGX GMP PCS Interrupt Set Registers This register sets interrupt bits.
4304 */
4305union cgxx_gmp_pcs_intx_w1s {
4306        u64 u;
4307        struct cgxx_gmp_pcs_intx_w1s_s {
4308                u64 lnkspd                           : 1;
4309                u64 xmit                             : 1;
4310                u64 an_err                           : 1;
4311                u64 txfifu                           : 1;
4312                u64 txfifo                           : 1;
4313                u64 txbad                            : 1;
4314                u64 rxerr                            : 1;
4315                u64 rxbad                            : 1;
4316                u64 rxlock                           : 1;
4317                u64 an_bad                           : 1;
4318                u64 sync_bad                         : 1;
4319                u64 dup                              : 1;
4320                u64 dbg_sync                         : 1;
4321                u64 reserved_13_15                   : 3;
4322                u64 an_page_received                 : 1;
4323                u64 an_complete                      : 1;
4324                u64 reserved_18_19                   : 2;
4325                u64 eee_tx_change                    : 1;
4326                u64 eee_rx_change                    : 1;
4327                u64 eee_rx_link_fail                 : 1;
4328                u64 reserved_23_63                   : 41;
4329        } s;
4330        /* struct cgxx_gmp_pcs_intx_w1s_s cn; */
4331};
4332
4333static inline u64 CGXX_GMP_PCS_INTX_W1S(u64 a)
4334        __attribute__ ((pure, always_inline));
4335static inline u64 CGXX_GMP_PCS_INTX_W1S(u64 a)
4336{
4337        return 0x30088 + 0x40000 * a;
4338}
4339
4340/**
4341 * Register (RSL) cgx#_gmp_pcs_link#_timer
4342 *
4343 * CGX GMP PCS Link Timer Registers This is the 1.6 ms nominal link timer
4344 * register.
4345 */
4346union cgxx_gmp_pcs_linkx_timer {
4347        u64 u;
4348        struct cgxx_gmp_pcs_linkx_timer_s {
4349                u64 count                            : 16;
4350                u64 reserved_16_63                   : 48;
4351        } s;
4352        /* struct cgxx_gmp_pcs_linkx_timer_s cn; */
4353};
4354
4355static inline u64 CGXX_GMP_PCS_LINKX_TIMER(u64 a)
4356        __attribute__ ((pure, always_inline));
4357static inline u64 CGXX_GMP_PCS_LINKX_TIMER(u64 a)
4358{
4359        return 0x30040 + 0x40000 * a;
4360}
4361
4362/**
4363 * Register (RSL) cgx#_gmp_pcs_misc#_ctl
4364 *
4365 * CGX GMP SGMII Miscellaneous Control Registers Internal: SGMII bit [12]
 * is really a misnomer; it is a decode of pi_qlm_cfg pins to indicate
4367 * SGMII or 1000Base-X modes.  Note: The SGMII AN Advertisement Register
4368 * above will be sent during Auto Negotiation if [MAC_PHY] is set (1=PHY
4369 * mode). If the bit is not set (0=MAC mode), the tx_Config_Reg\<14\>
4370 * becomes ACK bit and tx_Config_Reg\<0\> is always 1. All other bits in
4371 * tx_Config_Reg sent will be 0. The PHY dictates the Auto Negotiation
4372 * results.
4373 */
4374union cgxx_gmp_pcs_miscx_ctl {
4375        u64 u;
4376        struct cgxx_gmp_pcs_miscx_ctl_s {
4377                u64 samp_pt                          : 7;
4378                u64 an_ovrd                          : 1;
4379                u64 mode                             : 1;
4380                u64 mac_phy                          : 1;
4381                u64 loopbck2                         : 1;
4382                u64 gmxeno                           : 1;
4383                u64 reserved_12                      : 1;
4384                u64 disp_en                          : 1;
4385                u64 reserved_14_15                   : 2;
4386                u64 qsgmii_comma_wd                  : 16;
4387                u64 qsgmii_comma_wd_en               : 1;
4388                u64 reserved_33_63                   : 31;
4389        } s;
4390        struct cgxx_gmp_pcs_miscx_ctl_cn {
4391                u64 samp_pt                          : 7;
4392                u64 an_ovrd                          : 1;
4393                u64 mode                             : 1;
4394                u64 mac_phy                          : 1;
4395                u64 loopbck2                         : 1;
4396                u64 gmxeno                           : 1;
4397                u64 reserved_12                      : 1;
4398                u64 disp_en                          : 1;
4399                u64 reserved_14_15                   : 2;
4400                u64 qsgmii_comma_wd                  : 16;
4401                u64 qsgmii_comma_wd_en               : 1;
4402                u64 reserved_33_35                   : 3;
4403                u64 reserved_36_63                   : 28;
4404        } cn;
4405};
4406
4407static inline u64 CGXX_GMP_PCS_MISCX_CTL(u64 a)
4408        __attribute__ ((pure, always_inline));
4409static inline u64 CGXX_GMP_PCS_MISCX_CTL(u64 a)
4410{
4411        return 0x30078 + 0x40000 * a;
4412}
4413
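/*
 * Usage sketch (illustrative only): selecting MAC mode so that the PHY
 * dictates the autonegotiation results, as described in the note above.
 * Assumes readq()/writeq() and a mapped BAR0 pointer; "base" and "lmac"
 * are hypothetical names.
 *
 *	union cgxx_gmp_pcs_miscx_ctl misc;
 *
 *	misc.u = readq(base + CGXX_GMP_PCS_MISCX_CTL(lmac));
 *	misc.s.mac_phy = 0;	// 0 = MAC mode, 1 = PHY mode
 *	misc.s.an_ovrd = 0;	// use the autonegotiation results
 *	writeq(misc.u, base + CGXX_GMP_PCS_MISCX_CTL(lmac));
 */
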
4414/**
4415 * Register (RSL) cgx#_gmp_pcs_mr#_control
4416 *
4417 * CGX GMP PCS Control Registers
4418 */
4419union cgxx_gmp_pcs_mrx_control {
4420        u64 u;
4421        struct cgxx_gmp_pcs_mrx_control_s {
4422                u64 reserved_0_4                     : 5;
4423                u64 uni                              : 1;
4424                u64 spdmsb                           : 1;
4425                u64 coltst                           : 1;
4426                u64 dup                              : 1;
4427                u64 rst_an                           : 1;
4428                u64 reserved_10                      : 1;
4429                u64 pwr_dn                           : 1;
4430                u64 an_en                            : 1;
4431                u64 spdlsb                           : 1;
4432                u64 loopbck1                         : 1;
4433                u64 reset                            : 1;
4434                u64 reserved_16_63                   : 48;
4435        } s;
4436        /* struct cgxx_gmp_pcs_mrx_control_s cn; */
4437};
4438
4439static inline u64 CGXX_GMP_PCS_MRX_CONTROL(u64 a)
4440        __attribute__ ((pure, always_inline));
4441static inline u64 CGXX_GMP_PCS_MRX_CONTROL(u64 a)
4442{
4443        return 0x30000 + 0x40000 * a;
4444}
4445
4446/**
4447 * Register (RSL) cgx#_gmp_pcs_mr#_status
4448 *
4449 * CGX GMP PCS Status Registers Bits \<15:9\> in this register indicate
4450 * the ability to operate when CGX()_GMP_PCS_MISC()_CTL[MAC_PHY] is set
4451 * to MAC mode. Bits \<15:9\> are always read as 0, indicating that the
4452 * chip cannot operate in the corresponding modes. The field [RM_FLT] is
4453 * a 'don't care' when the selected mode is SGMII/QSGMII.
4454 */
4455union cgxx_gmp_pcs_mrx_status {
4456        u64 u;
4457        struct cgxx_gmp_pcs_mrx_status_s {
4458                u64 extnd                            : 1;
4459                u64 reserved_1                       : 1;
4460                u64 lnk_st                           : 1;
4461                u64 an_abil                          : 1;
4462                u64 rm_flt                           : 1;
4463                u64 an_cpt                           : 1;
4464                u64 prb_sup                          : 1;
4465                u64 reserved_7                       : 1;
4466                u64 ext_st                           : 1;
4467                u64 hun_t2hd                         : 1;
4468                u64 hun_t2fd                         : 1;
4469                u64 ten_hd                           : 1;
4470                u64 ten_fd                           : 1;
4471                u64 hun_xhd                          : 1;
4472                u64 hun_xfd                          : 1;
4473                u64 hun_t4                           : 1;
4474                u64 reserved_16_63                   : 48;
4475        } s;
4476        /* struct cgxx_gmp_pcs_mrx_status_s cn; */
4477};
4478
4479static inline u64 CGXX_GMP_PCS_MRX_STATUS(u64 a)
4480        __attribute__ ((pure, always_inline));
4481static inline u64 CGXX_GMP_PCS_MRX_STATUS(u64 a)
4482{
4483        return 0x30008 + 0x40000 * a;
4484}
4485
4486/**
4487 * Register (RSL) cgx#_gmp_pcs_rx#_states
4488 *
4489 * CGX GMP PCS RX State-Machines States Registers
4490 */
4491union cgxx_gmp_pcs_rxx_states {
4492        u64 u;
4493        struct cgxx_gmp_pcs_rxx_states_s {
4494                u64 an_st                            : 4;
4495                u64 an_bad                           : 1;
4496                u64 sync                             : 4;
4497                u64 sync_bad                         : 1;
4498                u64 rx_st                            : 5;
4499                u64 rx_bad                           : 1;
4500                u64 reserved_16_63                   : 48;
4501        } s;
4502        /* struct cgxx_gmp_pcs_rxx_states_s cn; */
4503};
4504
4505static inline u64 CGXX_GMP_PCS_RXX_STATES(u64 a)
4506        __attribute__ ((pure, always_inline));
4507static inline u64 CGXX_GMP_PCS_RXX_STATES(u64 a)
4508{
4509        return 0x30058 + 0x40000 * a;
4510}
4511
4512/**
4513 * Register (RSL) cgx#_gmp_pcs_rx#_sync
4514 *
4515 * CGX GMP PCS Code Group Synchronization Registers
4516 */
4517union cgxx_gmp_pcs_rxx_sync {
4518        u64 u;
4519        struct cgxx_gmp_pcs_rxx_sync_s {
4520                u64 bit_lock                         : 1;
4521                u64 sync                             : 1;
4522                u64 reserved_2_63                    : 62;
4523        } s;
4524        /* struct cgxx_gmp_pcs_rxx_sync_s cn; */
4525};
4526
4527static inline u64 CGXX_GMP_PCS_RXX_SYNC(u64 a)
4528        __attribute__ ((pure, always_inline));
4529static inline u64 CGXX_GMP_PCS_RXX_SYNC(u64 a)
4530{
4531        return 0x30050 + 0x40000 * a;
4532}
4533
4534/**
4535 * Register (RSL) cgx#_gmp_pcs_sgm#_an_adv
4536 *
4537 * CGX GMP PCS SGMII Autonegotiation Advertisement Registers This is the
4538 * SGMII autonegotiation advertisement register (sent out as
4539 * tx_Config_Reg\<15:0\> as defined in IEEE 802.3 clause 37). This
4540 * register is sent during autonegotiation if
4541 * CGX()_GMP_PCS_MISC()_CTL[MAC_PHY] is set (1 = PHY mode). If the bit is
4542 * not set (0 = MAC mode), then tx_Config_Reg\<14\> becomes ACK bit and
4543 * tx_Config_Reg\<0\> is always 1. All other bits in tx_Config_Reg sent
4544 * will be 0. The PHY dictates the autonegotiation results.
4545 */
4546union cgxx_gmp_pcs_sgmx_an_adv {
4547        u64 u;
4548        struct cgxx_gmp_pcs_sgmx_an_adv_s {
4549                u64 one                              : 1;
4550                u64 reserved_1_9                     : 9;
4551                u64 speed                            : 2;
4552                u64 dup                              : 1;
4553                u64 reserved_13                      : 1;
4554                u64 ack                              : 1;
4555                u64 link                             : 1;
4556                u64 reserved_16_63                   : 48;
4557        } s;
4558        /* struct cgxx_gmp_pcs_sgmx_an_adv_s cn; */
4559};
4560
4561static inline u64 CGXX_GMP_PCS_SGMX_AN_ADV(u64 a)
4562        __attribute__ ((pure, always_inline));
4563static inline u64 CGXX_GMP_PCS_SGMX_AN_ADV(u64 a)
4564{
4565        return 0x30068 + 0x40000 * a;
4566}
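
/*
 * Example (a minimal sketch, not part of the generated definitions):
 * programming the SGMII advertisement for PHY mode as described above.
 * The sketch assumes cgx_base points at this CGX block's mapped BAR0,
 * that readq()/writeq() from asm/io.h are available, and that [SPEED]
 * uses the usual SGMII encoding (0 = 10M, 1 = 100M, 2 = 1000M); the
 * advertised values (1G, full duplex, link up) are illustrative only.
 *
 *	void cgx_gmp_sgmii_adv_1g_fd(void __iomem *cgx_base, int lmac)
 *	{
 *		union cgxx_gmp_pcs_sgmx_an_adv adv;
 *
 *		adv.u = readq(cgx_base + CGXX_GMP_PCS_SGMX_AN_ADV(lmac));
 *		adv.s.one = 1;		// tx_Config_Reg<0> is always 1
 *		adv.s.speed = 2;	// 1000 Mb/s (assumed SGMII encoding)
 *		adv.s.dup = 1;		// full duplex
 *		adv.s.link = 1;		// advertise link up
 *		writeq(adv.u, cgx_base + CGXX_GMP_PCS_SGMX_AN_ADV(lmac));
 *	}
 */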
4567
4568/**
4569 * Register (RSL) cgx#_gmp_pcs_sgm#_lp_adv
4570 *
4571 * CGX GMP PCS SGMII Link-Partner-Advertisement Registers This is the
4572 * SGMII link partner advertisement register (received as
4573 * rx_Config_Reg\<15:0\> as defined in IEEE 802.3 clause 37).
4574 */
4575union cgxx_gmp_pcs_sgmx_lp_adv {
4576        u64 u;
4577        struct cgxx_gmp_pcs_sgmx_lp_adv_s {
4578                u64 one                              : 1;
4579                u64 reserved_1_9                     : 9;
4580                u64 speed                            : 2;
4581                u64 dup                              : 1;
4582                u64 reserved_13_14                   : 2;
4583                u64 link                             : 1;
4584                u64 reserved_16_63                   : 48;
4585        } s;
4586        struct cgxx_gmp_pcs_sgmx_lp_adv_cn {
4587                u64 one                              : 1;
4588                u64 reserved_1_9                     : 9;
4589                u64 speed                            : 2;
4590                u64 dup                              : 1;
4591                u64 reserved_13                      : 1;
4592                u64 reserved_14                      : 1;
4593                u64 link                             : 1;
4594                u64 reserved_16_63                   : 48;
4595        } cn;
4596};
4597
4598static inline u64 CGXX_GMP_PCS_SGMX_LP_ADV(u64 a)
4599        __attribute__ ((pure, always_inline));
4600static inline u64 CGXX_GMP_PCS_SGMX_LP_ADV(u64 a)
4601{
4602        return 0x30070 + 0x40000 * a;
4603}
4604
4605/**
4606 * Register (RSL) cgx#_gmp_pcs_tx#_states
4607 *
4608 * CGX GMP PCS TX State-Machines States Registers
4609 */
4610union cgxx_gmp_pcs_txx_states {
4611        u64 u;
4612        struct cgxx_gmp_pcs_txx_states_s {
4613                u64 ord_st                           : 4;
4614                u64 tx_bad                           : 1;
4615                u64 xmit                             : 2;
4616                u64 reserved_7_63                    : 57;
4617        } s;
4618        /* struct cgxx_gmp_pcs_txx_states_s cn; */
4619};
4620
4621static inline u64 CGXX_GMP_PCS_TXX_STATES(u64 a)
4622        __attribute__ ((pure, always_inline));
4623static inline u64 CGXX_GMP_PCS_TXX_STATES(u64 a)
4624{
4625        return 0x30060 + 0x40000 * a;
4626}
4627
4628/**
4629 * Register (RSL) cgx#_gmp_pcs_tx_rx#_polarity
4630 *
4631 * CGX GMP PCS TX/RX Polarity Registers
4632 * CGX()_GMP_PCS_TX_RX()_POLARITY[AUTORXPL] shows correct polarity needed
4633 * on the link receive path after code group synchronization is achieved.
4634 * When LMAC_TYPE=QSGMII, only lane 0 polarity data and settings are
4635 * relevant and settings for lanes 1, 2 and 3 are unused.
4636 */
4637union cgxx_gmp_pcs_tx_rxx_polarity {
4638        u64 u;
4639        struct cgxx_gmp_pcs_tx_rxx_polarity_s {
4640                u64 txplrt                           : 1;
4641                u64 rxplrt                           : 1;
4642                u64 autorxpl                         : 1;
4643                u64 rxovrd                           : 1;
4644                u64 reserved_4_63                    : 60;
4645        } s;
4646        /* struct cgxx_gmp_pcs_tx_rxx_polarity_s cn; */
4647};
4648
4649static inline u64 CGXX_GMP_PCS_TX_RXX_POLARITY(u64 a)
4650        __attribute__ ((pure, always_inline));
4651static inline u64 CGXX_GMP_PCS_TX_RXX_POLARITY(u64 a)
4652{
4653        return 0x30048 + 0x40000 * a;
4654}
4655
4656/**
4657 * Register (RSL) cgx#_msix_pba#
4658 *
4659 * CGX MSI-X Pending Bit Array Registers This register is the MSI-X PBA
4660 * table, the bit number is indexed by the CGX_INT_VEC_E enumeration.
4661 */
4662union cgxx_msix_pbax {
4663        u64 u;
4664        struct cgxx_msix_pbax_s {
4665                u64 pend                             : 64;
4666        } s;
4667        /* struct cgxx_msix_pbax_s cn; */
4668};
4669
4670static inline u64 CGXX_MSIX_PBAX(u64 a)
4671        __attribute__ ((pure, always_inline));
4672static inline u64 CGXX_MSIX_PBAX(u64 a)
4673{
4674        return 0xf0000 + 8 * a;
4675}
4676
4677/**
4678 * Register (RSL) cgx#_msix_vec#_addr
4679 *
4680 * CGX MSI-X Vector Table Address Registers This register is the MSI-X
4681 * vector table, indexed by the CGX_INT_VEC_E enumeration.
4682 */
4683union cgxx_msix_vecx_addr {
4684        u64 u;
4685        struct cgxx_msix_vecx_addr_s {
4686                u64 secvec                           : 1;
4687                u64 reserved_1                       : 1;
4688                u64 addr                             : 51;
4689                u64 reserved_53_63                   : 11;
4690        } s;
4691        /* struct cgxx_msix_vecx_addr_s cn; */
4692};
4693
4694static inline u64 CGXX_MSIX_VECX_ADDR(u64 a)
4695        __attribute__ ((pure, always_inline));
4696static inline u64 CGXX_MSIX_VECX_ADDR(u64 a)
4697{
4698        return 0 + 0x10 * a;
4699}
4700
4701/**
4702 * Register (RSL) cgx#_msix_vec#_ctl
4703 *
4704 * CGX MSI-X Vector Table Control and Data Registers This register is the
4705 * MSI-X vector table, indexed by the CGX_INT_VEC_E enumeration.
4706 */
4707union cgxx_msix_vecx_ctl {
4708        u64 u;
4709        struct cgxx_msix_vecx_ctl_s {
4710                u64 data                             : 32;
4711                u64 mask                             : 1;
4712                u64 reserved_33_63                   : 31;
4713        } s;
4714        /* struct cgxx_msix_vecx_ctl_s cn; */
4715};
4716
4717static inline u64 CGXX_MSIX_VECX_CTL(u64 a)
4718        __attribute__ ((pure, always_inline));
4719static inline u64 CGXX_MSIX_VECX_CTL(u64 a)
4720{
4721        return 8 + 0x10 * a;
4722}
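
/*
 * Example (a minimal sketch, not part of the generated definitions):
 * filling one MSI-X vector table entry, with the index taken from the
 * CGX_INT_VEC_E enumeration.  The msg_addr/msg_data values, the
 * assumption that [ADDR] holds message address bits <52:2>, and the
 * cgx_base mapping are illustrative; real values come from the generic
 * MSI-X setup code.
 *
 *	void cgx_msix_set_vector(void __iomem *cgx_base, u64 vec,
 *				 u64 msg_addr, u32 msg_data)
 *	{
 *		union cgxx_msix_vecx_addr va = { .u = 0 };
 *		union cgxx_msix_vecx_ctl vc = { .u = 0 };
 *
 *		va.s.addr = msg_addr >> 2;	// assumed: address bits <52:2>
 *		writeq(va.u, cgx_base + CGXX_MSIX_VECX_ADDR(vec));
 *
 *		vc.s.data = msg_data;
 *		vc.s.mask = 0;			// leave the vector unmasked
 *		writeq(vc.u, cgx_base + CGXX_MSIX_VECX_CTL(vec));
 *	}
 */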
4723
4724/**
4725 * Register (RSL) cgx#_smu#_bp_test
4726 *
4727 * INTERNAL: CGX SMU TX Backpressure Test Registers
4728 */
4729union cgxx_smux_bp_test {
4730        u64 u;
4731        struct cgxx_smux_bp_test_s {
4732                u64 lfsr_freq                        : 12;
4733                u64 reserved_12_15                   : 4;
4734                u64 bp_cfg                           : 8;
4735                u64 reserved_24_47                   : 24;
4736                u64 enable                           : 4;
4737                u64 reserved_52_63                   : 12;
4738        } s;
4739        /* struct cgxx_smux_bp_test_s cn; */
4740};
4741
4742static inline u64 CGXX_SMUX_BP_TEST(u64 a)
4743        __attribute__ ((pure, always_inline));
4744static inline u64 CGXX_SMUX_BP_TEST(u64 a)
4745{
4746        return 0x20230 + 0x40000 * a;
4747}
4748
4749/**
4750 * Register (RSL) cgx#_smu#_cbfc_ctl
4751 *
4752 * CGX SMU PFC Control Registers Internal: XOFF for a specific
4753 * class/channel \<i\> is XOFF\<i\> = ([PHYS_EN]\<i\> & cmr_rx_phys_bp) |
4754 * ([LOGL_EN]\<i\> & cmr_rx_logl_xoff\<i\>).
4755 */
4756union cgxx_smux_cbfc_ctl {
4757        u64 u;
4758        struct cgxx_smux_cbfc_ctl_s {
4759                u64 rx_en                            : 1;
4760                u64 tx_en                            : 1;
4761                u64 drp_en                           : 1;
4762                u64 bck_en                           : 1;
4763                u64 reserved_4_31                    : 28;
4764                u64 logl_en                          : 16;
4765                u64 phys_en                          : 16;
4766        } s;
4767        /* struct cgxx_smux_cbfc_ctl_s cn; */
4768};
4769
4770static inline u64 CGXX_SMUX_CBFC_CTL(u64 a)
4771        __attribute__ ((pure, always_inline));
4772static inline u64 CGXX_SMUX_CBFC_CTL(u64 a)
4773{
4774        return 0x20218 + 0x40000 * a;
4775}
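
/*
 * Example (a minimal sketch, not part of the generated definitions): the
 * per-class XOFF equation above written out in C for all 16 classes at
 * once.  phys_bp and logl_xoff stand in for the internal cmr_rx_phys_bp
 * and cmr_rx_logl_xoff<15:0> signals and are purely illustrative.
 *
 *	static u16 cgx_smu_cbfc_xoff(union cgxx_smux_cbfc_ctl ctl,
 *				     bool phys_bp, u16 logl_xoff)
 *	{
 *		u16 phys = phys_bp ? ctl.s.phys_en : 0;
 *
 *		return phys | (ctl.s.logl_en & logl_xoff);
 *	}
 */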
4776
4777/**
4778 * Register (RSL) cgx#_smu#_ctrl
4779 *
4780 * CGX SMU Control Registers
4781 */
4782union cgxx_smux_ctrl {
4783        u64 u;
4784        struct cgxx_smux_ctrl_s {
4785                u64 rx_idle                          : 1;
4786                u64 tx_idle                          : 1;
4787                u64 reserved_2_63                    : 62;
4788        } s;
4789        /* struct cgxx_smux_ctrl_s cn; */
4790};
4791
4792static inline u64 CGXX_SMUX_CTRL(u64 a)
4793        __attribute__ ((pure, always_inline));
4794static inline u64 CGXX_SMUX_CTRL(u64 a)
4795{
4796        return 0x20200 + 0x40000 * a;
4797}
4798
4799/**
4800 * Register (RSL) cgx#_smu#_ext_loopback
4801 *
4802 * CGX SMU External Loopback Registers In loopback mode, the IFG1+IFG2 of
4803 * local and remote parties must match exactly; otherwise loopback FIFO
4804 * will overrun: CGX()_SMU()_TX_INT[LB_OVRFLW].
4805 */
4806union cgxx_smux_ext_loopback {
4807        u64 u;
4808        struct cgxx_smux_ext_loopback_s {
4809                u64 thresh                           : 6;
4810                u64 reserved_6_7                     : 2;
4811                u64 depth                            : 6;
4812                u64 reserved_14_15                   : 2;
4813                u64 en                               : 1;
4814                u64 reserved_17_63                   : 47;
4815        } s;
4816        /* struct cgxx_smux_ext_loopback_s cn; */
4817};
4818
4819static inline u64 CGXX_SMUX_EXT_LOOPBACK(u64 a)
4820        __attribute__ ((pure, always_inline));
4821static inline u64 CGXX_SMUX_EXT_LOOPBACK(u64 a)
4822{
4823        return 0x20208 + 0x40000 * a;
4824}
4825
4826/**
4827 * Register (RSL) cgx#_smu#_hg2_control
4828 *
4829 * CGX SMU HiGig2 Control Registers HiGig2 TX- and RX-enable are normally
4830 * set together for HiGig2 messaging. Setting just the TX or RX bit
4831 * results in only the HG2 message transmit or receive capability.
4832 * Setting [PHYS_EN] and [LOGL_EN] to 1 allows link PAUSE or backpressure
4833 * to NIX as per the received HiGig2 message. Setting these fields to 0
4834 * disables link PAUSE and backpressure to NIX in response to received
4835 * messages.  CGX()_SMU()_TX_CTL[HG_EN] must be set (to enable HiGig)
4836 * whenever either [HG2TX_EN] or [HG2RX_EN] are set.
4837 * CGX()_SMU()_RX_UDD_SKP[LEN] must be set to 16 (to select HiGig2)
4838 * whenever either [HG2TX_EN] or [HG2RX_EN] are set.
4839 * CGX()_CMR_RX_OVR_BP[EN]\<0\> must be set and
4840 * CGX()_CMR_RX_OVR_BP[BP]\<0\> must be cleared to 0 (to forcibly disable
4841 * hardware-automatic 802.3 PAUSE packet generation) with the HiGig2
4842 * Protocol when [HG2TX_EN] = 0. (The HiGig2 protocol is indicated by
4843 * CGX()_SMU()_TX_CTL[HG_EN] = 1 and CGX()_SMU()_RX_UDD_SKP[LEN]=16.)
4844 * Hardware can only autogenerate backpressure via HiGig2 messages
4845 * (optionally, when [HG2TX_EN] = 1) with the HiGig2 protocol.
4846 */
4847union cgxx_smux_hg2_control {
4848        u64 u;
4849        struct cgxx_smux_hg2_control_s {
4850                u64 logl_en                          : 16;
4851                u64 phys_en                          : 1;
4852                u64 hg2rx_en                         : 1;
4853                u64 hg2tx_en                         : 1;
4854                u64 reserved_19_63                   : 45;
4855        } s;
4856        /* struct cgxx_smux_hg2_control_s cn; */
4857};
4858
4859static inline u64 CGXX_SMUX_HG2_CONTROL(u64 a)
4860        __attribute__ ((pure, always_inline));
4861static inline u64 CGXX_SMUX_HG2_CONTROL(u64 a)
4862{
4863        return 0x20210 + 0x40000 * a;
4864}
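
/*
 * Example (a minimal sketch, not part of the generated definitions):
 * enabling HiGig2 messaging while honoring the ordering constraints
 * above (CGX()_SMU()_TX_CTL[HG_EN] set and CGX()_SMU()_RX_UDD_SKP[LEN]
 * = 16 before [HG2TX_EN]/[HG2RX_EN]).  cgx_base and readq()/writeq()
 * are assumptions as in the earlier sketches; the CMR_RX_OVR_BP
 * programming mentioned above is not shown.
 *
 *	void cgx_smu_hg2_enable(void __iomem *cgx_base, int lmac)
 *	{
 *		union cgxx_smux_tx_ctl txc;
 *		union cgxx_smux_rx_udd_skp udd;
 *		union cgxx_smux_hg2_control hg2;
 *
 *		txc.u = readq(cgx_base + CGXX_SMUX_TX_CTL(lmac));
 *		txc.s.hg_en = 1;
 *		writeq(txc.u, cgx_base + CGXX_SMUX_TX_CTL(lmac));
 *
 *		udd.u = readq(cgx_base + CGXX_SMUX_RX_UDD_SKP(lmac));
 *		udd.s.len = 16;			// 16-byte UDD selects HiGig2
 *		writeq(udd.u, cgx_base + CGXX_SMUX_RX_UDD_SKP(lmac));
 *
 *		hg2.u = readq(cgx_base + CGXX_SMUX_HG2_CONTROL(lmac));
 *		hg2.s.hg2tx_en = 1;
 *		hg2.s.hg2rx_en = 1;
 *		writeq(hg2.u, cgx_base + CGXX_SMUX_HG2_CONTROL(lmac));
 *	}
 */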
4865
4866/**
4867 * Register (RSL) cgx#_smu#_mmsi_ctl_sta
4868 *
4869 * CGX SMU MAC Merge Service Interface (MMSI) Control/Status Registers
4870 * MMSI control and status registers for frame preemption mode. Refer to
4871 * IEEE 802.3br, Clause 99.
4872 */
4873union cgxx_smux_mmsi_ctl_sta {
4874        u64 u;
4875        struct cgxx_smux_mmsi_ctl_sta_s {
4876                u64 p_en                             : 1;
4877                u64 dis_v                            : 1;
4878                u64 afs                              : 2;
4879                u64 v_sta                            : 3;
4880                u64 tx_pactive                       : 1;
4881                u64 reserved_8_31                    : 24;
4882                u64 v_time                           : 24;
4883                u64 reserved_56_63                   : 8;
4884        } s;
4885        /* struct cgxx_smux_mmsi_ctl_sta_s cn; */
4886};
4887
4888static inline u64 CGXX_SMUX_MMSI_CTL_STA(u64 a)
4889        __attribute__ ((pure, always_inline));
4890static inline u64 CGXX_SMUX_MMSI_CTL_STA(u64 a)
4891{
4892        return 0x20220 + 0x40000 * a;
4893}
4894
4895/**
4896 * Register (RSL) cgx#_smu#_rx_bad_col_ctrl
4897 *
4898 * CGX SMU RX Bad Column Control Registers
4899 */
4900union cgxx_smux_rx_bad_col_ctrl {
4901        u64 u;
4902        struct cgxx_smux_rx_bad_col_ctrl_s {
4903                u64 lane_rxc                         : 16;
4904                u64 state                            : 3;
4905                u64 val                              : 1;
4906                u64 reserved_20_63                   : 44;
4907        } s;
4908        /* struct cgxx_smux_rx_bad_col_ctrl_s cn; */
4909};
4910
4911static inline u64 CGXX_SMUX_RX_BAD_COL_CTRL(u64 a)
4912        __attribute__ ((pure, always_inline));
4913static inline u64 CGXX_SMUX_RX_BAD_COL_CTRL(u64 a)
4914{
4915        return 0x20060 + 0x40000 * a;
4916}
4917
4918/**
4919 * Register (RSL) cgx#_smu#_rx_bad_col_data_hi
4920 *
4921 * CGX SMU RX Bad Column High Registers
4922 */
4923union cgxx_smux_rx_bad_col_data_hi {
4924        u64 u;
4925        struct cgxx_smux_rx_bad_col_data_hi_s {
4926                u64 lane_rxd                         : 64;
4927        } s;
4928        /* struct cgxx_smux_rx_bad_col_data_hi_s cn; */
4929};
4930
4931static inline u64 CGXX_SMUX_RX_BAD_COL_DATA_HI(u64 a)
4932        __attribute__ ((pure, always_inline));
4933static inline u64 CGXX_SMUX_RX_BAD_COL_DATA_HI(u64 a)
4934{
4935        return 0x20058 + 0x40000 * a;
4936}
4937
4938/**
4939 * Register (RSL) cgx#_smu#_rx_bad_col_data_lo
4940 *
4941 * CGX SMU RX Bad Column Low Registers
4942 */
4943union cgxx_smux_rx_bad_col_data_lo {
4944        u64 u;
4945        struct cgxx_smux_rx_bad_col_data_lo_s {
4946                u64 lane_rxd                         : 64;
4947        } s;
4948        /* struct cgxx_smux_rx_bad_col_data_lo_s cn; */
4949};
4950
4951static inline u64 CGXX_SMUX_RX_BAD_COL_DATA_LO(u64 a)
4952        __attribute__ ((pure, always_inline));
4953static inline u64 CGXX_SMUX_RX_BAD_COL_DATA_LO(u64 a)
4954{
4955        return 0x20050 + 0x40000 * a;
4956}
4957
4958/**
4959 * Register (RSL) cgx#_smu#_rx_ctl
4960 *
4961 * CGX SMU RX Control Registers
4962 */
4963union cgxx_smux_rx_ctl {
4964        u64 u;
4965        struct cgxx_smux_rx_ctl_s {
4966                u64 status                           : 2;
4967                u64 reserved_2_63                    : 62;
4968        } s;
4969        /* struct cgxx_smux_rx_ctl_s cn; */
4970};
4971
4972static inline u64 CGXX_SMUX_RX_CTL(u64 a)
4973        __attribute__ ((pure, always_inline));
4974static inline u64 CGXX_SMUX_RX_CTL(u64 a)
4975{
4976        return 0x20048 + 0x40000 * a;
4977}
4978
4979/**
4980 * Register (RSL) cgx#_smu#_rx_decision
4981 *
4982 * CGX SMU Packet Decision Registers This register specifies the byte
4983 * count used to determine when to accept or to filter a packet. As each
4984 * byte in a packet is received by CGX, the L2 byte count (i.e. the
4985 * number of bytes from the beginning of the L2 header (DMAC)) is
4986 * compared against CNT. In normal operation, the L2 header begins after
4987 * the PREAMBLE + SFD (CGX()_SMU()_RX_FRM_CTL[PRE_CHK] = 1) and any
4988 * optional UDD skip data (CGX()_SMU()_RX_UDD_SKP[LEN]).
4989 */
4990union cgxx_smux_rx_decision {
4991        u64 u;
4992        struct cgxx_smux_rx_decision_s {
4993                u64 cnt                              : 5;
4994                u64 reserved_5_63                    : 59;
4995        } s;
4996        /* struct cgxx_smux_rx_decision_s cn; */
4997};
4998
4999static inline u64 CGXX_SMUX_RX_DECISION(u64 a)
5000        __attribute__ ((pure, always_inline));
5001static inline u64 CGXX_SMUX_RX_DECISION(u64 a)
5002{
5003        return 0x20038 + 0x40000 * a;
5004}
5005
5006/**
5007 * Register (RSL) cgx#_smu#_rx_frm_chk
5008 *
5009 * CGX SMU RX Frame Check Registers The CSRs provide the enable bits for
5010 * a subset of errors passed to CMR encoded.
5011 */
5012union cgxx_smux_rx_frm_chk {
5013        u64 u;
5014        struct cgxx_smux_rx_frm_chk_s {
5015                u64 reserved_0_2                     : 3;
5016                u64 jabber                           : 1;
5017                u64 fcserr_d                         : 1;
5018                u64 fcserr_c                         : 1;
5019                u64 reserved_6                       : 1;
5020                u64 rcverr                           : 1;
5021                u64 skperr                           : 1;
5022                u64 reserved_9_63                    : 55;
5023        } s;
5024        /* struct cgxx_smux_rx_frm_chk_s cn; */
5025};
5026
5027static inline u64 CGXX_SMUX_RX_FRM_CHK(u64 a)
5028        __attribute__ ((pure, always_inline));
5029static inline u64 CGXX_SMUX_RX_FRM_CHK(u64 a)
5030{
5031        return 0x20028 + 0x40000 * a;
5032}
5033
5034/**
5035 * Register (RSL) cgx#_smu#_rx_frm_ctl
5036 *
5037 * CGX SMU RX Frame Control Registers This register controls the handling
5038 * of the frames. The [CTL_BCK] and [CTL_DRP] bits control how the
5039 * hardware handles incoming PAUSE packets. The most common modes of
5040 * operation: _ [CTL_BCK] = 1, [CTL_DRP] = 1: hardware handles everything
5041 * _ [CTL_BCK] = 0, [CTL_DRP] = 0: software sees all PAUSE frames _
5042 * [CTL_BCK] = 0, [CTL_DRP] = 1: all PAUSE frames are completely ignored
5043 * These control bits should be set to [CTL_BCK] = 0, [CTL_DRP] = 0 in
5044 * half-duplex mode. Since PAUSE packets only apply to full duplex
5045 * operation, any PAUSE packet would constitute an exception which should
5046 * be handled by the processing cores. PAUSE packets should not be
5047 * forwarded.
5048 */
5049union cgxx_smux_rx_frm_ctl {
5050        u64 u;
5051        struct cgxx_smux_rx_frm_ctl_s {
5052                u64 pre_chk                          : 1;
5053                u64 pre_strp                         : 1;
5054                u64 ctl_drp                          : 1;
5055                u64 ctl_bck                          : 1;
5056                u64 ctl_mcst                         : 1;
5057                u64 ctl_smac                         : 1;
5058                u64 reserved_6_11                    : 6;
5059                u64 ptp_mode                         : 1;
5060                u64 reserved_13_63                   : 51;
5061        } s;
5062        /* struct cgxx_smux_rx_frm_ctl_s cn; */
5063};
5064
5065static inline u64 CGXX_SMUX_RX_FRM_CTL(u64 a)
5066        __attribute__ ((pure, always_inline));
5067static inline u64 CGXX_SMUX_RX_FRM_CTL(u64 a)
5068{
5069        return 0x20020 + 0x40000 * a;
5070}
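
/*
 * Example (a minimal sketch, not part of the generated definitions):
 * selecting one of the PAUSE-handling modes listed above, here the
 * "software sees all PAUSE frames" combination ([CTL_BCK] = 0,
 * [CTL_DRP] = 0).  cgx_base and readq()/writeq() are assumed as before.
 *
 *	void cgx_smu_pause_to_sw(void __iomem *cgx_base, int lmac)
 *	{
 *		union cgxx_smux_rx_frm_ctl fc;
 *
 *		fc.u = readq(cgx_base + CGXX_SMUX_RX_FRM_CTL(lmac));
 *		fc.s.ctl_bck = 0;	// hardware does not react to PAUSE
 *		fc.s.ctl_drp = 0;	// PAUSE frames are not dropped
 *		writeq(fc.u, cgx_base + CGXX_SMUX_RX_FRM_CTL(lmac));
 *	}
 */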
5071
5072/**
5073 * Register (RSL) cgx#_smu#_rx_int
5074 *
5075 * CGX SMU Receive Interrupt Registers SMU Interrupt Register. Internal:
5076 * Exception conditions \<9\> and \<4:0\> can also set the rcv/opcode in
5077 * the received packet's work queue entry. CGX()_SMU()_RX_FRM_CHK
5078 * provides a bit mask for configuring which conditions set the error.
5079 */
5080union cgxx_smux_rx_int {
5081        u64 u;
5082        struct cgxx_smux_rx_int_s {
5083                u64 jabber                           : 1;
5084                u64 fcserr                           : 1;
5085                u64 rcverr                           : 1;
5086                u64 skperr                           : 1;
5087                u64 pcterr                           : 1;
5088                u64 rsverr                           : 1;
5089                u64 loc_fault                        : 1;
5090                u64 rem_fault                        : 1;
5091                u64 bad_seq                          : 1;
5092                u64 bad_term                         : 1;
5093                u64 hg2fld                           : 1;
5094                u64 hg2cc                            : 1;
5095                u64 badver                           : 1;
5096                u64 badrsp                           : 1;
5097                u64 reserved_14_63                   : 50;
5098        } s;
5099        /* struct cgxx_smux_rx_int_s cn; */
5100};
5101
5102static inline u64 CGXX_SMUX_RX_INT(u64 a)
5103        __attribute__ ((pure, always_inline));
5104static inline u64 CGXX_SMUX_RX_INT(u64 a)
5105{
5106        return 0x20000 + 0x40000 * a;
5107}
5108
5109/**
5110 * Register (RSL) cgx#_smu#_rx_int_ena_w1c
5111 *
5112 * CGX SMU Receive Interrupt Enable Clear Registers This register clears
5113 * interrupt enable bits.
5114 */
5115union cgxx_smux_rx_int_ena_w1c {
5116        u64 u;
5117        struct cgxx_smux_rx_int_ena_w1c_s {
5118                u64 jabber                           : 1;
5119                u64 fcserr                           : 1;
5120                u64 rcverr                           : 1;
5121                u64 skperr                           : 1;
5122                u64 pcterr                           : 1;
5123                u64 rsverr                           : 1;
5124                u64 loc_fault                        : 1;
5125                u64 rem_fault                        : 1;
5126                u64 bad_seq                          : 1;
5127                u64 bad_term                         : 1;
5128                u64 hg2fld                           : 1;
5129                u64 hg2cc                            : 1;
5130                u64 badver                           : 1;
5131                u64 badrsp                           : 1;
5132                u64 reserved_14_63                   : 50;
5133        } s;
5134        /* struct cgxx_smux_rx_int_ena_w1c_s cn; */
5135};
5136
5137static inline u64 CGXX_SMUX_RX_INT_ENA_W1C(u64 a)
5138        __attribute__ ((pure, always_inline));
5139static inline u64 CGXX_SMUX_RX_INT_ENA_W1C(u64 a)
5140{
5141        return 0x20010 + 0x40000 * a;
5142}
5143
5144/**
5145 * Register (RSL) cgx#_smu#_rx_int_ena_w1s
5146 *
5147 * CGX SMU Receive Interrupt Enable Set Registers This register sets
5148 * interrupt enable bits.
5149 */
5150union cgxx_smux_rx_int_ena_w1s {
5151        u64 u;
5152        struct cgxx_smux_rx_int_ena_w1s_s {
5153                u64 jabber                           : 1;
5154                u64 fcserr                           : 1;
5155                u64 rcverr                           : 1;
5156                u64 skperr                           : 1;
5157                u64 pcterr                           : 1;
5158                u64 rsverr                           : 1;
5159                u64 loc_fault                        : 1;
5160                u64 rem_fault                        : 1;
5161                u64 bad_seq                          : 1;
5162                u64 bad_term                         : 1;
5163                u64 hg2fld                           : 1;
5164                u64 hg2cc                            : 1;
5165                u64 badver                           : 1;
5166                u64 badrsp                           : 1;
5167                u64 reserved_14_63                   : 50;
5168        } s;
5169        /* struct cgxx_smux_rx_int_ena_w1s_s cn; */
5170};
5171
5172static inline u64 CGXX_SMUX_RX_INT_ENA_W1S(u64 a)
5173        __attribute__ ((pure, always_inline));
5174static inline u64 CGXX_SMUX_RX_INT_ENA_W1S(u64 a)
5175{
5176        return 0x20018 + 0x40000 * a;
5177}
5178
5179/**
5180 * Register (RSL) cgx#_smu#_rx_int_w1s
5181 *
5182 * CGX SMU Receive Interrupt Set Registers This register sets interrupt
5183 * bits.
5184 */
5185union cgxx_smux_rx_int_w1s {
5186        u64 u;
5187        struct cgxx_smux_rx_int_w1s_s {
5188                u64 jabber                           : 1;
5189                u64 fcserr                           : 1;
5190                u64 rcverr                           : 1;
5191                u64 skperr                           : 1;
5192                u64 pcterr                           : 1;
5193                u64 rsverr                           : 1;
5194                u64 loc_fault                        : 1;
5195                u64 rem_fault                        : 1;
5196                u64 bad_seq                          : 1;
5197                u64 bad_term                         : 1;
5198                u64 hg2fld                           : 1;
5199                u64 hg2cc                            : 1;
5200                u64 badver                           : 1;
5201                u64 badrsp                           : 1;
5202                u64 reserved_14_63                   : 50;
5203        } s;
5204        /* struct cgxx_smux_rx_int_w1s_s cn; */
5205};
5206
5207static inline u64 CGXX_SMUX_RX_INT_W1S(u64 a)
5208        __attribute__ ((pure, always_inline));
5209static inline u64 CGXX_SMUX_RX_INT_W1S(u64 a)
5210{
5211        return 0x20008 + 0x40000 * a;
5212}
5213
5214/**
5215 * Register (RSL) cgx#_smu#_rx_jabber
5216 *
5217 * CGX SMU Maximum Packet-Size Registers This register specifies the
5218 * maximum size for packets, beyond which the SMU truncates. Internal:
5219 * JABBER[CNT] is checked against the packet that arrives from SPU.  The
5220 * checking is performed before preamble is stripped or PTP is inserted.
5221 * If present, preamble is counted as eight bytes of the incoming packet.
5222 */
5223union cgxx_smux_rx_jabber {
5224        u64 u;
5225        struct cgxx_smux_rx_jabber_s {
5226                u64 cnt                              : 16;
5227                u64 reserved_16_63                   : 48;
5228        } s;
5229        /* struct cgxx_smux_rx_jabber_s cn; */
5230};
5231
5232static inline u64 CGXX_SMUX_RX_JABBER(u64 a)
5233        __attribute__ ((pure, always_inline));
5234static inline u64 CGXX_SMUX_RX_JABBER(u64 a)
5235{
5236        return 0x20030 + 0x40000 * a;
5237}
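
/*
 * Example (a minimal sketch, not part of the generated definitions):
 * deriving the jabber cut-off from a desired maximum frame size.  The
 * extra 8 bytes follow the note above that a present preamble counts
 * toward [CNT]; max_frame and cgx_base are illustrative.
 *
 *	void cgx_smu_set_jabber(void __iomem *cgx_base, int lmac, u16 max_frame)
 *	{
 *		union cgxx_smux_rx_jabber jab = { .u = 0 };
 *
 *		jab.s.cnt = max_frame + 8;	// allow for preamble + SFD
 *		writeq(jab.u, cgx_base + CGXX_SMUX_RX_JABBER(lmac));
 *	}
 */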
5238
5239/**
5240 * Register (RSL) cgx#_smu#_rx_udd_skp
5241 *
5242 * CGX SMU User-Defined Data Skip Registers Internal: (1) The skip bytes
5243 * are part of the packet and will be sent down the NCB packet interface
5244 * and will be handled by NIX.  (2) The system can determine if the UDD
5245 * bytes are included in the FCS check by using the FCSSEL field if the
5246 * FCS check is enabled.  (3) Assume that the preamble/sfd is always at
5247 * the start of the frame even before UDD bytes.  In most cases, there
5248 * will be no preamble in these cases since it will be a packet interface
5249 * in direct communication with another packet interface (MAC to MAC)
5250 * without a PHY involved.  (4) We can still do address filtering and
5251 * control packet filtering if the user desires.  (5) In all cases, the
5252 * UDD bytes will be sent down the packet interface as part of the
5253 * packet.  The UDD bytes are never stripped from the actual packet.
5254 */
5255union cgxx_smux_rx_udd_skp {
5256        u64 u;
5257        struct cgxx_smux_rx_udd_skp_s {
5258                u64 len                              : 7;
5259                u64 reserved_7                       : 1;
5260                u64 fcssel                           : 1;
5261                u64 reserved_9_63                    : 55;
5262        } s;
5263        /* struct cgxx_smux_rx_udd_skp_s cn; */
5264};
5265
5266static inline u64 CGXX_SMUX_RX_UDD_SKP(u64 a)
5267        __attribute__ ((pure, always_inline));
5268static inline u64 CGXX_SMUX_RX_UDD_SKP(u64 a)
5269{
5270        return 0x20040 + 0x40000 * a;
5271}
5272
5273/**
5274 * Register (RSL) cgx#_smu#_rx_wol_ctrl0
5275 *
5276 * CGX SMU RX Wake-on-LAN Control 0 Registers
5277 */
5278union cgxx_smux_rx_wol_ctrl0 {
5279        u64 u;
5280        struct cgxx_smux_rx_wol_ctrl0_s {
5281                u64 dmac                             : 48;
5282                u64 pswd_len                         : 4;
5283                u64 reserved_52_63                   : 12;
5284        } s;
5285        /* struct cgxx_smux_rx_wol_ctrl0_s cn; */
5286};
5287
5288static inline u64 CGXX_SMUX_RX_WOL_CTRL0(u64 a)
5289        __attribute__ ((pure, always_inline));
5290static inline u64 CGXX_SMUX_RX_WOL_CTRL0(u64 a)
5291{
5292        return 0x20068 + 0x40000 * a;
5293}
5294
5295/**
5296 * Register (RSL) cgx#_smu#_rx_wol_ctrl1
5297 *
5298 * CGX SMU RX Wake-on-LAN Control 1 Registers
5299 */
5300union cgxx_smux_rx_wol_ctrl1 {
5301        u64 u;
5302        struct cgxx_smux_rx_wol_ctrl1_s {
5303                u64 pswd                             : 64;
5304        } s;
5305        /* struct cgxx_smux_rx_wol_ctrl1_s cn; */
5306};
5307
5308static inline u64 CGXX_SMUX_RX_WOL_CTRL1(u64 a)
5309        __attribute__ ((pure, always_inline));
5310static inline u64 CGXX_SMUX_RX_WOL_CTRL1(u64 a)
5311{
5312        return 0x20070 + 0x40000 * a;
5313}
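
/*
 * Example (a minimal sketch, not part of the generated definitions):
 * arming Wake-on-LAN matching for a given destination MAC with a 4-byte
 * password.  The interpretation of [PSWD_LEN] as a byte count is an
 * assumption for illustration; dmac, password, cgx_base and writeq()
 * are likewise illustrative.
 *
 *	void cgx_smu_wol_arm(void __iomem *cgx_base, int lmac,
 *			     u64 dmac, u32 password)
 *	{
 *		union cgxx_smux_rx_wol_ctrl0 c0 = { .u = 0 };
 *		union cgxx_smux_rx_wol_ctrl1 c1 = { .u = 0 };
 *
 *		c0.s.dmac = dmac;	// 48-bit destination MAC to match
 *		c0.s.pswd_len = 4;	// assumed: password length in bytes
 *		writeq(c0.u, cgx_base + CGXX_SMUX_RX_WOL_CTRL0(lmac));
 *
 *		c1.s.pswd = password;
 *		writeq(c1.u, cgx_base + CGXX_SMUX_RX_WOL_CTRL1(lmac));
 *	}
 */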
5314
5315/**
5316 * Register (RSL) cgx#_smu#_rx_wol_int
5317 *
5318 * CGX SMU RX WOL Interrupt Registers These registers allow WOL
5319 * interrupts to be sent to the control processor.
5320 */
5321union cgxx_smux_rx_wol_int {
5322        u64 u;
5323        struct cgxx_smux_rx_wol_int_s {
5324                u64 wol_rcvd                         : 1;
5325                u64 reserved_1_63                    : 63;
5326        } s;
5327        /* struct cgxx_smux_rx_wol_int_s cn; */
5328};
5329
5330static inline u64 CGXX_SMUX_RX_WOL_INT(u64 a)
5331        __attribute__ ((pure, always_inline));
5332static inline u64 CGXX_SMUX_RX_WOL_INT(u64 a)
5333{
5334        return 0x20078 + 0x40000 * a;
5335}
5336
5337/**
5338 * Register (RSL) cgx#_smu#_rx_wol_int_ena_w1c
5339 *
5340 * CGX SMU RX WOL Interrupt Enable Clear Registers This register clears
5341 * interrupt enable bits.
5342 */
5343union cgxx_smux_rx_wol_int_ena_w1c {
5344        u64 u;
5345        struct cgxx_smux_rx_wol_int_ena_w1c_s {
5346                u64 wol_rcvd                         : 1;
5347                u64 reserved_1_63                    : 63;
5348        } s;
5349        /* struct cgxx_smux_rx_wol_int_ena_w1c_s cn; */
5350};
5351
5352static inline u64 CGXX_SMUX_RX_WOL_INT_ENA_W1C(u64 a)
5353        __attribute__ ((pure, always_inline));
5354static inline u64 CGXX_SMUX_RX_WOL_INT_ENA_W1C(u64 a)
5355{
5356        return 0x20088 + 0x40000 * a;
5357}
5358
5359/**
5360 * Register (RSL) cgx#_smu#_rx_wol_int_ena_w1s
5361 *
5362 * CGX SMU RX WOL Interrupt Enable Set Registers This register sets
5363 * interrupt enable bits.
5364 */
5365union cgxx_smux_rx_wol_int_ena_w1s {
5366        u64 u;
5367        struct cgxx_smux_rx_wol_int_ena_w1s_s {
5368                u64 wol_rcvd                         : 1;
5369                u64 reserved_1_63                    : 63;
5370        } s;
5371        /* struct cgxx_smux_rx_wol_int_ena_w1s_s cn; */
5372};
5373
5374static inline u64 CGXX_SMUX_RX_WOL_INT_ENA_W1S(u64 a)
5375        __attribute__ ((pure, always_inline));
5376static inline u64 CGXX_SMUX_RX_WOL_INT_ENA_W1S(u64 a)
5377{
5378        return 0x20090 + 0x40000 * a;
5379}
5380
5381/**
5382 * Register (RSL) cgx#_smu#_rx_wol_int_w1s
5383 *
5384 * CGX SMU RX WOL Interrupt Set Registers This register sets interrupt
5385 * bits.
5386 */
5387union cgxx_smux_rx_wol_int_w1s {
5388        u64 u;
5389        struct cgxx_smux_rx_wol_int_w1s_s {
5390                u64 wol_rcvd                         : 1;
5391                u64 reserved_1_63                    : 63;
5392        } s;
5393        /* struct cgxx_smux_rx_wol_int_w1s_s cn; */
5394};
5395
5396static inline u64 CGXX_SMUX_RX_WOL_INT_W1S(u64 a)
5397        __attribute__ ((pure, always_inline));
5398static inline u64 CGXX_SMUX_RX_WOL_INT_W1S(u64 a)
5399{
5400        return 0x20080 + 0x40000 * a;
5401}
5402
5403/**
5404 * Register (RSL) cgx#_smu#_smac
5405 *
5406 * CGX SMU SMAC Registers
5407 */
5408union cgxx_smux_smac {
5409        u64 u;
5410        struct cgxx_smux_smac_s {
5411                u64 smac                             : 48;
5412                u64 reserved_48_63                   : 16;
5413        } s;
5414        /* struct cgxx_smux_smac_s cn; */
5415};
5416
5417static inline u64 CGXX_SMUX_SMAC(u64 a)
5418        __attribute__ ((pure, always_inline));
5419static inline u64 CGXX_SMUX_SMAC(u64 a)
5420{
5421        return 0x20108 + 0x40000 * a;
5422}
5423
5424/**
5425 * Register (RSL) cgx#_smu#_tx_append
5426 *
5427 * CGX SMU TX Append Control Registers For more details on the
5428 * interactions between FCS and PAD, see also the description of
5429 * CGX()_SMU()_TX_MIN_PKT[MIN_SIZE].
5430 */
5431union cgxx_smux_tx_append {
5432        u64 u;
5433        struct cgxx_smux_tx_append_s {
5434                u64 preamble                         : 1;
5435                u64 pad                              : 1;
5436                u64 fcs_d                            : 1;
5437                u64 fcs_c                            : 1;
5438                u64 reserved_4_63                    : 60;
5439        } s;
5440        /* struct cgxx_smux_tx_append_s cn; */
5441};
5442
5443static inline u64 CGXX_SMUX_TX_APPEND(u64 a)
5444        __attribute__ ((pure, always_inline));
5445static inline u64 CGXX_SMUX_TX_APPEND(u64 a)
5446{
5447        return 0x20100 + 0x40000 * a;
5448}
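
/*
 * Example (a minimal sketch, not part of the generated definitions): a
 * typical transmit-append setup in which hardware prepends the preamble
 * and appends PAD and FCS.  The reading of [FCS_D]/[FCS_C] as FCS on
 * data/control packets is an assumption; cgx_base is assumed as before.
 *
 *	void cgx_smu_tx_append_all(void __iomem *cgx_base, int lmac)
 *	{
 *		union cgxx_smux_tx_append ap;
 *
 *		ap.u = readq(cgx_base + CGXX_SMUX_TX_APPEND(lmac));
 *		ap.s.preamble = 1;
 *		ap.s.pad = 1;
 *		ap.s.fcs_d = 1;		// assumed: FCS on data packets
 *		ap.s.fcs_c = 1;		// assumed: FCS on control packets
 *		writeq(ap.u, cgx_base + CGXX_SMUX_TX_APPEND(lmac));
 *	}
 */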
5449
5450/**
5451 * Register (RSL) cgx#_smu#_tx_ctl
5452 *
5453 * CGX SMU Transmit Control Registers
5454 */
5455union cgxx_smux_tx_ctl {
5456        u64 u;
5457        struct cgxx_smux_tx_ctl_s {
5458                u64 dic_en                           : 1;
5459                u64 uni_en                           : 1;
5460                u64 x4a_dis                          : 1;
5461                u64 mia_en                           : 1;
5462                u64 ls                               : 2;
5463                u64 ls_byp                           : 1;
5464                u64 l2p_bp_conv                      : 1;
5465                u64 hg_en                            : 1;
5466                u64 hg_pause_hgi                     : 2;
5467                u64 reserved_11_63                   : 53;
5468        } s;
5469        /* struct cgxx_smux_tx_ctl_s cn; */
5470};
5471
5472static inline u64 CGXX_SMUX_TX_CTL(u64 a)
5473        __attribute__ ((pure, always_inline));
5474static inline u64 CGXX_SMUX_TX_CTL(u64 a)
5475{
5476        return 0x20178 + 0x40000 * a;
5477}
5478
5479/**
5480 * Register (RSL) cgx#_smu#_tx_dack
5481 *
5482 * CGX SMU TX Drop Counters Registers
5483 */
5484union cgxx_smux_tx_dack {
5485        u64 u;
5486        struct cgxx_smux_tx_dack_s {
5487                u64 dpi_sdrop_ack                    : 16;
5488                u64 reserved_16_63                   : 48;
5489        } s;
5490        /* struct cgxx_smux_tx_dack_s cn; */
5491};
5492
5493static inline u64 CGXX_SMUX_TX_DACK(u64 a)
5494        __attribute__ ((pure, always_inline));
5495static inline u64 CGXX_SMUX_TX_DACK(u64 a)
5496{
5497        return 0x201b0 + 0x40000 * a;
5498}
5499
5500/**
5501 * Register (RSL) cgx#_smu#_tx_dcnt
5502 *
5503 * CGX SMU TX Drop Counters Registers
5504 */
5505union cgxx_smux_tx_dcnt {
5506        u64 u;
5507        struct cgxx_smux_tx_dcnt_s {
5508                u64 dpi_sdrop_cnt                    : 16;
5509                u64 reserved_16_63                   : 48;
5510        } s;
5511        /* struct cgxx_smux_tx_dcnt_s cn; */
5512};
5513
5514static inline u64 CGXX_SMUX_TX_DCNT(u64 a)
5515        __attribute__ ((pure, always_inline));
5516static inline u64 CGXX_SMUX_TX_DCNT(u64 a)
5517{
5518        return 0x201a8 + 0x40000 * a;
5519}
5520
5521/**
5522 * Register (RSL) cgx#_smu#_tx_eee
5523 *
5524 * INTERNAL: CGX SMU TX EEE Configure Registers  Reserved. Internal:
5525 * These registers control when SMU TX requests to enter or exit LPI.
5526 * Those registers take effect only when EEE is supported and enabled for
5527 * a given LMAC.
5528 */
5529union cgxx_smux_tx_eee {
5530        u64 u;
5531        struct cgxx_smux_tx_eee_s {
5532                u64 idle_thresh                      : 28;
5533                u64 reserved_28                      : 1;
5534                u64 force_lpi                        : 1;
5535                u64 wakeup                           : 1;
5536                u64 auto_lpi                         : 1;
5537                u64 idle_cnt                         : 28;
5538                u64 reserved_60_61                   : 2;
5539                u64 tx_lpi_wake                      : 1;
5540                u64 tx_lpi                           : 1;
5541        } s;
5542        /* struct cgxx_smux_tx_eee_s cn; */
5543};
5544
5545static inline u64 CGXX_SMUX_TX_EEE(u64 a)
5546        __attribute__ ((pure, always_inline));
5547static inline u64 CGXX_SMUX_TX_EEE(u64 a)
5548{
5549        return 0x20190 + 0x40000 * a;
5550}
5551
5552/**
5553 * Register (RSL) cgx#_smu#_tx_eee_timer_status
5554 *
5555 * INTERNAL: CGX SMU TX EEE TIMER STATUS Registers  Reserved. Internal:
5556 * These registers report SMU TX EEE timer status.
5557 */
5558union cgxx_smux_tx_eee_timer_status {
5559        u64 u;
5560        struct cgxx_smux_tx_eee_timer_status_s {
5561                u64 lpi_wake_cnt                     : 16;
5562                u64 reserved_16_30                   : 15;
5563                u64 wake_timer_done                  : 1;
5564                u64 link_ok_cnt                      : 30;
5565                u64 reserved_62                      : 1;
5566                u64 link_timer_done                  : 1;
5567        } s;
5568        /* struct cgxx_smux_tx_eee_timer_status_s cn; */
5569};
5570
5571static inline u64 CGXX_SMUX_TX_EEE_TIMER_STATUS(u64 a)
5572        __attribute__ ((pure, always_inline));
5573static inline u64 CGXX_SMUX_TX_EEE_TIMER_STATUS(u64 a)
5574{
5575        return 0x201a0 + 0x40000 * a;
5576}
5577
5578/**
5579 * Register (RSL) cgx#_smu#_tx_eee_timing
5580 *
5581 * INTERNAL: CGX SMU TX EEE TIMING Parameter Registers  Reserved.
5582 * Internal: These registers configure SMU TX EEE timing parameters.
5583 */
5584union cgxx_smux_tx_eee_timing {
5585        u64 u;
5586        struct cgxx_smux_tx_eee_timing_s {
5587                u64 w_sys_tx_min                     : 16;
5588                u64 reserved_16_31                   : 16;
5589                u64 link_ok_min                      : 30;
5590                u64 reserved_62_63                   : 2;
5591        } s;
5592        /* struct cgxx_smux_tx_eee_timing_s cn; */
5593};
5594
5595static inline u64 CGXX_SMUX_TX_EEE_TIMING(u64 a)
5596        __attribute__ ((pure, always_inline));
5597static inline u64 CGXX_SMUX_TX_EEE_TIMING(u64 a)
5598{
5599        return 0x20198 + 0x40000 * a;
5600}
5601
5602/**
5603 * Register (RSL) cgx#_smu#_tx_ifg
5604 *
5605 * CGX SMU TX Interframe-Gap Cycles Registers Programming IFG1 and IFG2:
5606 * * For XAUI/RXAUI/10G/25G/40G/50G/100G systems that require IEEE 802.3
5607 * compatibility, the [IFG1]+[IFG2] sum must be 12. * In loopback mode,
5608 * the [IFG1]+[IFG2] of local and remote parties must match exactly;
5609 * otherwise loopback FIFO will overrun: CGX()_SMU()_TX_INT[LB_OVRFLW]. *
5610 * When CGX()_SMU()_TX_CTL[DIC_EN] is set, [IFG1]+[IFG2] sum must be at
5611 * least 8. The behavior of smaller values is undetermined. * When
5612 * CGX()_SMU()_TX_CTL[DIC_EN] is cleared, the minimum value of
5613 * [IFG1]+[IFG2] is 1 for 40G/50G/100G LMAC_TYPE configurations and 5 for
5614 * all other values. The behavior of smaller values is undetermined.
5615 * Internal: When CGX()_SMU()_TX_CTL[DIC_EN] is set, SMU TX treats
5616 * ([IFG1]+[IFG2]) \< 8 as 8 for 40G/50G/100G MACs and ([IFG1]+[IFG2]) \<
5617 * 8 as 8 for other MACs. When CGX()_SMU()_TX_CTL[DIC_EN] is cleared, SMU
5618 * TX can work correctly with any IFG1 and IFG2.
5619 */
5620union cgxx_smux_tx_ifg {
5621        u64 u;
5622        struct cgxx_smux_tx_ifg_s {
5623                u64 ifg1                             : 4;
5624                u64 ifg2                             : 4;
5625                u64 mia_amt                          : 2;
5626                u64 reserved_10_15                   : 6;
5627                u64 mia_cnt                          : 8;
5628                u64 reserved_24_63                   : 40;
5629        } s;
5630        /* struct cgxx_smux_tx_ifg_s cn; */
5631};
5632
5633static inline u64 CGXX_SMUX_TX_IFG(u64 a)
5634        __attribute__ ((pure, always_inline));
5635static inline u64 CGXX_SMUX_TX_IFG(u64 a)
5636{
5637        return 0x20160 + 0x40000 * a;
5638}
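
/*
 * Example (a minimal sketch, not part of the generated definitions):
 * programming the IEEE-compatible [IFG1]+[IFG2] = 12 noted above.  Any
 * split that sums to 12 satisfies the rule; the 8/4 split and cgx_base
 * are illustrative.
 *
 *	void cgx_smu_set_ifg_ieee(void __iomem *cgx_base, int lmac)
 *	{
 *		union cgxx_smux_tx_ifg ifg;
 *
 *		ifg.u = readq(cgx_base + CGXX_SMUX_TX_IFG(lmac));
 *		ifg.s.ifg1 = 8;
 *		ifg.s.ifg2 = 4;		// IFG1 + IFG2 = 12
 *		writeq(ifg.u, cgx_base + CGXX_SMUX_TX_IFG(lmac));
 *	}
 */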
5639
5640/**
5641 * Register (RSL) cgx#_smu#_tx_int
5642 *
5643 * CGX SMU TX Interrupt Registers
5644 */
5645union cgxx_smux_tx_int {
5646        u64 u;
5647        struct cgxx_smux_tx_int_s {
5648                u64 undflw                           : 1;
5649                u64 xchange                          : 1;
5650                u64 fake_commit                      : 1;
5651                u64 lb_undflw                        : 1;
5652                u64 lb_ovrflw                        : 1;
5653                u64 dpi_sdrop                        : 1;
5654                u64 reserved_6_63                    : 58;
5655        } s;
5656        /* struct cgxx_smux_tx_int_s cn; */
5657};
5658
5659static inline u64 CGXX_SMUX_TX_INT(u64 a)
5660        __attribute__ ((pure, always_inline));
5661static inline u64 CGXX_SMUX_TX_INT(u64 a)
5662{
5663        return 0x20140 + 0x40000 * a;
5664}
5665
5666/**
5667 * Register (RSL) cgx#_smu#_tx_int_ena_w1c
5668 *
5669 * CGX SMU TX Interrupt Enable Clear Registers This register clears
5670 * interrupt enable bits.
5671 */
5672union cgxx_smux_tx_int_ena_w1c {
5673        u64 u;
5674        struct cgxx_smux_tx_int_ena_w1c_s {
5675                u64 undflw                           : 1;
5676                u64 xchange                          : 1;
5677                u64 fake_commit                      : 1;
5678                u64 lb_undflw                        : 1;
5679                u64 lb_ovrflw                        : 1;
5680                u64 dpi_sdrop                        : 1;
5681                u64 reserved_6_63                    : 58;
5682        } s;
5683        /* struct cgxx_smux_tx_int_ena_w1c_s cn; */
5684};
5685
5686static inline u64 CGXX_SMUX_TX_INT_ENA_W1C(u64 a)
5687        __attribute__ ((pure, always_inline));
5688static inline u64 CGXX_SMUX_TX_INT_ENA_W1C(u64 a)
5689{
5690        return 0x20150 + 0x40000 * a;
5691}
5692
5693/**
5694 * Register (RSL) cgx#_smu#_tx_int_ena_w1s
5695 *
5696 * CGX SMU TX Interrupt Enable Set Registers This register sets interrupt
5697 * enable bits.
5698 */
5699union cgxx_smux_tx_int_ena_w1s {
5700        u64 u;
5701        struct cgxx_smux_tx_int_ena_w1s_s {
5702                u64 undflw                           : 1;
5703                u64 xchange                          : 1;
5704                u64 fake_commit                      : 1;
5705                u64 lb_undflw                        : 1;
5706                u64 lb_ovrflw                        : 1;
5707                u64 dpi_sdrop                        : 1;
5708                u64 reserved_6_63                    : 58;
5709        } s;
5710        /* struct cgxx_smux_tx_int_ena_w1s_s cn; */
5711};
5712
5713static inline u64 CGXX_SMUX_TX_INT_ENA_W1S(u64 a)
5714        __attribute__ ((pure, always_inline));
5715static inline u64 CGXX_SMUX_TX_INT_ENA_W1S(u64 a)
5716{
5717        return 0x20158 + 0x40000 * a;
5718}
5719
5720/**
5721 * Register (RSL) cgx#_smu#_tx_int_w1s
5722 *
5723 * CGX SMU TX Interrupt Set Registers This register sets interrupt bits.
5724 */
5725union cgxx_smux_tx_int_w1s {
5726        u64 u;
5727        struct cgxx_smux_tx_int_w1s_s {
5728                u64 undflw                           : 1;
5729                u64 xchange                          : 1;
5730                u64 fake_commit                      : 1;
5731                u64 lb_undflw                        : 1;
5732                u64 lb_ovrflw                        : 1;
5733                u64 dpi_sdrop                        : 1;
5734                u64 reserved_6_63                    : 58;
5735        } s;
5736        /* struct cgxx_smux_tx_int_w1s_s cn; */
5737};
5738
5739static inline u64 CGXX_SMUX_TX_INT_W1S(u64 a)
5740        __attribute__ ((pure, always_inline));
5741static inline u64 CGXX_SMUX_TX_INT_W1S(u64 a)
5742{
5743        return 0x20148 + 0x40000 * a;
5744}
5745
5746/**
5747 * Register (RSL) cgx#_smu#_tx_min_pkt
5748 *
5749 * CGX SMU TX Minimum-Size-Packet Registers Internal: [MIN_SIZE] less
5750 * than 16 will be ignored by hardware which will use 16 instead.
5751 */
5752union cgxx_smux_tx_min_pkt {
5753        u64 u;
5754        struct cgxx_smux_tx_min_pkt_s {
5755                u64 min_size                         : 8;
5756                u64 reserved_8_63                    : 56;
5757        } s;
5758        /* struct cgxx_smux_tx_min_pkt_s cn; */
5759};
5760
5761static inline u64 CGXX_SMUX_TX_MIN_PKT(u64 a)
5762        __attribute__ ((pure, always_inline));
5763static inline u64 CGXX_SMUX_TX_MIN_PKT(u64 a)
5764{
5765        return 0x20118 + 0x40000 * a;
5766}
5767
5768/**
5769 * Register (RSL) cgx#_smu#_tx_pause_pkt_dmac
5770 *
5771 * CGX SMU TX PAUSE-Packet DMAC-Field Registers This register provides
5772 * the DMAC value that is placed in outbound PAUSE packets.
5773 */
5774union cgxx_smux_tx_pause_pkt_dmac {
5775        u64 u;
5776        struct cgxx_smux_tx_pause_pkt_dmac_s {
5777                u64 dmac                             : 48;
5778                u64 reserved_48_63                   : 16;
5779        } s;
5780        /* struct cgxx_smux_tx_pause_pkt_dmac_s cn; */
5781};
5782
5783static inline u64 CGXX_SMUX_TX_PAUSE_PKT_DMAC(u64 a)
5784        __attribute__ ((pure, always_inline));
5785static inline u64 CGXX_SMUX_TX_PAUSE_PKT_DMAC(u64 a)
5786{
5787        return 0x20168 + 0x40000 * a;
5788}
5789
5790/**
5791 * Register (RSL) cgx#_smu#_tx_pause_pkt_interval
5792 *
5793 * CGX SMU TX PAUSE-Packet Transmission-Interval Registers This register
5794 * specifies how often PAUSE packets are sent.
5795 */
5796union cgxx_smux_tx_pause_pkt_interval {
5797        u64 u;
5798        struct cgxx_smux_tx_pause_pkt_interval_s {
5799                u64 interval                         : 16;
5800                u64 hg2_intra_interval               : 16;
5801                u64 hg2_intra_en                     : 1;
5802                u64 reserved_33_63                   : 31;
5803        } s;
5804        /* struct cgxx_smux_tx_pause_pkt_interval_s cn; */
5805};
5806
5807static inline u64 CGXX_SMUX_TX_PAUSE_PKT_INTERVAL(u64 a)
5808        __attribute__ ((pure, always_inline));
5809static inline u64 CGXX_SMUX_TX_PAUSE_PKT_INTERVAL(u64 a)
5810{
5811        return 0x20120 + 0x40000 * a;
5812}
5813
5814/**
5815 * Register (RSL) cgx#_smu#_tx_pause_pkt_time
5816 *
5817 * CGX SMU TX PAUSE Packet Time Registers
5818 */
5819union cgxx_smux_tx_pause_pkt_time {
5820        u64 u;
5821        struct cgxx_smux_tx_pause_pkt_time_s {
5822                u64 p_time                           : 16;
5823                u64 reserved_16_63                   : 48;
5824        } s;
5825        /* struct cgxx_smux_tx_pause_pkt_time_s cn; */
5826};
5827
5828static inline u64 CGXX_SMUX_TX_PAUSE_PKT_TIME(u64 a)
5829        __attribute__ ((pure, always_inline));
5830static inline u64 CGXX_SMUX_TX_PAUSE_PKT_TIME(u64 a)
5831{
5832        return 0x20110 + 0x40000 * a;
5833}
5834
5835/**
5836 * Register (RSL) cgx#_smu#_tx_pause_pkt_type
5837 *
5838 * CGX SMU TX PAUSE-Packet P_TYPE-Field Registers This register provides
5839 * the P_TYPE field that is placed in outbound PAUSE packets.
5840 */
5841union cgxx_smux_tx_pause_pkt_type {
5842        u64 u;
5843        struct cgxx_smux_tx_pause_pkt_type_s {
5844                u64 p_type                           : 16;
5845                u64 reserved_16_63                   : 48;
5846        } s;
5847        /* struct cgxx_smux_tx_pause_pkt_type_s cn; */
5848};
5849
5850static inline u64 CGXX_SMUX_TX_PAUSE_PKT_TYPE(u64 a)
5851        __attribute__ ((pure, always_inline));
5852static inline u64 CGXX_SMUX_TX_PAUSE_PKT_TYPE(u64 a)
5853{
5854        return 0x20170 + 0x40000 * a;
5855}
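
/*
 * Example (a minimal sketch, not part of the generated definitions):
 * setting up outbound 802.3 PAUSE packets with the DMAC, P_TYPE, time
 * and interval registers above.  The standard PAUSE multicast DMAC
 * (01:80:c2:00:00:01) and EtherType (0x8808) are used; the pause time
 * and refresh interval are illustrative, and cgx_base is assumed.
 *
 *	void cgx_smu_pause_pkt_setup(void __iomem *cgx_base, int lmac)
 *	{
 *		union cgxx_smux_tx_pause_pkt_dmac pdmac = { .u = 0 };
 *		union cgxx_smux_tx_pause_pkt_type ptype = { .u = 0 };
 *		union cgxx_smux_tx_pause_pkt_time ptime = { .u = 0 };
 *		union cgxx_smux_tx_pause_pkt_interval pintv = { .u = 0 };
 *
 *		pdmac.s.dmac = 0x0180c2000001ull;
 *		writeq(pdmac.u, cgx_base + CGXX_SMUX_TX_PAUSE_PKT_DMAC(lmac));
 *
 *		ptype.s.p_type = 0x8808;
 *		writeq(ptype.u, cgx_base + CGXX_SMUX_TX_PAUSE_PKT_TYPE(lmac));
 *
 *		ptime.s.p_time = 0xffff;	// quanta advertised in PAUSE
 *		writeq(ptime.u, cgx_base + CGXX_SMUX_TX_PAUSE_PKT_TIME(lmac));
 *
 *		pintv.s.interval = 0x8000;	// refresh before P_TIME expires
 *		writeq(pintv.u, cgx_base + CGXX_SMUX_TX_PAUSE_PKT_INTERVAL(lmac));
 *	}
 */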
5856
5857/**
5858 * Register (RSL) cgx#_smu#_tx_pause_togo
5859 *
5860 * CGX SMU TX Time-to-Backpressure Registers
5861 */
5862union cgxx_smux_tx_pause_togo {
5863        u64 u;
5864        struct cgxx_smux_tx_pause_togo_s {
5865                u64 p_time                           : 16;
5866                u64 msg_time                         : 16;
5867                u64 reserved_32_63                   : 32;
5868        } s;
5869        /* struct cgxx_smux_tx_pause_togo_s cn; */
5870};
5871
5872static inline u64 CGXX_SMUX_TX_PAUSE_TOGO(u64 a)
5873        __attribute__ ((pure, always_inline));
5874static inline u64 CGXX_SMUX_TX_PAUSE_TOGO(u64 a)
5875{
5876        return 0x20130 + 0x40000 * a;
5877}
5878
5879/**
5880 * Register (RSL) cgx#_smu#_tx_pause_zero
5881 *
5882 * CGX SMU TX PAUSE Zero Registers
5883 */
5884union cgxx_smux_tx_pause_zero {
5885        u64 u;
5886        struct cgxx_smux_tx_pause_zero_s {
5887                u64 send                             : 1;
5888                u64 reserved_1_63                    : 63;
5889        } s;
5890        /* struct cgxx_smux_tx_pause_zero_s cn; */
5891};
5892
5893static inline u64 CGXX_SMUX_TX_PAUSE_ZERO(u64 a)
5894        __attribute__ ((pure, always_inline));
5895static inline u64 CGXX_SMUX_TX_PAUSE_ZERO(u64 a)
5896{
5897        return 0x20138 + 0x40000 * a;
5898}
5899
5900/**
5901 * Register (RSL) cgx#_smu#_tx_soft_pause
5902 *
5903 * CGX SMU TX Soft PAUSE Registers
5904 */
5905union cgxx_smux_tx_soft_pause {
5906        u64 u;
5907        struct cgxx_smux_tx_soft_pause_s {
5908                u64 p_time                           : 16;
5909                u64 reserved_16_63                   : 48;
5910        } s;
5911        /* struct cgxx_smux_tx_soft_pause_s cn; */
5912};
5913
5914static inline u64 CGXX_SMUX_TX_SOFT_PAUSE(u64 a)
5915        __attribute__ ((pure, always_inline));
5916static inline u64 CGXX_SMUX_TX_SOFT_PAUSE(u64 a)
5917{
5918        return 0x20128 + 0x40000 * a;
5919}
5920
5921/**
5922 * Register (RSL) cgx#_smu#_tx_thresh
5923 *
5924 * CGX SMU TX Threshold Registers
5925 */
5926union cgxx_smux_tx_thresh {
5927        u64 u;
5928        struct cgxx_smux_tx_thresh_s {
5929                u64 cnt                              : 12;
5930                u64 reserved_12_15                   : 4;
5931                u64 dpi_thresh                       : 5;
5932                u64 reserved_21_23                   : 3;
5933                u64 dpi_depth                        : 5;
5934                u64 reserved_29_31                   : 3;
5935                u64 ecnt                             : 12;
5936                u64 reserved_44_63                   : 20;
5937        } s;
5938        /* struct cgxx_smux_tx_thresh_s cn; */
5939};
5940
5941static inline u64 CGXX_SMUX_TX_THRESH(u64 a)
5942        __attribute__ ((pure, always_inline));
5943static inline u64 CGXX_SMUX_TX_THRESH(u64 a)
5944{
5945        return 0x20180 + 0x40000 * a;
5946}
5947
5948/**
5949 * Register (RSL) cgx#_spu#_an_adv
5950 *
5951 * CGX SPU Autonegotiation Advertisement Registers Software programs this
5952 * register with the contents of the AN-link code word base page to be
5953 * transmitted during autonegotiation. (See IEEE 802.3 section 73.6 for
5954 * details.) Any write operations to this register prior to completion of
5955 * autonegotiation, as indicated by CGX()_SPU()_AN_STATUS[AN_COMPLETE],
5956 * should be followed by a renegotiation in order for the new values to
5957 * take effect. Renegotiation is initiated by setting
5958 * CGX()_SPU()_AN_CONTROL[AN_RESTART]. Once autonegotiation has
5959 * completed, software can examine this register along with
5960 * CGX()_SPU()_AN_LP_BASE to determine the highest common denominator
5961 * technology.
5962 */
5963union cgxx_spux_an_adv {
5964        u64 u;
5965        struct cgxx_spux_an_adv_s {
5966                u64 s                                : 5;
5967                u64 e                                : 5;
5968                u64 pause                            : 1;
5969                u64 asm_dir                          : 1;
5970                u64 xnp_able                         : 1;
5971                u64 rf                               : 1;
5972                u64 ack                              : 1;
5973                u64 np                               : 1;
5974                u64 t                                : 5;
5975                u64 a1g_kx                           : 1;
5976                u64 a10g_kx4                         : 1;
5977                u64 a10g_kr                          : 1;
5978                u64 a40g_kr4                         : 1;
5979                u64 a40g_cr4                         : 1;
5980                u64 a100g_cr10                       : 1;
5981                u64 a100g_kp4                        : 1;
5982                u64 a100g_kr4                        : 1;
5983                u64 a100g_cr4                        : 1;
5984                u64 a25g_krs_crs                     : 1;
5985                u64 a25g_kr_cr                       : 1;
5986                u64 arsv                             : 12;
5987                u64 a25g_rs_fec_req                  : 1;
5988                u64 a25g_br_fec_req                  : 1;
5989                u64 fec_able                         : 1;
5990                u64 fec_req                          : 1;
5991                u64 reserved_48_63                   : 16;
5992        } s;
5993        /* struct cgxx_spux_an_adv_s cn; */
5994};
5995
5996static inline u64 CGXX_SPUX_AN_ADV(u64 a)
5997        __attribute__ ((pure, always_inline));
5998static inline u64 CGXX_SPUX_AN_ADV(u64 a)
5999{
6000        return 0x10198 + 0x40000 * a;
6001}
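
/*
 * Example (a minimal sketch, not part of the generated definitions):
 * advertising 10GBASE-KR with symmetric PAUSE and BASE-R FEC ability in
 * the AN base page.  Per the note above, the write should be followed by
 * a renegotiation (see the CGX()_SPU()_AN_CONTROL sketch further below).
 * The chosen abilities and cgx_base are illustrative.
 *
 *	void cgx_spu_an_advertise_10gkr(void __iomem *cgx_base, int lmac)
 *	{
 *		union cgxx_spux_an_adv adv;
 *
 *		adv.u = readq(cgx_base + CGXX_SPUX_AN_ADV(lmac));
 *		adv.s.pause = 1;	// symmetric PAUSE
 *		adv.s.asm_dir = 0;
 *		adv.s.a10g_kr = 1;	// advertise 10GBASE-KR
 *		adv.s.fec_able = 1;	// BASE-R FEC ability
 *		writeq(adv.u, cgx_base + CGXX_SPUX_AN_ADV(lmac));
 *	}
 */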
6002
6003/**
6004 * Register (RSL) cgx#_spu#_an_bp_status
6005 *
6006 * CGX SPU Autonegotiation Backplane Ethernet & BASE-R Copper Status
6007 * Registers The contents of this register are updated during
6008 * autonegotiation and are valid when CGX()_SPU()_AN_STATUS[AN_COMPLETE]
6009 * is set. At that time, one of the port type bits will be set depending
6010 * on the AN priority resolution. The port types are listed in order of
6011 * decreasing priority. If a BASE-R type is negotiated then [FEC] or
6012 * [RS_FEC] will be set to indicate whether/which FEC operation has been
6013 * negotiated and will be clear otherwise.
6014 */
6015union cgxx_spux_an_bp_status {
6016        u64 u;
6017        struct cgxx_spux_an_bp_status_s {
6018                u64 bp_an_able                       : 1;
6019                u64 n1g_kx                           : 1;
6020                u64 n10g_kx4                         : 1;
6021                u64 n10g_kr                          : 1;
6022                u64 n25g_kr1                         : 1;
6023                u64 n25g_cr1                         : 1;
6024                u64 n25g_krs_crs                     : 1;
6025                u64 n25g_kr_cr                       : 1;
6026                u64 n40g_kr4                         : 1;
6027                u64 n40g_cr4                         : 1;
6028                u64 n50g_kr2                         : 1;
6029                u64 n50g_cr2                         : 1;
6030                u64 n100g_cr10                       : 1;
6031                u64 n100g_kp4                        : 1;
6032                u64 n100g_kr4                        : 1;
6033                u64 n100g_cr4                        : 1;
6034                u64 fec                              : 1;
6035                u64 rs_fec                           : 1;
6036                u64 reserved_18_63                   : 46;
6037        } s;
6038        /* struct cgxx_spux_an_bp_status_s cn; */
6039};
6040
6041static inline u64 CGXX_SPUX_AN_BP_STATUS(u64 a)
6042        __attribute__ ((pure, always_inline));
6043static inline u64 CGXX_SPUX_AN_BP_STATUS(u64 a)
6044{
6045        return 0x101b8 + 0x40000 * a;
6046}
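
/*
 * Example (a minimal sketch, not part of the generated definitions):
 * decoding the negotiated FEC mode once autonegotiation has completed
 * (the caller is assumed to have checked
 * CGX()_SPU()_AN_STATUS[AN_COMPLETE] first).  The enum and cgx_base are
 * illustrative.
 *
 *	enum cgx_an_fec { CGX_AN_FEC_NONE, CGX_AN_FEC_BASER, CGX_AN_FEC_RS };
 *
 *	enum cgx_an_fec cgx_spu_an_fec(void __iomem *cgx_base, int lmac)
 *	{
 *		union cgxx_spux_an_bp_status bp;
 *
 *		bp.u = readq(cgx_base + CGXX_SPUX_AN_BP_STATUS(lmac));
 *		if (bp.s.rs_fec)
 *			return CGX_AN_FEC_RS;
 *		if (bp.s.fec)
 *			return CGX_AN_FEC_BASER;
 *		return CGX_AN_FEC_NONE;
 *	}
 */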
6047
6048/**
6049 * Register (RSL) cgx#_spu#_an_control
6050 *
6051 * CGX SPU Autonegotiation Control Registers
6052 */
6053union cgxx_spux_an_control {
6054        u64 u;
6055        struct cgxx_spux_an_control_s {
6056                u64 reserved_0_8                     : 9;
6057                u64 an_restart                       : 1;
6058                u64 reserved_10_11                   : 2;
6059                u64 an_en                            : 1;
6060                u64 xnp_en                           : 1;
6061                u64 reserved_14                      : 1;
6062                u64 an_reset                         : 1;
6063                u64 an_arb_link_chk_en               : 1;
6064                u64 usx_an_arb_link_chk_en           : 1;
6065                u64 reserved_18_63                   : 46;
6066        } s;
6067        /* struct cgxx_spux_an_control_s cn; */
6068};
6069
6070static inline u64 CGXX_SPUX_AN_CONTROL(u64 a)
6071        __attribute__ ((pure, always_inline));
6072static inline u64 CGXX_SPUX_AN_CONTROL(u64 a)
6073{
6074        return 0x10188 + 0x40000 * a;
6075}
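
/*
 * Usage sketch (illustrative, not part of the generated definitions):
 * restart clause 73 autonegotiation on LMAC 'lmac' of a CGX whose BAR0 is
 * mapped at 'base'.  The readq()/writeq() accessors (e.g. from asm/io.h)
 * and the helper name are assumptions of this sketch.
 */
static inline void cgxx_spux_an_restart_example(void __iomem *base, u64 lmac)
{
	union cgxx_spux_an_control ctl;

	ctl.u = readq(base + CGXX_SPUX_AN_CONTROL(lmac));
	ctl.s.an_en = 1;	/* enable autonegotiation */
	ctl.s.an_restart = 1;	/* request a restart of the AN process */
	writeq(ctl.u, base + CGXX_SPUX_AN_CONTROL(lmac));
}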
6076
6077/**
6078 * Register (RSL) cgx#_spu#_an_lp_base
6079 *
6080 * CGX SPU Autonegotiation Link-Partner Base-Page Ability Registers This
6081 * register captures the contents of the latest AN link code word base
6082 * page received from the link partner during autonegotiation. (See IEEE
6083 * 802.3 section 73.6 for details.) CGX()_SPU()_AN_STATUS[PAGE_RX] is set
6084 * when this register is updated by hardware.
6085 */
6086union cgxx_spux_an_lp_base {
6087        u64 u;
6088        struct cgxx_spux_an_lp_base_s {
6089                u64 s                                : 5;
6090                u64 e                                : 5;
6091                u64 pause                            : 1;
6092                u64 asm_dir                          : 1;
6093                u64 xnp_able                         : 1;
6094                u64 rf                               : 1;
6095                u64 ack                              : 1;
6096                u64 np                               : 1;
6097                u64 t                                : 5;
6098                u64 a1g_kx                           : 1;
6099                u64 a10g_kx4                         : 1;
6100                u64 a10g_kr                          : 1;
6101                u64 a40g_kr4                         : 1;
6102                u64 a40g_cr4                         : 1;
6103                u64 a100g_cr10                       : 1;
6104                u64 a100g_kp4                        : 1;
6105                u64 a100g_kr4                        : 1;
6106                u64 a100g_cr4                        : 1;
6107                u64 a25g_krs_crs                     : 1;
6108                u64 a25g_kr_cr                       : 1;
6109                u64 arsv                             : 12;
6110                u64 a25g_rs_fec_req                  : 1;
6111                u64 a25g_br_fec_req                  : 1;
6112                u64 fec_able                         : 1;
6113                u64 fec_req                          : 1;
6114                u64 reserved_48_63                   : 16;
6115        } s;
6116        /* struct cgxx_spux_an_lp_base_s cn; */
6117};
6118
6119static inline u64 CGXX_SPUX_AN_LP_BASE(u64 a)
6120        __attribute__ ((pure, always_inline));
6121static inline u64 CGXX_SPUX_AN_LP_BASE(u64 a)
6122{
6123        return 0x101a0 + 0x40000 * a;
6124}
6125
6126/**
6127 * Register (RSL) cgx#_spu#_an_lp_xnp
6128 *
6129 * CGX SPU Autonegotiation Link Partner Extended Next Page Ability
6130 * Registers This register captures the contents of the latest next page
6131 * code word received from the link partner during autonegotiation, if
6132 * any. See IEEE 802.3 section 73.7.7 for details.
6133 */
6134union cgxx_spux_an_lp_xnp {
6135        u64 u;
6136        struct cgxx_spux_an_lp_xnp_s {
6137                u64 m_u                              : 11;
6138                u64 toggle                           : 1;
6139                u64 ack2                             : 1;
6140                u64 mp                               : 1;
6141                u64 ack                              : 1;
6142                u64 np                               : 1;
6143                u64 u                                : 32;
6144                u64 reserved_48_63                   : 16;
6145        } s;
6146        /* struct cgxx_spux_an_lp_xnp_s cn; */
6147};
6148
6149static inline u64 CGXX_SPUX_AN_LP_XNP(u64 a)
6150        __attribute__ ((pure, always_inline));
6151static inline u64 CGXX_SPUX_AN_LP_XNP(u64 a)
6152{
6153        return 0x101b0 + 0x40000 * a;
6154}
6155
6156/**
6157 * Register (RSL) cgx#_spu#_an_status
6158 *
6159 * CGX SPU Autonegotiation Status Registers
6160 */
6161union cgxx_spux_an_status {
6162        u64 u;
6163        struct cgxx_spux_an_status_s {
6164                u64 lp_an_able                       : 1;
6165                u64 reserved_1                       : 1;
6166                u64 link_status                      : 1;
6167                u64 an_able                          : 1;
6168                u64 rmt_flt                          : 1;
6169                u64 an_complete                      : 1;
6170                u64 page_rx                          : 1;
6171                u64 xnp_stat                         : 1;
6172                u64 reserved_8                       : 1;
6173                u64 prl_flt                          : 1;
6174                u64 reserved_10_63                   : 54;
6175        } s;
6176        /* struct cgxx_spux_an_status_s cn; */
6177};
6178
6179static inline u64 CGXX_SPUX_AN_STATUS(u64 a)
6180        __attribute__ ((pure, always_inline));
6181static inline u64 CGXX_SPUX_AN_STATUS(u64 a)
6182{
6183        return 0x10190 + 0x40000 * a;
6184}
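
/*
 * Usage sketch (illustrative only): wait for autonegotiation to complete and
 * then report which FEC mode was resolved from the backplane status register
 * above, which is only valid once [AN_COMPLETE] is set.  readq() and the
 * polling/timeout policy are assumptions of this sketch.
 */
static inline int cgxx_spux_an_fec_resolved_example(void __iomem *base,
						    u64 lmac)
{
	union cgxx_spux_an_status sts = { .u = 0 };
	union cgxx_spux_an_bp_status bp;
	int timeout = 10000;

	while (timeout--) {
		sts.u = readq(base + CGXX_SPUX_AN_STATUS(lmac));
		if (sts.s.an_complete)
			break;
	}
	if (!sts.s.an_complete)
		return -1;	/* autonegotiation did not complete */

	bp.u = readq(base + CGXX_SPUX_AN_BP_STATUS(lmac));
	if (bp.s.rs_fec)
		return 2;	/* Reed-Solomon FEC negotiated */
	if (bp.s.fec)
		return 1;	/* BASE-R FEC negotiated */
	return 0;		/* no FEC negotiated */
}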
6185
6186/**
6187 * Register (RSL) cgx#_spu#_an_xnp_tx
6188 *
6189 * CGX SPU Autonegotiation Extended Next Page Transmit Registers Software
6190 * programs this register with the contents of the AN message next page
6191 * or unformatted next page link code word to be transmitted during
6192 * autonegotiation. Next page exchange occurs after the base link code
6193 * words have been exchanged if either end of the link segment sets the
6194 * NP bit to 1, indicating that it has at least one next page to send.
6195 * Once initiated, next page exchange continues until both ends of the
6196 * link segment set their NP bits to 0. See IEEE 802.3 section 73.7.7 for
6197 * details.
6198 */
6199union cgxx_spux_an_xnp_tx {
6200        u64 u;
6201        struct cgxx_spux_an_xnp_tx_s {
6202                u64 m_u                              : 11;
6203                u64 toggle                           : 1;
6204                u64 ack2                             : 1;
6205                u64 mp                               : 1;
6206                u64 ack                              : 1;
6207                u64 np                               : 1;
6208                u64 u                                : 32;
6209                u64 reserved_48_63                   : 16;
6210        } s;
6211        /* struct cgxx_spux_an_xnp_tx_s cn; */
6212};
6213
6214static inline u64 CGXX_SPUX_AN_XNP_TX(u64 a)
6215        __attribute__ ((pure, always_inline));
6216static inline u64 CGXX_SPUX_AN_XNP_TX(u64 a)
6217{
6218        return 0x101a8 + 0x40000 * a;
6219}
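
/*
 * Usage sketch (illustrative only): queue a final message next page for
 * transmission.  The null-message code (0x1) follows IEEE 802.3 Annex 28C,
 * but the value and the writeq() accessor are assumptions of this sketch;
 * see section 73.7.7 for the exchange rules summarized above.
 */
static inline void cgxx_spux_an_send_last_xnp_example(void __iomem *base,
						      u64 lmac)
{
	union cgxx_spux_an_xnp_tx xnp;

	xnp.u = 0;
	xnp.s.mp = 1;		/* message (not unformatted) next page */
	xnp.s.m_u = 0x1;	/* assumed: null message code */
	xnp.s.np = 0;		/* no further next pages to send */
	writeq(xnp.u, base + CGXX_SPUX_AN_XNP_TX(lmac));
}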
6220
6221/**
6222 * Register (RSL) cgx#_spu#_br_algn_status
6223 *
6224 * CGX SPU Multilane BASE-R PCS Alignment-Status Registers This register
6225 * implements the IEEE 802.3 multilane BASE-R PCS alignment status 1-4
6226 * registers (3.50-3.53). It is valid only when the LPCS type is
6227 * 40GBASE-R, 50GBASE-R, 100GBASE-R, (CGX()_CMR()_CONFIG[LMAC_TYPE] =
6228 * CGX_LMAC_TYPES_E::FORTYG_R,FIFTYG_R,HUNDREDG_R), and always returns
6229 * 0x0 for all other LPCS types. Service interfaces (lanes) 19-0 (100G)
6230 * and 3-0 (all others) are mapped to PCS lanes 19-0 or 3-0 via
6231 * CGX()_SPU()_BR_LANE_MAP()[LN_MAPPING]. For 100G, logical lane 0 fans
6232 * out to service interfaces 0-4, logical lane 1 fans out to service
6233 * interfaces 5-9, ... etc. For all other modes, logical lanes and
6234 * service interfaces are identical. Logical interfaces (lanes) map to
6235 * SerDes lanes via CGX()_CMR()_CONFIG[LANE_TO_SDS] (programmable).
6236 */
6237union cgxx_spux_br_algn_status {
6238        u64 u;
6239        struct cgxx_spux_br_algn_status_s {
6240                u64 block_lock                       : 20;
6241                u64 reserved_20_29                   : 10;
6242                u64 alignd                           : 1;
6243                u64 reserved_31_40                   : 10;
6244                u64 marker_lock                      : 20;
6245                u64 reserved_61_63                   : 3;
6246        } s;
6247        /* struct cgxx_spux_br_algn_status_s cn; */
6248};
6249
6250static inline u64 CGXX_SPUX_BR_ALGN_STATUS(u64 a)
6251        __attribute__ ((pure, always_inline));
6252static inline u64 CGXX_SPUX_BR_ALGN_STATUS(u64 a)
6253{
6254        return 0x10050 + 0x40000 * a;
6255}
6256
6257/**
6258 * Register (RSL) cgx#_spu#_br_lane_map#
6259 *
6260 * CGX SPU 40,50,100GBASE-R Lane-Mapping Registers This register
6261 * implements the IEEE 802.3 lane 0-19 mapping registers (3.400-3.403).
6262 * It is valid only when the LPCS type is 40GBASE-R, 50GBASE-R,
6263 * 100GBASE-R, USXGMII (CGX()_CMR()_CONFIG[LMAC_TYPE]), and always
6264 * returns 0x0 for all other LPCS types. The LNx_MAPPING field for each
6265 * programmed PCS lane (called service interface in 802.3) is valid when
6266 * that lane has achieved alignment marker lock on the receive side (i.e.
6267 * the associated CGX()_SPU()_BR_ALGN_STATUS[MARKER_LOCK] = 1), and is
6268 * invalid otherwise. When valid, it returns the actual detected receive
6269 * PCS lane number based on the received alignment marker contents
6270 * received on that service interface.  In RS-FEC mode the LNx_MAPPING
6271 * field is valid when that lane has achieved alignment marker lock on
6272 * the receive side (i.e. the associated
6273 * CGX()_SPU()_RSFEC_STATUS[AMPS_LOCK] = 1), and is invalid otherwise.
6274 * When valid, it returns the actual detected receive FEC lane number
6275 * based on the alignment marker contents received on that logical
6276 * lane; therefore, for RS-FEC expect that LNx_MAPPING = x.  The
6277 * mapping is flexible because IEEE 802.3 allows multilane BASE-R receive
6278 * lanes to be re-ordered. Note that for the transmit side, each logical
6279 * lane is mapped to a physical SerDes lane based on the programming of
6280 * CGX()_CMR()_CONFIG[LANE_TO_SDS]. For the receive side,
6281 * CGX()_CMR()_CONFIG[LANE_TO_SDS] specifies the logical lane to physical
6282 * SerDes lane mapping, and this register specifies the service interface
6283 * (or lane) to PCS lane mapping.
6284 */
6285union cgxx_spux_br_lane_mapx {
6286        u64 u;
6287        struct cgxx_spux_br_lane_mapx_s {
6288                u64 ln_mapping                       : 6;
6289                u64 reserved_6_63                    : 58;
6290        } s;
6291        /* struct cgxx_spux_br_lane_mapx_s cn; */
6292};
6293
6294static inline u64 CGXX_SPUX_BR_LANE_MAPX(u64 a, u64 b)
6295        __attribute__ ((pure, always_inline));
6296static inline u64 CGXX_SPUX_BR_LANE_MAPX(u64 a, u64 b)
6297{
6298        return 0x10600 + 0x40000 * a + 8 * b;
6299}
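
/*
 * Usage sketch (illustrative only): read the detected RX PCS lane mapping
 * for one service interface, honouring the validity rule above (LN_MAPPING
 * is meaningful only while the matching MARKER_LOCK bit in
 * CGX()_SPU()_BR_ALGN_STATUS is set).  readq() is an assumed accessor.
 */
static inline int cgxx_spux_rx_lane_map_example(void __iomem *base, u64 lmac,
						u64 si)
{
	union cgxx_spux_br_algn_status algn;
	union cgxx_spux_br_lane_mapx map;

	algn.u = readq(base + CGXX_SPUX_BR_ALGN_STATUS(lmac));
	if (!(algn.s.marker_lock & (1ull << si)))
		return -1;	/* mapping for this lane is not valid yet */

	map.u = readq(base + CGXX_SPUX_BR_LANE_MAPX(lmac, si));
	return (int)map.s.ln_mapping;
}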
6300
6301/**
6302 * Register (RSL) cgx#_spu#_br_pmd_control
6303 *
6304 * CGX SPU BASE-R PMD Control Registers
6305 */
6306union cgxx_spux_br_pmd_control {
6307        u64 u;
6308        struct cgxx_spux_br_pmd_control_s {
6309                u64 train_restart                    : 1;
6310                u64 train_en                         : 1;
6311                u64 use_lane_poly                    : 1;
6312                u64 max_wait_disable                 : 1;
6313                u64 reserved_4_63                    : 60;
6314        } s;
6315        struct cgxx_spux_br_pmd_control_cn96xx {
6316                u64 train_restart                    : 1;
6317                u64 train_en                         : 1;
6318                u64 use_lane_poly                    : 1;
6319                u64 reserved_3_63                    : 61;
6320        } cn96xx;
6321        /* struct cgxx_spux_br_pmd_control_s cnf95xxp1; */
6322        /* struct cgxx_spux_br_pmd_control_cn96xx cnf95xxp2; */
6323};
6324
6325static inline u64 CGXX_SPUX_BR_PMD_CONTROL(u64 a)
6326        __attribute__ ((pure, always_inline));
6327static inline u64 CGXX_SPUX_BR_PMD_CONTROL(u64 a)
6328{
6329        return 0x100a8 + 0x40000 * a;
6330}
6331
6332/**
6333 * Register (RSL) cgx#_spu#_br_pmd_ld_cup
6334 *
6335 * INTERNAL:CGX SPU BASE-R PMD Local Device Coefficient Update Registers
6336 * This register implements MDIO register 1.154 of 802.3-2012 Section 5
6337 * CL45 for 10GBASE-R and of 802.3by-2016 CL45 for 25GBASE-R. Note
6338 * that for 10G and 25G only LN0_ is used. It implements MDIO registers
6339 * 1.1300-1.1303 for all other BASE-R modes (40G, 50G, 100G) per
6340 * 802.3bj-2014 CL45. Note that for 50G LN0_ and LN1_ only are used.  The
6341 * fields in this register are read/write even though they are specified
6342 * as read-only in 802.3.  The register is automatically cleared at the
6343 * start of training. When link training is in progress, each field
6344 * reflects the contents of the coefficient update field in the
6345 * associated lane's outgoing training frame.  If
6346 * CGX()_SPU_DBG_CONTROL[BR_PMD_TRAIN_SOFT_EN] is set, then this register
6347 * must be updated by software during link training and hardware updates
6348 * are disabled. If CGX()_SPU_DBG_CONTROL[BR_PMD_TRAIN_SOFT_EN] is clear,
6349 * this register is automatically updated by hardware, and it should not
6350 * be written by software. The lane fields in this register are indexed
6351 * by logical PCS lane ID.
6352 */
6353union cgxx_spux_br_pmd_ld_cup {
6354        u64 u;
6355        struct cgxx_spux_br_pmd_ld_cup_s {
6356                u64 ln0_cup                          : 16;
6357                u64 ln1_cup                          : 16;
6358                u64 ln2_cup                          : 16;
6359                u64 ln3_cup                          : 16;
6360        } s;
6361        /* struct cgxx_spux_br_pmd_ld_cup_s cn; */
6362};
6363
6364static inline u64 CGXX_SPUX_BR_PMD_LD_CUP(u64 a)
6365        __attribute__ ((pure, always_inline));
6366static inline u64 CGXX_SPUX_BR_PMD_LD_CUP(u64 a)
6367{
6368        return 0x100c8 + 0x40000 * a;
6369}
6370
6371/**
6372 * Register (RSL) cgx#_spu#_br_pmd_ld_rep
6373 *
6374 * INTERNAL:CGX SPU BASE-R PMD Local Device Status Report Registers  This
6375 * register implements MDIO register 1.155 of 802.3-2012 Section 5 CL45
6376 * for 10GBASE-R and of 802.3by-2016 CL45 for 25GBASE-R. Note that
6377 * for 10G and 25G only LN0_ is used. It implements MDIO registers
6378 * 1.1400-1.1403 for all other BASE-R modes (40G, 50G, 100G) per
6379 * 802.3bj-2014 CL45. Note that for 50G LN0_ and LN1_ only are used.  The
6380 * fields in this register are read/write even though they are specified
6381 * as read-only in 802.3.  The register is automatically cleared at the
6382 * start of training. Each field reflects the contents of the status
6383 * report field in the associated lane's outgoing training frame.  If
6384 * CGX()_SPU_DBG_CONTROL[BR_PMD_TRAIN_SOFT_EN] is set, then this register
6385 * must be updated by software during link training and hardware updates
6386 * are disabled. If CGX()_SPU_DBG_CONTROL[BR_PMD_TRAIN_SOFT_EN] is clear,
6387 * this register is automatically updated by hardware, and it should not
6388 * be written by software. The lane fields in this register are indexed
6389 * by logical PCS lane ID.
6390 */
6391union cgxx_spux_br_pmd_ld_rep {
6392        u64 u;
6393        struct cgxx_spux_br_pmd_ld_rep_s {
6394                u64 ln0_rep                          : 16;
6395                u64 ln1_rep                          : 16;
6396                u64 ln2_rep                          : 16;
6397                u64 ln3_rep                          : 16;
6398        } s;
6399        /* struct cgxx_spux_br_pmd_ld_rep_s cn; */
6400};
6401
6402static inline u64 CGXX_SPUX_BR_PMD_LD_REP(u64 a)
6403        __attribute__ ((pure, always_inline));
6404static inline u64 CGXX_SPUX_BR_PMD_LD_REP(u64 a)
6405{
6406        return 0x100d0 + 0x40000 * a;
6407}
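
/*
 * Usage sketch (illustrative only): with software-driven link training
 * selected (CGX()_SPU_DBG_CONTROL[BR_PMD_TRAIN_SOFT_EN] set, as described
 * above), software supplies the outgoing coefficient update and status
 * report for lane 0.  The encodings follow IEEE 802.3 tables 72-4/72-5;
 * the helper name and readq()/writeq() accessors are assumptions.
 */
static inline void cgxx_spux_soft_train_ln0_example(void __iomem *base,
						    u64 lmac, u64 cup, u64 rep)
{
	union cgxx_spux_br_pmd_ld_cup ld_cup;
	union cgxx_spux_br_pmd_ld_rep ld_rep;

	ld_cup.u = readq(base + CGXX_SPUX_BR_PMD_LD_CUP(lmac));
	ld_cup.s.ln0_cup = cup;	/* coefficient update to transmit */
	writeq(ld_cup.u, base + CGXX_SPUX_BR_PMD_LD_CUP(lmac));

	ld_rep.u = readq(base + CGXX_SPUX_BR_PMD_LD_REP(lmac));
	ld_rep.s.ln0_rep = rep;	/* status report to transmit */
	writeq(ld_rep.u, base + CGXX_SPUX_BR_PMD_LD_REP(lmac));
}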
6408
6409/**
6410 * Register (RSL) cgx#_spu#_br_pmd_lp_cup
6411 *
6412 * INTERNAL:CGX SPU BASE-R PMD Link Partner Coefficient Update Registers
6413 * This register implements MDIO register 1.152 of 802.3-2012 Section 5
6414 * CL45 for 10GBASE-R and of 802.3by-2016 CL45 for 25GBASE-R. Note
6415 * that for 10G and 25G only LN0_ is used. It implements MDIO registers
6416 * 1.1100-1.1103 for all other BASE-R modes (40G, 50G, 100G) per
6417 * 802.3bj-2014 CL45. Note that for 50G LN0_ and LN1_ only are used.  The
6418 * register is automatically cleared at the start of training. Each field
6419 * reflects the contents of the coefficient update field in the lane's
6420 * most recently received training frame. This register should not be
6421 * written when link training is enabled, i.e. when
6422 * CGX()_SPU()_BR_PMD_CONTROL[TRAIN_EN] is set. The lane fields in this
6423 * register are indexed by logical PCS lane ID.
6424 */
6425union cgxx_spux_br_pmd_lp_cup {
6426        u64 u;
6427        struct cgxx_spux_br_pmd_lp_cup_s {
6428                u64 ln0_cup                          : 16;
6429                u64 ln1_cup                          : 16;
6430                u64 ln2_cup                          : 16;
6431                u64 ln3_cup                          : 16;
6432        } s;
6433        /* struct cgxx_spux_br_pmd_lp_cup_s cn; */
6434};
6435
6436static inline u64 CGXX_SPUX_BR_PMD_LP_CUP(u64 a)
6437        __attribute__ ((pure, always_inline));
6438static inline u64 CGXX_SPUX_BR_PMD_LP_CUP(u64 a)
6439{
6440        return 0x100b8 + 0x40000 * a;
6441}
6442
6443/**
6444 * Register (RSL) cgx#_spu#_br_pmd_lp_rep
6445 *
6446 * INTERNAL:CGX SPU BASE-R PMD Link Partner Status Report Registers  This
6447 * register implements MDIO register 1.153 of 802.3-2012 Section 5 CL45
6448 * for 10GBASE-R and of 802.3by-2016 CL45 for 25GBASE-R. Note that
6449 * for 10G and 25G only LN0_ is used. It implements MDIO registers
6450 * 1.1200-1.1203 for all other BASE-R modes (40G, 50G, 100G) per
6451 * 802.3bj-2014 CL45. Note that for 50G LN0_ and LN1_ only are used.  The
6452 * register is automatically cleared at the start of training. Each field
6453 * reflects the contents of the coefficient update field in the lane's
6454 * most recently received training frame. This register should not be
6455 * written when link training is enabled, i.e. when
6456 * CGX()_SPU()_BR_PMD_CONTROL[TRAIN_EN] is set. The lane fields in this
6457 * register are indexed by logical PCS lane ID.
6458 */
6459union cgxx_spux_br_pmd_lp_rep {
6460        u64 u;
6461        struct cgxx_spux_br_pmd_lp_rep_s {
6462                u64 ln0_rep                          : 16;
6463                u64 ln1_rep                          : 16;
6464                u64 ln2_rep                          : 16;
6465                u64 ln3_rep                          : 16;
6466        } s;
6467        /* struct cgxx_spux_br_pmd_lp_rep_s cn; */
6468};
6469
6470static inline u64 CGXX_SPUX_BR_PMD_LP_REP(u64 a)
6471        __attribute__ ((pure, always_inline));
6472static inline u64 CGXX_SPUX_BR_PMD_LP_REP(u64 a)
6473{
6474        return 0x100c0 + 0x40000 * a;
6475}
6476
6477/**
6478 * Register (RSL) cgx#_spu#_br_pmd_status
6479 *
6480 * INTERNAL:CGX SPU BASE-R PMD Status Registers  The lane fields in this
6481 * register are indexed by logical PCS lane ID. The lane 0 field (LN0_*)
6482 * is valid for 10GBASE-R, 25GBASE-R, 40GBASE-R, 50GBASE-R and
6483 * 100GBASE-R. The lane 1 field (LN1_*) is valid for 40GBASE-R, 50GBASE-R
6484 * and 100GBASE-R. The remaining fields (LN2_*, LN3_*) are only valid for
6485 * 40GBASE-R and 100GBASE-R.
6486 */
6487union cgxx_spux_br_pmd_status {
6488        u64 u;
6489        struct cgxx_spux_br_pmd_status_s {
6490                u64 ln0_train_status                 : 4;
6491                u64 ln1_train_status                 : 4;
6492                u64 ln2_train_status                 : 4;
6493                u64 ln3_train_status                 : 4;
6494                u64 reserved_16_63                   : 48;
6495        } s;
6496        /* struct cgxx_spux_br_pmd_status_s cn; */
6497};
6498
6499static inline u64 CGXX_SPUX_BR_PMD_STATUS(u64 a)
6500        __attribute__ ((pure, always_inline));
6501static inline u64 CGXX_SPUX_BR_PMD_STATUS(u64 a)
6502{
6503        return 0x100b0 + 0x40000 * a;
6504}
6505
6506/**
6507 * Register (RSL) cgx#_spu#_br_status1
6508 *
6509 * CGX SPU BASE-R Status 1 Registers
6510 */
6511union cgxx_spux_br_status1 {
6512        u64 u;
6513        struct cgxx_spux_br_status1_s {
6514                u64 blk_lock                         : 1;
6515                u64 hi_ber                           : 1;
6516                u64 prbs31                           : 1;
6517                u64 prbs9                            : 1;
6518                u64 reserved_4_11                    : 8;
6519                u64 rcv_lnk                          : 1;
6520                u64 reserved_13_63                   : 51;
6521        } s;
6522        /* struct cgxx_spux_br_status1_s cn; */
6523};
6524
6525static inline u64 CGXX_SPUX_BR_STATUS1(u64 a)
6526        __attribute__ ((pure, always_inline));
6527static inline u64 CGXX_SPUX_BR_STATUS1(u64 a)
6528{
6529        return 0x10030 + 0x40000 * a;
6530}
6531
6532/**
6533 * Register (RSL) cgx#_spu#_br_status2
6534 *
6535 * CGX SPU BASE-R Status 2 Registers This register implements a
6536 * combination of the following IEEE 802.3 registers: * BASE-R PCS status
6537 * 2 (MDIO address 3.33). * BASE-R BER high-order counter (MDIO address
6538 * 3.44). * Errored-blocks high-order counter (MDIO address 3.45).  Note
6539 * that the relative locations of some fields have been moved from IEEE
6540 * 802.3 in order to make the register layout more software friendly: the
6541 * BER counter high-order and low-order bits from sections 3.44 and 3.33
6542 * have been combined into the contiguous, 22-bit [BER_CNT] field;
6543 * likewise, the errored-blocks counter high-order and low-order bits
6544 * from section 3.45 have been combined into the contiguous, 22-bit
6545 * [ERR_BLKS] field.
6546 */
6547union cgxx_spux_br_status2 {
6548        u64 u;
6549        struct cgxx_spux_br_status2_s {
6550                u64 reserved_0_13                    : 14;
6551                u64 latched_ber                      : 1;
6552                u64 latched_lock                     : 1;
6553                u64 ber_cnt                          : 22;
6554                u64 reserved_38_39                   : 2;
6555                u64 err_blks                         : 22;
6556                u64 reserved_62_63                   : 2;
6557        } s;
6558        /* struct cgxx_spux_br_status2_s cn; */
6559};
6560
6561static inline u64 CGXX_SPUX_BR_STATUS2(u64 a)
6562        __attribute__ ((pure, always_inline));
6563static inline u64 CGXX_SPUX_BR_STATUS2(u64 a)
6564{
6565        return 0x10038 + 0x40000 * a;
6566}
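
/*
 * Usage sketch (illustrative only): sample the contiguous 22-bit BER and
 * errored-blocks counters that this register combines from MDIO registers
 * 3.33, 3.44 and 3.45 (see the description above).  readq() is an assumed
 * accessor.
 */
static inline void cgxx_spux_br_err_sample_example(void __iomem *base,
						   u64 lmac, u64 *ber_cnt,
						   u64 *err_blks)
{
	union cgxx_spux_br_status2 st2;

	st2.u = readq(base + CGXX_SPUX_BR_STATUS2(lmac));
	*ber_cnt = st2.s.ber_cnt;	/* 22-bit BER counter */
	*err_blks = st2.s.err_blks;	/* 22-bit errored-blocks counter */
}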
6567
6568/**
6569 * Register (RSL) cgx#_spu#_br_tp_control
6570 *
6571 * CGX SPU BASE-R Test-Pattern Control Registers Refer to the test
6572 * pattern methodology described in 802.3 sections 49.2.8 and 82.2.10.
6573 */
6574union cgxx_spux_br_tp_control {
6575        u64 u;
6576        struct cgxx_spux_br_tp_control_s {
6577                u64 dp_sel                           : 1;
6578                u64 tp_sel                           : 1;
6579                u64 rx_tp_en                         : 1;
6580                u64 tx_tp_en                         : 1;
6581                u64 prbs31_tx                        : 1;
6582                u64 prbs31_rx                        : 1;
6583                u64 prbs9_tx                         : 1;
6584                u64 scramble_tp                      : 2;
6585                u64 pr_tp_data_type                  : 1;
6586                u64 reserved_10_63                   : 54;
6587        } s;
6588        /* struct cgxx_spux_br_tp_control_s cn; */
6589};
6590
6591static inline u64 CGXX_SPUX_BR_TP_CONTROL(u64 a)
6592        __attribute__ ((pure, always_inline));
6593static inline u64 CGXX_SPUX_BR_TP_CONTROL(u64 a)
6594{
6595        return 0x10040 + 0x40000 * a;
6596}
6597
6598/**
6599 * Register (RSL) cgx#_spu#_br_tp_err_cnt
6600 *
6601 * CGX SPU BASE-R Test-Pattern Error-Count Registers This register
6602 * provides the BASE-R PCS test-pattern error counter.
6603 */
6604union cgxx_spux_br_tp_err_cnt {
6605        u64 u;
6606        struct cgxx_spux_br_tp_err_cnt_s {
6607                u64 err_cnt                          : 16;
6608                u64 reserved_16_63                   : 48;
6609        } s;
6610        /* struct cgxx_spux_br_tp_err_cnt_s cn; */
6611};
6612
6613static inline u64 CGXX_SPUX_BR_TP_ERR_CNT(u64 a)
6614        __attribute__ ((pure, always_inline));
6615static inline u64 CGXX_SPUX_BR_TP_ERR_CNT(u64 a)
6616{
6617        return 0x10048 + 0x40000 * a;
6618}
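
/*
 * Usage sketch (illustrative only): run the PRBS31 pattern in both
 * directions and sample the test-pattern error counter above.  The choice
 * of PRBS31 and the readq()/writeq() accessors are assumptions; sections
 * 49.2.8 and 82.2.10 of 802.3 describe the methodology.
 */
static inline u64 cgxx_spux_prbs31_example(void __iomem *base, u64 lmac)
{
	union cgxx_spux_br_tp_control tp;
	union cgxx_spux_br_tp_err_cnt cnt;

	tp.u = readq(base + CGXX_SPUX_BR_TP_CONTROL(lmac));
	tp.s.prbs31_tx = 1;	/* transmit PRBS31 */
	tp.s.prbs31_rx = 1;	/* check received data against PRBS31 */
	writeq(tp.u, base + CGXX_SPUX_BR_TP_CONTROL(lmac));

	cnt.u = readq(base + CGXX_SPUX_BR_TP_ERR_CNT(lmac));
	return cnt.s.err_cnt;	/* current test-pattern error count */
}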
6619
6620/**
6621 * Register (RSL) cgx#_spu#_br_tp_seed_a
6622 *
6623 * CGX SPU BASE-R Test-Pattern Seed A Registers Refer to the test pattern
6624 * methodology described in 802.3 sections 49.2.8 and 82.2.10.
6625 */
6626union cgxx_spux_br_tp_seed_a {
6627        u64 u;
6628        struct cgxx_spux_br_tp_seed_a_s {
6629                u64 tp_seed_a                        : 58;
6630                u64 reserved_58_63                   : 6;
6631        } s;
6632        /* struct cgxx_spux_br_tp_seed_a_s cn; */
6633};
6634
6635static inline u64 CGXX_SPUX_BR_TP_SEED_A(u64 a)
6636        __attribute__ ((pure, always_inline));
6637static inline u64 CGXX_SPUX_BR_TP_SEED_A(u64 a)
6638{
6639        return 0x10060 + 0x40000 * a;
6640}
6641
6642/**
6643 * Register (RSL) cgx#_spu#_br_tp_seed_b
6644 *
6645 * CGX SPU BASE-R Test-Pattern Seed B Registers Refer to the test pattern
6646 * methodology described in 802.3 sections 49.2.8 and 82.2.10.
6647 */
6648union cgxx_spux_br_tp_seed_b {
6649        u64 u;
6650        struct cgxx_spux_br_tp_seed_b_s {
6651                u64 tp_seed_b                        : 58;
6652                u64 reserved_58_63                   : 6;
6653        } s;
6654        /* struct cgxx_spux_br_tp_seed_b_s cn; */
6655};
6656
6657static inline u64 CGXX_SPUX_BR_TP_SEED_B(u64 a)
6658        __attribute__ ((pure, always_inline));
6659static inline u64 CGXX_SPUX_BR_TP_SEED_B(u64 a)
6660{
6661        return 0x10068 + 0x40000 * a;
6662}
6663
6664/**
6665 * Register (RSL) cgx#_spu#_bx_status
6666 *
6667 * CGX SPU BASE-X Status Registers
6668 */
6669union cgxx_spux_bx_status {
6670        u64 u;
6671        struct cgxx_spux_bx_status_s {
6672                u64 lsync                            : 4;
6673                u64 reserved_4_10                    : 7;
6674                u64 pattst                           : 1;
6675                u64 alignd                           : 1;
6676                u64 reserved_13_63                   : 51;
6677        } s;
6678        /* struct cgxx_spux_bx_status_s cn; */
6679};
6680
6681static inline u64 CGXX_SPUX_BX_STATUS(u64 a)
6682        __attribute__ ((pure, always_inline));
6683static inline u64 CGXX_SPUX_BX_STATUS(u64 a)
6684{
6685        return 0x10028 + 0x40000 * a;
6686}
6687
6688/**
6689 * Register (RSL) cgx#_spu#_control1
6690 *
6691 * CGX SPU Control 1 Registers
6692 */
6693union cgxx_spux_control1 {
6694        u64 u;
6695        struct cgxx_spux_control1_s {
6696                u64 reserved_0_1                     : 2;
6697                u64 spd                              : 4;
6698                u64 spdsel0                          : 1;
6699                u64 reserved_7_10                    : 4;
6700                u64 lo_pwr                           : 1;
6701                u64 reserved_12                      : 1;
6702                u64 spdsel1                          : 1;
6703                u64 loopbck                          : 1;
6704                u64 reset                            : 1;
6705                u64 usxgmii_type                     : 3;
6706                u64 usxgmii_rate                     : 3;
6707                u64 disable_am                       : 1;
6708                u64 reserved_23_63                   : 41;
6709        } s;
6710        struct cgxx_spux_control1_cn96xxp1 {
6711                u64 reserved_0_1                     : 2;
6712                u64 spd                              : 4;
6713                u64 spdsel0                          : 1;
6714                u64 reserved_7_10                    : 4;
6715                u64 lo_pwr                           : 1;
6716                u64 reserved_12                      : 1;
6717                u64 spdsel1                          : 1;
6718                u64 loopbck                          : 1;
6719                u64 reset                            : 1;
6720                u64 usxgmii_type                     : 3;
6721                u64 usxgmii_rate                     : 3;
6722                u64 reserved_22_63                   : 42;
6723        } cn96xxp1;
6724        /* struct cgxx_spux_control1_s cn96xxp3; */
6725        /* struct cgxx_spux_control1_cn96xxp1 cnf95xxp1; */
6726        struct cgxx_spux_control1_cnf95xxp2 {
6727                u64 reserved_0_1                     : 2;
6728                u64 spd                              : 4;
6729                u64 spdsel0                          : 1;
6730                u64 reserved_7_10                    : 4;
6731                u64 lo_pwr                           : 1;
6732                u64 reserved_12                      : 1;
6733                u64 spdsel1                          : 1;
6734                u64 loopbck                          : 1;
6735                u64 reset                            : 1;
6736                u64 usxgmii_type                     : 3;
6737                u64 usxgmii_rate                     : 3;
6738                u64 reserved_22                      : 1;
6739                u64 reserved_23_63                   : 41;
6740        } cnf95xxp2;
6741};
6742
6743static inline u64 CGXX_SPUX_CONTROL1(u64 a)
6744        __attribute__ ((pure, always_inline));
6745static inline u64 CGXX_SPUX_CONTROL1(u64 a)
6746{
6747        return 0x10000 + 0x40000 * a;
6748}
6749
6750/**
6751 * Register (RSL) cgx#_spu#_control2
6752 *
6753 * CGX SPU Control 2 Registers
6754 */
6755union cgxx_spux_control2 {
6756        u64 u;
6757        struct cgxx_spux_control2_s {
6758                u64 pcs_type                         : 4;
6759                u64 reserved_4_63                    : 60;
6760        } s;
6761        /* struct cgxx_spux_control2_s cn; */
6762};
6763
6764static inline u64 CGXX_SPUX_CONTROL2(u64 a)
6765        __attribute__ ((pure, always_inline));
6766static inline u64 CGXX_SPUX_CONTROL2(u64 a)
6767{
6768        return 0x10018 + 0x40000 * a;
6769}
6770
6771/**
6772 * Register (RSL) cgx#_spu#_fec_abil
6773 *
6774 * CGX SPU Forward Error Correction Ability Registers
6775 */
6776union cgxx_spux_fec_abil {
6777        u64 u;
6778        struct cgxx_spux_fec_abil_s {
6779                u64 fec_abil                         : 1;
6780                u64 err_abil                         : 1;
6781                u64 reserved_2_63                    : 62;
6782        } s;
6783        /* struct cgxx_spux_fec_abil_s cn; */
6784};
6785
6786static inline u64 CGXX_SPUX_FEC_ABIL(u64 a)
6787        __attribute__ ((pure, always_inline));
6788static inline u64 CGXX_SPUX_FEC_ABIL(u64 a)
6789{
6790        return 0x100d8 + 0x40000 * a;
6791}
6792
6793/**
6794 * Register (RSL) cgx#_spu#_fec_control
6795 *
6796 * CGX SPU Forward Error Correction Control Registers
6797 */
6798union cgxx_spux_fec_control {
6799        u64 u;
6800        struct cgxx_spux_fec_control_s {
6801                u64 fec_en                           : 2;
6802                u64 err_en                           : 1;
6803                u64 fec_byp_ind_en                   : 1;
6804                u64 fec_byp_cor_en                   : 1;
6805                u64 reserved_5_63                    : 59;
6806        } s;
6807        /* struct cgxx_spux_fec_control_s cn; */
6808};
6809
6810static inline u64 CGXX_SPUX_FEC_CONTROL(u64 a)
6811        __attribute__ ((pure, always_inline));
6812static inline u64 CGXX_SPUX_FEC_CONTROL(u64 a)
6813{
6814        return 0x100e0 + 0x40000 * a;
6815}
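
/*
 * Usage sketch (illustrative only): enable BASE-R (clause 74) FEC when the
 * ability register reports support.  Treating bit 0 of [FEC_EN] as the
 * clause 74 enable is an assumption of this sketch, as are the
 * readq()/writeq() accessors.
 */
static inline int cgxx_spux_fec_enable_example(void __iomem *base, u64 lmac)
{
	union cgxx_spux_fec_abil abil;
	union cgxx_spux_fec_control ctl;

	abil.u = readq(base + CGXX_SPUX_FEC_ABIL(lmac));
	if (!abil.s.fec_abil)
		return -1;	/* FEC not supported for this LPCS type */

	ctl.u = readq(base + CGXX_SPUX_FEC_CONTROL(lmac));
	ctl.s.fec_en |= 1;	/* assumed: bit 0 enables BASE-R FEC */
	writeq(ctl.u, base + CGXX_SPUX_FEC_CONTROL(lmac));
	return 0;
}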
6816
6817/**
6818 * Register (RSL) cgx#_spu#_fec_ln#_rsfec_err
6819 *
6820 * CGX SPU Reed-Solomon FEC Symbol Error Counter for FEC Lanes 0-3
6821 * Registers This register is valid only when Reed-Solomon FEC is
6822 * enabled. The symbol error counters are defined in 802.3 section
6823 * 91.6.11 (for 100G and extended to 50G) and 802.3by-2016 section
6824 * 108.6.9 (for 25G and extended to USXGMII). The counter is reset to all
6825 * zeros when the register is read, and held at all ones in case of
6826 * overflow.  The reset operation takes precedence over the increment
6827 * operation; if the register is read on the same clock cycle as an
6828 * increment operation, the counter is reset to all zeros and the
6829 * increment operation is lost. The counters are writable for test
6830 * purposes, rather than read-only as specified in IEEE 802.3.
6831 */
6832union cgxx_spux_fec_lnx_rsfec_err {
6833        u64 u;
6834        struct cgxx_spux_fec_lnx_rsfec_err_s {
6835                u64 symb_err_cnt                     : 32;
6836                u64 reserved_32_63                   : 32;
6837        } s;
6838        /* struct cgxx_spux_fec_lnx_rsfec_err_s cn; */
6839};
6840
6841static inline u64 CGXX_SPUX_FEC_LNX_RSFEC_ERR(u64 a, u64 b)
6842        __attribute__ ((pure, always_inline));
6843static inline u64 CGXX_SPUX_FEC_LNX_RSFEC_ERR(u64 a, u64 b)
6844{
6845        return 0x10900 + 0x40000 * a + 8 * b;
6846}
6847
6848/**
6849 * Register (RSL) cgx#_spu#_int
6850 *
6851 * CGX SPU Interrupt Registers
6852 */
6853union cgxx_spux_int {
6854        u64 u;
6855        struct cgxx_spux_int_s {
6856                u64 rx_link_up                       : 1;
6857                u64 rx_link_down                     : 1;
6858                u64 err_blk                          : 1;
6859                u64 bitlckls                         : 1;
6860                u64 synlos                           : 1;
6861                u64 algnlos                          : 1;
6862                u64 dbg_sync                         : 1;
6863                u64 bip_err                          : 1;
6864                u64 fec_corr                         : 1;
6865                u64 fec_uncorr                       : 1;
6866                u64 an_page_rx                       : 1;
6867                u64 an_link_good                     : 1;
6868                u64 an_complete                      : 1;
6869                u64 training_done                    : 1;
6870                u64 training_failure                 : 1;
6871                u64 fec_align_status                 : 1;
6872                u64 rsfec_corr                       : 1;
6873                u64 rsfec_uncorr                     : 1;
6874                u64 hi_ser                           : 1;
6875                u64 usx_an_lnk_st                    : 1;
6876                u64 usx_an_cpt                       : 1;
6877                u64 reserved_21_63                   : 43;
6878        } s;
6879        /* struct cgxx_spux_int_s cn; */
6880};
6881
6882static inline u64 CGXX_SPUX_INT(u64 a)
6883        __attribute__ ((pure, always_inline));
6884static inline u64 CGXX_SPUX_INT(u64 a)
6885{
6886        return 0x10220 + 0x40000 * a;
6887}
6888
6889/**
6890 * Register (RSL) cgx#_spu#_int_ena_w1c
6891 *
6892 * CGX SPU Interrupt Enable Clear Registers This register clears
6893 * interrupt enable bits.
6894 */
6895union cgxx_spux_int_ena_w1c {
6896        u64 u;
6897        struct cgxx_spux_int_ena_w1c_s {
6898                u64 rx_link_up                       : 1;
6899                u64 rx_link_down                     : 1;
6900                u64 err_blk                          : 1;
6901                u64 bitlckls                         : 1;
6902                u64 synlos                           : 1;
6903                u64 algnlos                          : 1;
6904                u64 dbg_sync                         : 1;
6905                u64 bip_err                          : 1;
6906                u64 fec_corr                         : 1;
6907                u64 fec_uncorr                       : 1;
6908                u64 an_page_rx                       : 1;
6909                u64 an_link_good                     : 1;
6910                u64 an_complete                      : 1;
6911                u64 training_done                    : 1;
6912                u64 training_failure                 : 1;
6913                u64 fec_align_status                 : 1;
6914                u64 rsfec_corr                       : 1;
6915                u64 rsfec_uncorr                     : 1;
6916                u64 hi_ser                           : 1;
6917                u64 usx_an_lnk_st                    : 1;
6918                u64 usx_an_cpt                       : 1;
6919                u64 reserved_21_63                   : 43;
6920        } s;
6921        /* struct cgxx_spux_int_ena_w1c_s cn; */
6922};
6923
6924static inline u64 CGXX_SPUX_INT_ENA_W1C(u64 a)
6925        __attribute__ ((pure, always_inline));
6926static inline u64 CGXX_SPUX_INT_ENA_W1C(u64 a)
6927{
6928        return 0x10230 + 0x40000 * a;
6929}
6930
6931/**
6932 * Register (RSL) cgx#_spu#_int_ena_w1s
6933 *
6934 * CGX SPU Interrupt Enable Set Registers This register sets interrupt
6935 * enable bits.
6936 */
6937union cgxx_spux_int_ena_w1s {
6938        u64 u;
6939        struct cgxx_spux_int_ena_w1s_s {
6940                u64 rx_link_up                       : 1;
6941                u64 rx_link_down                     : 1;
6942                u64 err_blk                          : 1;
6943                u64 bitlckls                         : 1;
6944                u64 synlos                           : 1;
6945                u64 algnlos                          : 1;
6946                u64 dbg_sync                         : 1;
6947                u64 bip_err                          : 1;
6948                u64 fec_corr                         : 1;
6949                u64 fec_uncorr                       : 1;
6950                u64 an_page_rx                       : 1;
6951                u64 an_link_good                     : 1;
6952                u64 an_complete                      : 1;
6953                u64 training_done                    : 1;
6954                u64 training_failure                 : 1;
6955                u64 fec_align_status                 : 1;
6956                u64 rsfec_corr                       : 1;
6957                u64 rsfec_uncorr                     : 1;
6958                u64 hi_ser                           : 1;
6959                u64 usx_an_lnk_st                    : 1;
6960                u64 usx_an_cpt                       : 1;
6961                u64 reserved_21_63                   : 43;
6962        } s;
6963        /* struct cgxx_spux_int_ena_w1s_s cn; */
6964};
6965
6966static inline u64 CGXX_SPUX_INT_ENA_W1S(u64 a)
6967        __attribute__ ((pure, always_inline));
6968static inline u64 CGXX_SPUX_INT_ENA_W1S(u64 a)
6969{
6970        return 0x10238 + 0x40000 * a;
6971}
6972
6973/**
6974 * Register (RSL) cgx#_spu#_int_w1s
6975 *
6976 * CGX SPU Interrupt Set Registers This register sets interrupt bits.
6977 */
6978union cgxx_spux_int_w1s {
6979        u64 u;
6980        struct cgxx_spux_int_w1s_s {
6981                u64 rx_link_up                       : 1;
6982                u64 rx_link_down                     : 1;
6983                u64 err_blk                          : 1;
6984                u64 bitlckls                         : 1;
6985                u64 synlos                           : 1;
6986                u64 algnlos                          : 1;
6987                u64 dbg_sync                         : 1;
6988                u64 bip_err                          : 1;
6989                u64 fec_corr                         : 1;
6990                u64 fec_uncorr                       : 1;
6991                u64 an_page_rx                       : 1;
6992                u64 an_link_good                     : 1;
6993                u64 an_complete                      : 1;
6994                u64 training_done                    : 1;
6995                u64 training_failure                 : 1;
6996                u64 fec_align_status                 : 1;
6997                u64 rsfec_corr                       : 1;
6998                u64 rsfec_uncorr                     : 1;
6999                u64 hi_ser                           : 1;
7000                u64 usx_an_lnk_st                    : 1;
7001                u64 usx_an_cpt                       : 1;
7002                u64 reserved_21_63                   : 43;
7003        } s;
7004        /* struct cgxx_spux_int_w1s_s cn; */
7005};
7006
7007static inline u64 CGXX_SPUX_INT_W1S(u64 a)
7008        __attribute__ ((pure, always_inline));
7009static inline u64 CGXX_SPUX_INT_W1S(u64 a)
7010{
7011        return 0x10228 + 0x40000 * a;
7012}
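
/*
 * Usage sketch (illustrative only): enable the link up/down interrupt
 * sources through the W1S enable register above, and later acknowledge the
 * latched causes.  Writing the causes back to CGX()_SPU()_INT assumes the
 * usual write-one-to-clear behaviour of RSL interrupt registers; the
 * readq()/writeq() accessors are also assumptions.
 */
static inline void cgxx_spux_link_irq_example(void __iomem *base, u64 lmac)
{
	union cgxx_spux_int_ena_w1s ena;
	union cgxx_spux_int isr;

	ena.u = 0;
	ena.s.rx_link_up = 1;
	ena.s.rx_link_down = 1;
	writeq(ena.u, base + CGXX_SPUX_INT_ENA_W1S(lmac));

	/* Later, in the handler: read the cause bits and clear them. */
	isr.u = readq(base + CGXX_SPUX_INT(lmac));
	writeq(isr.u, base + CGXX_SPUX_INT(lmac));
}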
7013
7014/**
7015 * Register (RSL) cgx#_spu#_ln#_br_bip_err_cnt
7016 *
7017 * CGX SPU 40,50,100GBASE-R BIP Error-Counter Registers This register
7018 * implements the IEEE 802.3 BIP error-counter registers for PCS lanes
7019 * 0-19 (3.200-3.203). It is valid only when the LPCS type is 40GBASE-R,
7020 * 50GBASE-R, 100GBASE-R, (CGX()_CMR()_CONFIG[LMAC_TYPE]), and always
7021 * returns 0x0 for all other LPCS types. The counters are indexed by the
7022 * RX PCS lane number based on the alignment marker detected on each lane
7023 * and captured in CGX()_SPU()_BR_LANE_MAP(). Each counter counts the BIP
7024 * errors for its PCS lane, and is held at all ones in case of overflow.
7025 * The counters are reset to all zeros when this register is read by
7026 * software.  The reset operation takes precedence over the increment
7027 * operation; if the register is read on the same clock cycle as an
7028 * increment operation, the counter is reset to all zeros and the
7029 * increment operation is lost. The counters are writable for test
7030 * purposes, rather than read-only as specified in IEEE 802.3.
7031 */
7032union cgxx_spux_lnx_br_bip_err_cnt {
7033        u64 u;
7034        struct cgxx_spux_lnx_br_bip_err_cnt_s {
7035                u64 bip_err_cnt                      : 16;
7036                u64 reserved_16_63                   : 48;
7037        } s;
7038        /* struct cgxx_spux_lnx_br_bip_err_cnt_s cn; */
7039};
7040
7041static inline u64 CGXX_SPUX_LNX_BR_BIP_ERR_CNT(u64 a, u64 b)
7042        __attribute__ ((pure, always_inline));
7043static inline u64 CGXX_SPUX_LNX_BR_BIP_ERR_CNT(u64 a, u64 b)
7044{
7045        return 0x10500 + 0x40000 * a + 8 * b;
7046}
7047
7048/**
7049 * Register (RSL) cgx#_spu#_ln#_fec_corr_blks
7050 *
7051 * CGX SPU FEC Corrected-Blocks Counters 0-19 Registers This register is
7052 * valid only when the LPCS type is BASE-R
7053 * (CGX()_CMR()_CONFIG[LMAC_TYPE]) and applies to BASE-R FEC and Reed-
7054 * Solomon FEC (RS-FEC). When BASE-R FEC is enabled, the FEC corrected-
7055 * block counters are defined in IEEE 802.3 section 74.8.4.1. Each
7056 * corrected-blocks counter increments by one for a corrected FEC block,
7057 * i.e. an FEC block that has been received with invalid parity on the
7058 * associated PCS lane and has been corrected by the FEC decoder. The
7059 * counter is reset to all zeros when the register is read, and held at
7060 * all ones in case of overflow.  The reset operation takes precedence
7061 * over the increment operation; if the register is read on the same
7062 * clock cycle as an increment operation, the counter is reset to all
7063 * zeros and the increment operation is lost. The counters are writable
7064 * for test purposes, rather than read-only as specified in IEEE 802.3.
7065 */
7066union cgxx_spux_lnx_fec_corr_blks {
7067        u64 u;
7068        struct cgxx_spux_lnx_fec_corr_blks_s {
7069                u64 ln_corr_blks                     : 32;
7070                u64 reserved_32_63                   : 32;
7071        } s;
7072        /* struct cgxx_spux_lnx_fec_corr_blks_s cn; */
7073};
7074
7075static inline u64 CGXX_SPUX_LNX_FEC_CORR_BLKS(u64 a, u64 b)
7076        __attribute__ ((pure, always_inline));
7077static inline u64 CGXX_SPUX_LNX_FEC_CORR_BLKS(u64 a, u64 b)
7078{
7079        return 0x10700 + 0x40000 * a + 8 * b;
7080}
7081
7082/**
7083 * Register (RSL) cgx#_spu#_ln#_fec_uncorr_blks
7084 *
7085 * CGX SPU FEC Uncorrected-Blocks Counters 0-19 Registers This register
7086 * is valid only when the LPCS type is BASE-R
7087 * (CGX()_CMR()_CONFIG[LMAC_TYPE]) and applies to BASE-R FEC and Reed-
7088 * Solomon FEC (RS-FEC). When BASE-R FEC is enabled, the FEC uncorrected-
7089 * block counters are defined in IEEE 802.3 section 74.8.4.2. Each
7090 * uncorrected-blocks counter increments by one for an uncorrected FEC
7091 * block, i.e. an FEC block that has been received with invalid parity on
7092 * the associated PCS lane and has not been corrected by the FEC decoder.
7093 * The counter is reset to all zeros when the register is read, and held
7094 * at all ones in case of overflow.  The reset operation takes precedence
7095 * over the increment operation; if the register is read on the same
7096 * clock cycle as an increment operation, the counter is reset to all
7097 * zeros and the increment operation is lost. The counters are writable
7098 * for test purposes, rather than read-only as specified in IEEE 802.3.
7099 */
7100union cgxx_spux_lnx_fec_uncorr_blks {
7101        u64 u;
7102        struct cgxx_spux_lnx_fec_uncorr_blks_s {
7103                u64 ln_uncorr_blks                   : 32;
7104                u64 reserved_32_63                   : 32;
7105        } s;
7106        /* struct cgxx_spux_lnx_fec_uncorr_blks_s cn; */
7107};
7108
7109static inline u64 CGXX_SPUX_LNX_FEC_UNCORR_BLKS(u64 a, u64 b)
7110        __attribute__ ((pure, always_inline));
7111static inline u64 CGXX_SPUX_LNX_FEC_UNCORR_BLKS(u64 a, u64 b)
7112{
7113        return 0x10800 + 0x40000 * a + 8 * b;
7114}
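
/*
 * Usage sketch (illustrative only): sample the per-lane BASE-R FEC block
 * counters.  As described above these registers clear on read, so one read
 * both returns and resets each count.  The four-lane loop and the readq()
 * accessor are assumptions of this sketch.
 */
static inline void cgxx_spux_fec_blocks_sample_example(void __iomem *base,
						       u64 lmac, u64 corr[4],
						       u64 uncorr[4])
{
	union cgxx_spux_lnx_fec_corr_blks cb;
	union cgxx_spux_lnx_fec_uncorr_blks ub;
	u64 ln;

	for (ln = 0; ln < 4; ln++) {
		cb.u = readq(base + CGXX_SPUX_LNX_FEC_CORR_BLKS(lmac, ln));
		ub.u = readq(base + CGXX_SPUX_LNX_FEC_UNCORR_BLKS(lmac, ln));
		corr[ln] = cb.s.ln_corr_blks;	/* cleared by this read */
		uncorr[ln] = ub.s.ln_uncorr_blks;
	}
}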
7115
7116/**
7117 * Register (RSL) cgx#_spu#_lpcs_states
7118 *
7119 * CGX SPU BASE-X Transmit/Receive States Registers
7120 */
7121union cgxx_spux_lpcs_states {
7122        u64 u;
7123        struct cgxx_spux_lpcs_states_s {
7124                u64 deskew_sm                        : 3;
7125                u64 reserved_3                       : 1;
7126                u64 deskew_am_found                  : 20;
7127                u64 bx_rx_sm                         : 2;
7128                u64 reserved_26_27                   : 2;
7129                u64 br_rx_sm                         : 3;
7130                u64 reserved_31_63                   : 33;
7131        } s;
7132        /* struct cgxx_spux_lpcs_states_s cn; */
7133};
7134
7135static inline u64 CGXX_SPUX_LPCS_STATES(u64 a)
7136        __attribute__ ((pure, always_inline));
7137static inline u64 CGXX_SPUX_LPCS_STATES(u64 a)
7138{
7139        return 0x10208 + 0x40000 * a;
7140}
7141
7142/**
7143 * Register (RSL) cgx#_spu#_misc_control
7144 *
7145 * CGX SPU Miscellaneous Control Registers "* RX logical PCS lane
7146 * polarity vector \<3:0\> = [XOR_RXPLRT]\<3:0\> ^ {4{[RXPLRT]}}. * TX
7147 * logical PCS lane polarity vector \<3:0\> = [XOR_TXPLRT]\<3:0\> ^
7148 * {4{[TXPLRT]}}.  In short, keep [RXPLRT] and [TXPLRT] cleared, and use
7149 * [XOR_RXPLRT] and [XOR_TXPLRT] fields to define the polarity per
7150 * logical PCS lane. Only bit 0 of vector is used for 10GBASE-R, and only
7151 * bits 1:0 of vector are used for RXAUI."
7152 */
7153union cgxx_spux_misc_control {
7154        u64 u;
7155        struct cgxx_spux_misc_control_s {
7156                u64 txplrt                           : 1;
7157                u64 rxplrt                           : 1;
7158                u64 xor_txplrt                       : 4;
7159                u64 xor_rxplrt                       : 4;
7160                u64 intlv_rdisp                      : 1;
7161                u64 skip_after_term                  : 1;
7162                u64 rx_packet_dis                    : 1;
7163                u64 rx_edet_signal_ok                : 1;
7164                u64 reserved_14_63                   : 50;
7165        } s;
7166        /* struct cgxx_spux_misc_control_s cn; */
7167};
7168
7169static inline u64 CGXX_SPUX_MISC_CONTROL(u64 a)
7170        __attribute__ ((pure, always_inline));
7171static inline u64 CGXX_SPUX_MISC_CONTROL(u64 a)
7172{
7173        return 0x10218 + 0x40000 * a;
7174}
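
/*
 * Usage sketch (illustrative only): invert the receive polarity of selected
 * logical PCS lanes through the per-lane XOR field, keeping the global
 * [RXPLRT] bit clear as recommended in the description above.
 * readq()/writeq() are assumed accessors.
 */
static inline void cgxx_spux_rx_polarity_example(void __iomem *base, u64 lmac,
						 u64 invert_mask)
{
	union cgxx_spux_misc_control misc;

	misc.u = readq(base + CGXX_SPUX_MISC_CONTROL(lmac));
	misc.s.rxplrt = 0;			/* keep the global flip clear */
	misc.s.xor_rxplrt = invert_mask & 0xf;	/* per-lane polarity invert */
	writeq(misc.u, base + CGXX_SPUX_MISC_CONTROL(lmac));
}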
7175
7176/**
7177 * Register (RSL) cgx#_spu#_rsfec_corr
7178 *
7179 * CGX SPU Reed-Solomon FEC Corrected Codeword Counter Register This
7180 * register implements the IEEE 802.3 RS-FEC corrected codewords counter
7181 * described in 802.3 section 91.6.8 (for 100G and extended to 50G) and
7182 * 802.3by-2016 section 108.6.7 (for 25G and extended to USXGMII).
7183 */
7184union cgxx_spux_rsfec_corr {
7185        u64 u;
7186        struct cgxx_spux_rsfec_corr_s {
7187                u64 cw_cnt                           : 32;
7188                u64 reserved_32_63                   : 32;
7189        } s;
7190        /* struct cgxx_spux_rsfec_corr_s cn; */
7191};
7192
7193static inline u64 CGXX_SPUX_RSFEC_CORR(u64 a)
7194        __attribute__ ((pure, always_inline));
7195static inline u64 CGXX_SPUX_RSFEC_CORR(u64 a)
7196{
7197        return 0x10088 + 0x40000 * a;
7198}
7199
7200/**
7201 * Register (RSL) cgx#_spu#_rsfec_status
7202 *
7203 * CGX SPU Reed-Solomon FEC Status Registers This register implements the
7204 * IEEE 802.3 RS-FEC status and lane mapping registers as described in
7205 * 802.3 section 91.6 (for 100G and extended to 50G) and 802.3by-2016
7206 * section 108.6 (for 25G and extended to USXGMII).
7207 */
7208union cgxx_spux_rsfec_status {
7209        u64 u;
7210        struct cgxx_spux_rsfec_status_s {
7211                u64 fec_lane_mapping                 : 8;
7212                u64 fec_align_status                 : 1;
7213                u64 amps_lock                        : 4;
7214                u64 hi_ser                           : 1;
7215                u64 fec_byp_ind_abil                 : 1;
7216                u64 fec_byp_cor_abil                 : 1;
7217                u64 reserved_16_63                   : 48;
7218        } s;
7219        /* struct cgxx_spux_rsfec_status_s cn; */
7220};
7221
7222static inline u64 CGXX_SPUX_RSFEC_STATUS(u64 a)
7223        __attribute__ ((pure, always_inline));
7224static inline u64 CGXX_SPUX_RSFEC_STATUS(u64 a)
7225{
7226        return 0x10080 + 0x40000 * a;
7227}
7228
7229/**
7230 * Register (RSL) cgx#_spu#_rsfec_uncorr
7231 *
7232 * CGX SPU Reed-Solomon FEC Uncorrected Codeword Counter Register This
7233 * register implements the IEEE 802.3 RS-FEC uncorrected codewords
7234 * counter described in 802.3 section 91.6.9 (for 100G and extended to
7235 * 50G) and 802.3by-2016 section 108.6.8 (for 25G and extended to
7236 * USXGMII).
7237 */
7238union cgxx_spux_rsfec_uncorr {
7239        u64 u;
7240        struct cgxx_spux_rsfec_uncorr_s {
7241                u64 cw_cnt                           : 32;
7242                u64 reserved_32_63                   : 32;
7243        } s;
7244        /* struct cgxx_spux_rsfec_uncorr_s cn; */
7245};
7246
7247static inline u64 CGXX_SPUX_RSFEC_UNCORR(u64 a)
7248        __attribute__ ((pure, always_inline));
7249static inline u64 CGXX_SPUX_RSFEC_UNCORR(u64 a)
7250{
7251        return 0x10090 + 0x40000 * a;
7252}
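
/*
 * Usage sketch (illustrative only): take an RS-FEC health snapshot by
 * checking alignment and the high symbol error rate flag, then sampling the
 * corrected/uncorrected codeword counters defined above.  readq() is an
 * assumed accessor; -1 means the RS-FEC lanes are not yet aligned.
 */
static inline int cgxx_spux_rsfec_health_example(void __iomem *base, u64 lmac,
						 u64 *corr_cw, u64 *uncorr_cw)
{
	union cgxx_spux_rsfec_status st;
	union cgxx_spux_rsfec_corr cw;
	union cgxx_spux_rsfec_uncorr ucw;

	st.u = readq(base + CGXX_SPUX_RSFEC_STATUS(lmac));
	if (!st.s.fec_align_status)
		return -1;

	cw.u = readq(base + CGXX_SPUX_RSFEC_CORR(lmac));
	ucw.u = readq(base + CGXX_SPUX_RSFEC_UNCORR(lmac));
	*corr_cw = cw.s.cw_cnt;
	*uncorr_cw = ucw.s.cw_cnt;
	return st.s.hi_ser ? 1 : 0;	/* 1 = high symbol error rate seen */
}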
7253
7254/**
7255 * Register (RSL) cgx#_spu#_rx_eee_wake
7256 *
7257 * INTERNAL: CGX SPU  RX EEE Wake Error Counter  Registers  Reserved.
7258 * Internal: A counter that is incremented each time that the LPI receive
7259 * state diagram enters the RX_WTF state indicating that a wake time
7260 * fault has been detected.
7261 */
7262union cgxx_spux_rx_eee_wake {
7263        u64 u;
7264        struct cgxx_spux_rx_eee_wake_s {
7265                u64 wtf_error_counter                : 16;
7266                u64 reserved_16_63                   : 48;
7267        } s;
7268        /* struct cgxx_spux_rx_eee_wake_s cn; */
7269};
7270
7271static inline u64 CGXX_SPUX_RX_EEE_WAKE(u64 a)
7272        __attribute__ ((pure, always_inline));
7273static inline u64 CGXX_SPUX_RX_EEE_WAKE(u64 a)
7274{
7275        return 0x103e0 + 8 * a;
7276}
7277
7278/**
7279 * Register (RSL) cgx#_spu#_rx_lpi_timing
7280 *
7281 * INTERNAL: CGX SPU RX EEE LPI Timing Parameters Registers  Reserved.
7282 * Internal: This register specifies receiver LPI timing parameters Tqr,
7283 * Twr and Twtf.
7284 */
7285union cgxx_spux_rx_lpi_timing {
7286        u64 u;
7287        struct cgxx_spux_rx_lpi_timing_s {
7288                u64 twtf                             : 20;
7289                u64 twr                              : 20;
7290                u64 tqr                              : 20;
7291                u64 reserved_60_61                   : 2;
7292                u64 rx_lpi_fw                        : 1;
7293                u64 rx_lpi_en                        : 1;
7294        } s;
7295        /* struct cgxx_spux_rx_lpi_timing_s cn; */
7296};
7297
7298static inline u64 CGXX_SPUX_RX_LPI_TIMING(u64 a)
7299        __attribute__ ((pure, always_inline));
7300static inline u64 CGXX_SPUX_RX_LPI_TIMING(u64 a)
7301{
7302        return 0x103c0 + 8 * a;
7303}
7304
7305/**
7306 * Register (RSL) cgx#_spu#_rx_lpi_timing2
7307 *
7308 * INTERNAL: CGX SPU RX EEE LPI Timing2 Parameters Registers  Reserved.
7309 * Internal: This register specifies receiver LPI timing parameters
7310 * hold_off_timer.
7311 */
7312union cgxx_spux_rx_lpi_timing2 {
7313        u64 u;
7314        struct cgxx_spux_rx_lpi_timing2_s {
7315                u64 hold_off_timer                   : 20;
7316                u64 reserved_20_63                   : 44;
7317        } s;
7318        /* struct cgxx_spux_rx_lpi_timing2_s cn; */
7319};
7320
7321static inline u64 CGXX_SPUX_RX_LPI_TIMING2(u64 a)
7322        __attribute__ ((pure, always_inline));
7323static inline u64 CGXX_SPUX_RX_LPI_TIMING2(u64 a)
7324{
7325        return 0x10420 + 8 * a;
7326}
7327
7328/**
7329 * Register (RSL) cgx#_spu#_rx_mrk_cnt
7330 *
7331 * CGX SPU Receiver Marker Interval Count Control Registers
7332 */
7333union cgxx_spux_rx_mrk_cnt {
7334        u64 u;
7335        struct cgxx_spux_rx_mrk_cnt_s {
7336                u64 mrk_cnt                          : 20;
7337                u64 reserved_20_43                   : 24;
7338                u64 by_mrk_100g                      : 1;
7339                u64 reserved_45_47                   : 3;
7340                u64 ram_mrk_cnt                      : 8;
7341                u64 reserved_56_63                   : 8;
7342        } s;
7343        /* struct cgxx_spux_rx_mrk_cnt_s cn; */
7344};
7345
7346static inline u64 CGXX_SPUX_RX_MRK_CNT(u64 a)
7347        __attribute__ ((pure, always_inline));
7348static inline u64 CGXX_SPUX_RX_MRK_CNT(u64 a)
7349{
7350        return 0x103a0 + 8 * a;
7351}
7352
7353/**
7354 * Register (RSL) cgx#_spu#_spd_abil
7355 *
7356 * CGX SPU PCS Speed Ability Registers
7357 */
7358union cgxx_spux_spd_abil {
7359        u64 u;
7360        struct cgxx_spux_spd_abil_s {
7361                u64 tengb                            : 1;
7362                u64 tenpasst                         : 1;
7363                u64 usxgmii                          : 1;
7364                u64 twentyfivegb                     : 1;
7365                u64 fortygb                          : 1;
7366                u64 fiftygb                          : 1;
7367                u64 hundredgb                        : 1;
7368                u64 reserved_7_63                    : 57;
7369        } s;
7370        /* struct cgxx_spux_spd_abil_s cn; */
7371};
7372
7373static inline u64 CGXX_SPUX_SPD_ABIL(u64 a)
7374        __attribute__ ((pure, always_inline));
7375static inline u64 CGXX_SPUX_SPD_ABIL(u64 a)
7376{
7377        return 0x10010 + 0x40000 * a;
7378}
7379
7380/**
7381 * Register (RSL) cgx#_spu#_status1
7382 *
7383 * CGX SPU Status 1 Registers
7384 */
7385union cgxx_spux_status1 {
7386        u64 u;
7387        struct cgxx_spux_status1_s {
7388                u64 reserved_0                       : 1;
7389                u64 lpable                           : 1;
7390                u64 rcv_lnk                          : 1;
7391                u64 reserved_3_6                     : 4;
7392                u64 flt                              : 1;
7393                u64 rx_lpi_indication                : 1;
7394                u64 tx_lpi_indication                : 1;
7395                u64 rx_lpi_received                  : 1;
7396                u64 tx_lpi_received                  : 1;
7397                u64 reserved_12_63                   : 52;
7398        } s;
7399        /* struct cgxx_spux_status1_s cn; */
7400};
7401
7402static inline u64 CGXX_SPUX_STATUS1(u64 a)
7403        __attribute__ ((pure, always_inline));
7404static inline u64 CGXX_SPUX_STATUS1(u64 a)
7405{
7406        return 0x10008 + 0x40000 * a;
7407}
7408
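/*
 * Illustrative sketch: polling link and fault state through STATUS1.
 * Whether [RCV_LNK] is a latching-low bit (as in the standard Clause 45
 * PCS status register 1) is not stated in this file, so the double read
 * below is a hedge rather than a documented requirement.  `cgx_base`,
 * readq() and `lmac` are assumptions for the example.
 *
 *	union cgxx_spux_status1 st;
 *	bool link_up;
 *
 *	st.u = readq(cgx_base + CGXX_SPUX_STATUS1(lmac));
 *	st.u = readq(cgx_base + CGXX_SPUX_STATUS1(lmac)); // re-read in case the bit latches
 *	link_up = st.s.rcv_lnk && !st.s.flt;
 */
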
7409/**
7410 * Register (RSL) cgx#_spu#_status2
7411 *
7412 * CGX SPU Status 2 Registers
7413 */
7414union cgxx_spux_status2 {
7415        u64 u;
7416        struct cgxx_spux_status2_s {
7417                u64 tengb_r                          : 1;
7418                u64 tengb_x                          : 1;
7419                u64 tengb_w                          : 1;
7420                u64 tengb_t                          : 1;
7421                u64 usxgmii_r                        : 1;
7422                u64 twentyfivegb_r                   : 1;
7423                u64 fortygb_r                        : 1;
7424                u64 fiftygb_r                        : 1;
7425                u64 hundredgb_r                      : 1;
7426                u64 reserved_9                       : 1;
7427                u64 rcvflt                           : 1;
7428                u64 xmtflt                           : 1;
7429                u64 reserved_12_13                   : 2;
7430                u64 dev                              : 2;
7431                u64 reserved_16_63                   : 48;
7432        } s;
7433        /* struct cgxx_spux_status2_s cn; */
7434};
7435
7436static inline u64 CGXX_SPUX_STATUS2(u64 a)
7437        __attribute__ ((pure, always_inline));
7438static inline u64 CGXX_SPUX_STATUS2(u64 a)
7439{
7440        return 0x10020 + 0x40000 * a;
7441}
7442
7443/**
7444 * Register (RSL) cgx#_spu#_tx_lpi_timing
7445 *
7446 * INTERNAL: CGX SPU TX EEE LPI Timing Parameters Registers  Reserved.
7447 * Internal: Transmit LPI timing parameters Tsl, Tql and Tul
7448 */
7449union cgxx_spux_tx_lpi_timing {
7450        u64 u;
7451        struct cgxx_spux_tx_lpi_timing_s {
7452                u64 tql                              : 19;
7453                u64 reserved_19_31                   : 13;
7454                u64 tul                              : 12;
7455                u64 reserved_44_47                   : 4;
7456                u64 tsl                              : 12;
7457                u64 reserved_60                      : 1;
7458                u64 tx_lpi_ignore_twl                : 1;
7459                u64 tx_lpi_fw                        : 1;
7460                u64 tx_lpi_en                        : 1;
7461        } s;
7462        /* struct cgxx_spux_tx_lpi_timing_s cn; */
7463};
7464
7465static inline u64 CGXX_SPUX_TX_LPI_TIMING(u64 a)
7466        __attribute__ ((pure, always_inline));
7467static inline u64 CGXX_SPUX_TX_LPI_TIMING(u64 a)
7468{
7469        return 0x10400 + 8 * a;
7470}
7471
7472/**
7473 * Register (RSL) cgx#_spu#_tx_lpi_timing2
7474 *
7475 * INTERNAL: CGX SPU TX EEE LPI Timing2 Parameters Registers  Reserved.
7476 * Internal: This register specifies transmit LPI timer parameters.
7477 */
7478union cgxx_spux_tx_lpi_timing2 {
7479        u64 u;
7480        struct cgxx_spux_tx_lpi_timing2_s {
7481                u64 t1u                              : 8;
7482                u64 reserved_8_11                    : 4;
7483                u64 twl                              : 12;
7484                u64 reserved_24_31                   : 8;
7485                u64 twl2                             : 12;
7486                u64 reserved_44_47                   : 4;
7487                u64 tbyp                             : 12;
7488                u64 reserved_60_63                   : 4;
7489        } s;
7490        /* struct cgxx_spux_tx_lpi_timing2_s cn; */
7491};
7492
7493static inline u64 CGXX_SPUX_TX_LPI_TIMING2(u64 a)
7494        __attribute__ ((pure, always_inline));
7495static inline u64 CGXX_SPUX_TX_LPI_TIMING2(u64 a)
7496{
7497        return 0x10440 + 8 * a;
7498}
7499
7500/**
7501 * Register (RSL) cgx#_spu#_tx_mrk_cnt
7502 *
7503 * CGX SPU Transmitter Marker Interval Count Control Registers
7504 */
7505union cgxx_spux_tx_mrk_cnt {
7506        u64 u;
7507        struct cgxx_spux_tx_mrk_cnt_s {
7508                u64 mrk_cnt                          : 20;
7509                u64 reserved_20_43                   : 24;
7510                u64 by_mrk_100g                      : 1;
7511                u64 reserved_45_47                   : 3;
7512                u64 ram_mrk_cnt                      : 8;
7513                u64 reserved_56_63                   : 8;
7514        } s;
7515        /* struct cgxx_spux_tx_mrk_cnt_s cn; */
7516};
7517
7518static inline u64 CGXX_SPUX_TX_MRK_CNT(u64 a)
7519        __attribute__ ((pure, always_inline));
7520static inline u64 CGXX_SPUX_TX_MRK_CNT(u64 a)
7521{
7522        return 0x10380 + 8 * a;
7523}
7524
7525/**
7526 * Register (RSL) cgx#_spu#_usx_an_adv
7527 *
7528 * CGX SPU USXGMII Autonegotiation Advertisement Registers Software
7529 * programs this register with the contents of the AN-link code word base
7530 * page to be transmitted during autonegotiation. Any write operations to
7531 * this register prior to completion of autonegotiation should be
7532 * followed by a renegotiation in order for the new values to take
7533 * effect. Once autonegotiation has completed, software can examine this
7534 * register along with CGX()_SPU()_USX_AN_LP_ABIL to determine the highest
7535 * common denominator technology. The format for this register is from
7536 * USXGMII Multiport specification section 1.1.2 Table 2.
7537 */
7538union cgxx_spux_usx_an_adv {
7539        u64 u;
7540        struct cgxx_spux_usx_an_adv_s {
7541                u64 set                              : 1;
7542                u64 reserved_1_6                     : 6;
7543                u64 eee_clk_stop_abil                : 1;
7544                u64 eee_abil                         : 1;
7545                u64 spd                              : 3;
7546                u64 dplx                             : 1;
7547                u64 reserved_13_14                   : 2;
7548                u64 lnk_st                           : 1;
7549                u64 reserved_16_63                   : 48;
7550        } s;
7551        /* struct cgxx_spux_usx_an_adv_s cn; */
7552};
7553
7554static inline u64 CGXX_SPUX_USX_AN_ADV(u64 a)
7555        __attribute__ ((pure, always_inline));
7556static inline u64 CGXX_SPUX_USX_AN_ADV(u64 a)
7557{
7558        return 0x101d0 + 0x40000 * a;
7559}
7560
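/*
 * Illustrative sketch of the flow described above: program the local
 * advertisement, then trigger a renegotiation so the new base page takes
 * effect.  Using [RST_AN] in CGX()_SPU()_USX_AN_CONTROL to restart
 * autonegotiation is an assumption based on the field name; `cgx_base`,
 * readq()/writeq(), `lmac` and `spd_code` are likewise placeholders for
 * the example.
 *
 *	union cgxx_spux_usx_an_adv adv;
 *	union cgxx_spux_usx_an_control ctl;
 *
 *	adv.u = readq(cgx_base + CGXX_SPUX_USX_AN_ADV(lmac));
 *	adv.s.spd = spd_code;   // speed encoding per the USXGMII spec table
 *	adv.s.dplx = 1;         // advertise full duplex
 *	writeq(adv.u, cgx_base + CGXX_SPUX_USX_AN_ADV(lmac));
 *
 *	ctl.u = readq(cgx_base + CGXX_SPUX_USX_AN_CONTROL(lmac));
 *	ctl.s.rst_an = 1;       // restart AN so the new base page is sent
 *	writeq(ctl.u, cgx_base + CGXX_SPUX_USX_AN_CONTROL(lmac));
 */
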
7561/**
7562 * Register (RSL) cgx#_spu#_usx_an_control
7563 *
7564 * CGX SPU USXGMII Autonegotiation Control Register
7565 */
7566union cgxx_spux_usx_an_control {
7567        u64 u;
7568        struct cgxx_spux_usx_an_control_s {
7569                u64 reserved_0_8                     : 9;
7570                u64 rst_an                           : 1;
7571                u64 reserved_10_11                   : 2;
7572                u64 an_en                            : 1;
7573                u64 reserved_13_14                   : 2;
7574                u64 an_reset                         : 1;
7575                u64 reserved_16_63                   : 48;
7576        } s;
7577        /* struct cgxx_spux_usx_an_control_s cn; */
7578};
7579
7580static inline u64 CGXX_SPUX_USX_AN_CONTROL(u64 a)
7581        __attribute__ ((pure, always_inline));
7582static inline u64 CGXX_SPUX_USX_AN_CONTROL(u64 a)
7583{
7584        return 0x101c0 + 0x40000 * a;
7585}
7586
7587/**
7588 * Register (RSL) cgx#_spu#_usx_an_expansion
7589 *
7590 * CGX SPU USXGMII Autonegotiation Expansion Register This register is
7591 * only used to signal page reception.
7592 */
7593union cgxx_spux_usx_an_expansion {
7594        u64 u;
7595        struct cgxx_spux_usx_an_expansion_s {
7596                u64 reserved_0                       : 1;
7597                u64 an_page_received                 : 1;
7598                u64 next_page_able                   : 1;
7599                u64 reserved_3_63                    : 61;
7600        } s;
7601        /* struct cgxx_spux_usx_an_expansion_s cn; */
7602};
7603
7604static inline u64 CGXX_SPUX_USX_AN_EXPANSION(u64 a)
7605        __attribute__ ((pure, always_inline));
7606static inline u64 CGXX_SPUX_USX_AN_EXPANSION(u64 a)
7607{
7608        return 0x101e0 + 0x40000 * a;
7609}
7610
7611/**
7612 * Register (RSL) cgx#_spu#_usx_an_flow_ctrl
7613 *
7614 * CGX SPU USXGMII Flow Control Registers This register is used by
7615 * software to affect USXGMII AN hardware behavior.
7616 */
7617union cgxx_spux_usx_an_flow_ctrl {
7618        u64 u;
7619        struct cgxx_spux_usx_an_flow_ctrl_s {
7620                u64 start_idle_detect                : 1;
7621                u64 reserved_1_63                    : 63;
7622        } s;
7623        /* struct cgxx_spux_usx_an_flow_ctrl_s cn; */
7624};
7625
7626static inline u64 CGXX_SPUX_USX_AN_FLOW_CTRL(u64 a)
7627        __attribute__ ((pure, always_inline));
7628static inline u64 CGXX_SPUX_USX_AN_FLOW_CTRL(u64 a)
7629{
7630        return 0x101e8 + 0x40000 * a;
7631}
7632
7633/**
7634 * Register (RSL) cgx#_spu#_usx_an_link_timer
7635 *
7636 * CGX SPU USXGMII Link Timer Registers This is the link timer register.
7637 */
7638union cgxx_spux_usx_an_link_timer {
7639        u64 u;
7640        struct cgxx_spux_usx_an_link_timer_s {
7641                u64 count                            : 16;
7642                u64 reserved_16_63                   : 48;
7643        } s;
7644        /* struct cgxx_spux_usx_an_link_timer_s cn; */
7645};
7646
7647static inline u64 CGXX_SPUX_USX_AN_LINK_TIMER(u64 a)
7648        __attribute__ ((pure, always_inline));
7649static inline u64 CGXX_SPUX_USX_AN_LINK_TIMER(u64 a)
7650{
7651        return 0x101f0 + 0x40000 * a;
7652}
7653
7654/**
7655 * Register (RSL) cgx#_spu#_usx_an_lp_abil
7656 *
7657 * CGX SPU USXGMII Autonegotiation Link-Partner Advertisement Registers
7658 * This register captures the contents of the latest AN link code word
7659 * base page received from the link partner during autonegotiation. This
7660 * is register 5 per IEEE 802.3, Clause 37.
7661 * CGX()_SPU()_USX_AN_EXPANSION[AN_PAGE_RECEIVED] is set when this
7662 * register is updated by hardware.
7663 */
7664union cgxx_spux_usx_an_lp_abil {
7665        u64 u;
7666        struct cgxx_spux_usx_an_lp_abil_s {
7667                u64 set                              : 1;
7668                u64 reserved_1_6                     : 6;
7669                u64 eee_clk_stop_abil                : 1;
7670                u64 eee_abil                         : 1;
7671                u64 spd                              : 3;
7672                u64 dplx                             : 1;
7673                u64 reserved_13_14                   : 2;
7674                u64 lnk_st                           : 1;
7675                u64 reserved_16_63                   : 48;
7676        } s;
7677        /* struct cgxx_spux_usx_an_lp_abil_s cn; */
7678};
7679
7680static inline u64 CGXX_SPUX_USX_AN_LP_ABIL(u64 a)
7681        __attribute__ ((pure, always_inline));
7682static inline u64 CGXX_SPUX_USX_AN_LP_ABIL(u64 a)
7683{
7684        return 0x101d8 + 0x40000 * a;
7685}
7686
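/*
 * Illustrative sketch: wait for CGX()_SPU()_USX_AN_EXPANSION[AN_PAGE_RECEIVED]
 * and then read the link partner's base page, as the description above
 * suggests.  `cgx_base`, readq(), `lmac` and the bare polling loop with no
 * timeout are assumptions kept minimal for the example.
 *
 *	union cgxx_spux_usx_an_expansion exp;
 *	union cgxx_spux_usx_an_lp_abil lp;
 *
 *	do {
 *		exp.u = readq(cgx_base + CGXX_SPUX_USX_AN_EXPANSION(lmac));
 *	} while (!exp.s.an_page_received);
 *
 *	lp.u = readq(cgx_base + CGXX_SPUX_USX_AN_LP_ABIL(lmac));
 *	// compare lp.s.spd and lp.s.dplx against the local advertisement to
 *	// resolve the highest common denominator
 */
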
7687/**
7688 * Register (RSL) cgx#_spu#_usx_an_status
7689 *
7690 * CGX SPU USXGMII Autonegotiation Status Register
7691 */
7692union cgxx_spux_usx_an_status {
7693        u64 u;
7694        struct cgxx_spux_usx_an_status_s {
7695                u64 extnd                            : 1;
7696                u64 reserved_1                       : 1;
7697                u64 lnk_st                           : 1;
7698                u64 an_abil                          : 1;
7699                u64 rmt_flt                          : 1;
7700                u64 an_cpt                           : 1;
7701                u64 reserved_6_63                    : 58;
7702        } s;
7703        /* struct cgxx_spux_usx_an_status_s cn; */
7704};
7705
7706static inline u64 CGXX_SPUX_USX_AN_STATUS(u64 a)
7707        __attribute__ ((pure, always_inline));
7708static inline u64 CGXX_SPUX_USX_AN_STATUS(u64 a)
7709{
7710        return 0x101c8 + 0x40000 * a;
7711}
7712
7713/**
7714 * Register (RSL) cgx#_spu_dbg_control
7715 *
7716 * CGX SPU Debug Control Registers
7717 */
7718union cgxx_spu_dbg_control {
7719        u64 u;
7720        struct cgxx_spu_dbg_control_s {
7721                u64 marker_rxp                       : 15;
7722                u64 reserved_15                      : 1;
7723                u64 scramble_dis                     : 1;
7724                u64 reserved_17_18                   : 2;
7725                u64 br_pmd_train_soft_en             : 1;
7726                u64 reserved_20_27                   : 8;
7727                u64 timestamp_norm_dis               : 1;
7728                u64 an_nonce_match_dis               : 1;
7729                u64 br_ber_mon_dis                   : 1;
7730                u64 rf_cw_mon_erly_restart_dis       : 1;
7731                u64 us_clk_period                    : 12;
7732                u64 ms_clk_period                    : 12;
7733                u64 reserved_56_63                   : 8;
7734        } s;
7735        struct cgxx_spu_dbg_control_cn96xxp1 {
7736                u64 marker_rxp                       : 15;
7737                u64 reserved_15                      : 1;
7738                u64 scramble_dis                     : 1;
7739                u64 reserved_17_18                   : 2;
7740                u64 br_pmd_train_soft_en             : 1;
7741                u64 reserved_20_27                   : 8;
7742                u64 timestamp_norm_dis               : 1;
7743                u64 an_nonce_match_dis               : 1;
7744                u64 br_ber_mon_dis                   : 1;
7745                u64 reserved_31                      : 1;
7746                u64 us_clk_period                    : 12;
7747                u64 ms_clk_period                    : 12;
7748                u64 reserved_56_63                   : 8;
7749        } cn96xxp1;
7750        /* struct cgxx_spu_dbg_control_s cn96xxp3; */
7751        /* struct cgxx_spu_dbg_control_cn96xxp1 cnf95xxp1; */
7752        /* struct cgxx_spu_dbg_control_s cnf95xxp2; */
7753};
7754
7755static inline u64 CGXX_SPU_DBG_CONTROL(void)
7756        __attribute__ ((pure, always_inline));
7757static inline u64 CGXX_SPU_DBG_CONTROL(void)
7758{
7759        return 0x10300;
7760}
7761
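/*
 * Note on the chip-specific views: the `s` member is the superset layout,
 * while `cn96xxp1` (also used for CNF95XX pass 1) keeps bit 31 reserved
 * and has no [RF_CW_MON_ERLY_RESTART_DIS] field.  A minimal sketch,
 * assuming the caller already knows which silicon it is running on
 * (`is_cn96xx_pass1` is a hypothetical platform check, and `cgx_base`,
 * readq()/writeq() are assumptions as before):
 *
 *	union cgxx_spu_dbg_control dbg;
 *
 *	dbg.u = readq(cgx_base + CGXX_SPU_DBG_CONTROL());
 *	if (!is_cn96xx_pass1)
 *		dbg.s.rf_cw_mon_erly_restart_dis = 1;  // bit 31 is reserved on pass 1
 *	writeq(dbg.u, cgx_base + CGXX_SPU_DBG_CONTROL());
 */
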
7762/**
7763 * Register (RSL) cgx#_spu_sds#_skew_status
7764 *
7765 * CGX SPU SerDes Lane Skew Status Registers This register provides
7766 * SerDes lane skew status. One register per physical SerDes lane.
7767 */
7768union cgxx_spu_sdsx_skew_status {
7769        u64 u;
7770        struct cgxx_spu_sdsx_skew_status_s {
7771                u64 skew_status                      : 32;
7772                u64 reserved_32_63                   : 32;
7773        } s;
7774        /* struct cgxx_spu_sdsx_skew_status_s cn; */
7775};
7776
7777static inline u64 CGXX_SPU_SDSX_SKEW_STATUS(u64 a)
7778        __attribute__ ((pure, always_inline));
7779static inline u64 CGXX_SPU_SDSX_SKEW_STATUS(u64 a)
7780{
7781        return 0x10340 + 8 * a;
7782}
7783
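/*
 * Illustrative sketch: dumping the per-lane skew status mentioned above.
 * The number of physical SerDes lanes per CGX (4 below) is an assumption
 * for the example, as are `cgx_base`, readq() and the use of printf().
 *
 *	union cgxx_spu_sdsx_skew_status skew;
 *	int lane;
 *
 *	for (lane = 0; lane < 4; lane++) {
 *		skew.u = readq(cgx_base + CGXX_SPU_SDSX_SKEW_STATUS(lane));
 *		printf("lane %d skew 0x%x\n", lane, (u32)skew.s.skew_status);
 *	}
 */
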
7784/**
7785 * Register (RSL) cgx#_spu_sds#_states
7786 *
7787 * CGX SPU SerDes States Registers This register provides SerDes lane
7788 * states. One register per physical SerDes lane.
7789 */
7790union cgxx_spu_sdsx_states {
7791        u64 u;
7792        struct cgxx_spu_sdsx_states_s {
7793                u64 bx_sync_sm                       : 4;
7794                u64 br_sh_cnt                        : 11;
7795                u64 br_block_lock                    : 1;
7796                u64 br_sh_invld_cnt                  : 7;
7797                u64 reserved_23                      : 1;
7798                u64 fec_sync_cnt                     : 4;
7799                u64 fec_block_sync                   : 1;
7800                u64 reserved_29                      : 1;
7801                u64 an_rx_sm                         : 2;
7802                u64 an_arb_sm                        : 3;
7803                u64 reserved_35                      : 1;
7804                u64 train_lock_bad_markers           : 3;
7805                u64 train_lock_found_1st_marker      : 1;
7806                u64 train_frame_lock                 : 1;
7807                u64 train_code_viol                  : 1;
7808                u64 train_sm                         : 3;
7809                u64 reserved_45_47                   : 3;
7810                u64 am_lock_sm                       : 2;
7811                u64 am_lock_invld_cnt                : 2;
7812                u64 reserved_52_63                   : 12;
7813        } s;
7814        /* struct cgxx_spu_sdsx_states_s cn; */
7815};
7816
7817static inline u64 CGXX_SPU_SDSX_STATES(u64 a)
7818        __attribute__ ((pure, always_inline));
7819static inline u64 CGXX_SPU_SDSX_STATES(u64 a)
7820{
7821        return 0x10360 + 8 * a;
7822}
7823
7824/**
7825 * Register (RSL) cgx#_spu_usxgmii_control
7826 *
7827 * CGX SPU Common USXGMII Control Register This register is the common
7828 * control register that enables USXGMII Mode. The fields in this
7829 * register are preserved across any LMAC soft-resets. For an LMAC in
7830 * soft-reset state in USXGMII mode, the CGX will transmit Remote Fault
7831 * BASE-R blocks.
7832 */
7833union cgxx_spu_usxgmii_control {
7834        u64 u;
7835        struct cgxx_spu_usxgmii_control_s {
7836                u64 enable                           : 1;
7837                u64 usxgmii_type                     : 3;
7838                u64 sds_id                           : 2;
7839                u64 reserved_6_63                    : 58;
7840        } s;
7841        /* struct cgxx_spu_usxgmii_control_s cn; */
7842};
7843
7844static inline u64 CGXX_SPU_USXGMII_CONTROL(void)
7845        __attribute__ ((pure, always_inline));
7846static inline u64 CGXX_SPU_USXGMII_CONTROL(void)
7847{
7848        return 0x10920;
7849}
7850
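/*
 * Illustrative sketch of enabling USXGMII mode through this common control
 * register.  The encodings for [USXGMII_TYPE] and the correct [SDS_ID] are
 * not defined in this file, so `usx_type` and `sds` below are placeholders
 * the caller must supply from the hardware manual; `cgx_base` and
 * readq()/writeq() are assumptions as in the earlier sketches.
 *
 *	union cgxx_spu_usxgmii_control uctl;
 *
 *	uctl.u = readq(cgx_base + CGXX_SPU_USXGMII_CONTROL());
 *	uctl.s.usxgmii_type = usx_type;   // port-count/type encoding
 *	uctl.s.sds_id = sds;              // SerDes carrying the USXGMII link
 *	uctl.s.enable = 1;
 *	writeq(uctl.u, cgx_base + CGXX_SPU_USXGMII_CONTROL());
 */
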
7851#endif /* __CSRS_CGX_H__ */
7852