linux/drivers/infiniband/hw/hfi1/chip.c
   1/*
   2 * Copyright(c) 2015 - 2017 Intel Corporation.
   3 *
   4 * This file is provided under a dual BSD/GPLv2 license.  When using or
   5 * redistributing this file, you may do so under either license.
   6 *
   7 * GPL LICENSE SUMMARY
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of version 2 of the GNU General Public License as
  11 * published by the Free Software Foundation.
  12 *
  13 * This program is distributed in the hope that it will be useful, but
  14 * WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  16 * General Public License for more details.
  17 *
  18 * BSD LICENSE
  19 *
  20 * Redistribution and use in source and binary forms, with or without
  21 * modification, are permitted provided that the following conditions
  22 * are met:
  23 *
  24 *  - Redistributions of source code must retain the above copyright
  25 *    notice, this list of conditions and the following disclaimer.
  26 *  - Redistributions in binary form must reproduce the above copyright
  27 *    notice, this list of conditions and the following disclaimer in
  28 *    the documentation and/or other materials provided with the
  29 *    distribution.
  30 *  - Neither the name of Intel Corporation nor the names of its
  31 *    contributors may be used to endorse or promote products derived
  32 *    from this software without specific prior written permission.
  33 *
  34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  45 *
  46 */
  47
  48/*
  49 * This file contains all of the code that is specific to the HFI chip
  50 */
  51
  52#include <linux/pci.h>
  53#include <linux/delay.h>
  54#include <linux/interrupt.h>
  55#include <linux/module.h>
  56
  57#include "hfi.h"
  58#include "trace.h"
  59#include "mad.h"
  60#include "pio.h"
  61#include "sdma.h"
  62#include "eprom.h"
  63#include "efivar.h"
  64#include "platform.h"
  65#include "aspm.h"
  66#include "affinity.h"
  67#include "debugfs.h"
  68
  69#define NUM_IB_PORTS 1
  70
  71uint kdeth_qp;
  72module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
  73MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
  74
  75uint num_vls = HFI1_MAX_VLS_SUPPORTED;
  76module_param(num_vls, uint, S_IRUGO);
  77MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
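/*
 * Illustrative note (not part of the original source): these are read-only
 * (S_IRUGO) module parameters, so they are set when the module is loaded.
 * A hypothetical invocation requesting four VLs might look like:
 *
 *	modprobe hfi1 num_vls=4
 *
 * Out-of-range values are expected to be checked and adjusted during driver
 * initialization.
 */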
  78
  79/*
  80 * Default time to aggregate two 10K packets from the idle state
  81 * (timer not running). The timer starts at the end of the first packet,
  82 * so only the time for one 10K packet and header plus a bit extra is needed.
   83 * 10 * 1024 + 64 header bytes = 10304 bytes
   84 * 10304 bytes / 12.5 GB/s = 824.32 ns
  85 */
  86uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
  87module_param(rcv_intr_timeout, uint, S_IRUGO);
  88MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
  89
  90uint rcv_intr_count = 16; /* same as qib */
  91module_param(rcv_intr_count, uint, S_IRUGO);
  92MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
  93
  94ushort link_crc_mask = SUPPORTED_CRCS;
  95module_param(link_crc_mask, ushort, S_IRUGO);
  96MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
  97
  98uint loopback;
  99module_param_named(loopback, loopback, uint, S_IRUGO);
  100MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
 101
 102/* Other driver tunables */
  103uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
 104static ushort crc_14b_sideband = 1;
 105static uint use_flr = 1;
 106uint quick_linkup; /* skip LNI */
 107
 108struct flag_table {
 109        u64 flag;       /* the flag */
 110        char *str;      /* description string */
 111        u16 extra;      /* extra information */
 112        u16 unused0;
 113        u32 unused1;
 114};
 115
 116/* str must be a string constant */
 117#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
 118#define FLAG_ENTRY0(str, flag) {flag, str, 0}
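/*
 * Usage sketch (illustrative only; example_decode is a hypothetical helper,
 * not a function in this driver): the flag_table arrays below are typically
 * walked bit by bit to turn an error-status CSR value into a readable
 * string.  A minimal decoder might look like:
 *
 *	static void example_decode(char *buf, size_t len, u64 status,
 *				   struct flag_table *table, int nentries)
 *	{
 *		int i;
 *
 *		buf[0] = '\0';
 *		for (i = 0; i < nentries; i++) {
 *			if (status & table[i].flag) {
 *				strlcat(buf, table[i].str, len);
 *				strlcat(buf, " ", len);
 *			}
 *		}
 *	}
 *
 * The driver's real string-formatting helpers may differ in detail.
 */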
 119
 120/* Send Error Consequences */
 121#define SEC_WRITE_DROPPED       0x1
 122#define SEC_PACKET_DROPPED      0x2
 123#define SEC_SC_HALTED           0x4     /* per-context only */
 124#define SEC_SPC_FREEZE          0x8     /* per-HFI only */
 125
 126#define DEFAULT_KRCVQS            2
 127#define MIN_KERNEL_KCTXTS         2
 128#define FIRST_KERNEL_KCTXT        1
 129
 130/*
 131 * RSM instance allocation
 132 *   0 - Verbs
 133 *   1 - User Fecn Handling
 134 *   2 - Vnic
 135 */
 136#define RSM_INS_VERBS             0
 137#define RSM_INS_FECN              1
 138#define RSM_INS_VNIC              2
 139
 140/* Bit offset into the GUID which carries HFI id information */
 141#define GUID_HFI_INDEX_SHIFT     39
 142
 143/* extract the emulation revision */
 144#define emulator_rev(dd) ((dd)->irev >> 8)
 145/* parallel and serial emulation versions are 3 and 4 respectively */
 146#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
 147#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
 148
 149/* RSM fields for Verbs */
 150/* packet type */
 151#define IB_PACKET_TYPE         2ull
 152#define QW_SHIFT               6ull
 153/* QPN[7..1] */
 154#define QPN_WIDTH              7ull
 155
 156/* LRH.BTH: QW 0, OFFSET 48 - for match */
 157#define LRH_BTH_QW             0ull
 158#define LRH_BTH_BIT_OFFSET     48ull
 159#define LRH_BTH_OFFSET(off)    ((LRH_BTH_QW << QW_SHIFT) | (off))
 160#define LRH_BTH_MATCH_OFFSET   LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
 161#define LRH_BTH_SELECT
 162#define LRH_BTH_MASK           3ull
 163#define LRH_BTH_VALUE          2ull
 164
 165/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
 166#define LRH_SC_QW              0ull
 167#define LRH_SC_BIT_OFFSET      56ull
 168#define LRH_SC_OFFSET(off)     ((LRH_SC_QW << QW_SHIFT) | (off))
 169#define LRH_SC_MATCH_OFFSET    LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
 170#define LRH_SC_MASK            128ull
 171#define LRH_SC_VALUE           0ull
 172
 173/* SC[n..0] QW 0, OFFSET 60 - for select */
 174#define LRH_SC_SELECT_OFFSET  ((LRH_SC_QW << QW_SHIFT) | (60ull))
 175
 176/* QPN[m+n:1] QW 1, OFFSET 1 */
 177#define QPN_SELECT_OFFSET      ((1ull << QW_SHIFT) | (1ull))
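/*
 * Worked example (illustrative): RSM match/select offsets pack a quad-word
 * index above QW_SHIFT and a bit offset within that quad word below it, so
 *
 *	LRH_BTH_MATCH_OFFSET = (LRH_BTH_QW << QW_SHIFT) | LRH_BTH_BIT_OFFSET
 *	                     = (0 << 6) | 48 = 48
 *	QPN_SELECT_OFFSET    = (1 << 6) | 1  = 65
 *
 * i.e. "QPN starts at bit 1 of the second quad word of the header".
 */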
 178
 179/* RSM fields for Vnic */
 180/* L2_TYPE: QW 0, OFFSET 61 - for match */
 181#define L2_TYPE_QW             0ull
 182#define L2_TYPE_BIT_OFFSET     61ull
 183#define L2_TYPE_OFFSET(off)    ((L2_TYPE_QW << QW_SHIFT) | (off))
 184#define L2_TYPE_MATCH_OFFSET   L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
 185#define L2_TYPE_MASK           3ull
 186#define L2_16B_VALUE           2ull
 187
 188/* L4_TYPE QW 1, OFFSET 0 - for match */
 189#define L4_TYPE_QW              1ull
 190#define L4_TYPE_BIT_OFFSET      0ull
 191#define L4_TYPE_OFFSET(off)     ((L4_TYPE_QW << QW_SHIFT) | (off))
 192#define L4_TYPE_MATCH_OFFSET    L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
 193#define L4_16B_TYPE_MASK        0xFFull
 194#define L4_16B_ETH_VALUE        0x78ull
 195
 196/* 16B VESWID - for select */
 197#define L4_16B_HDR_VESWID_OFFSET  ((2 << QW_SHIFT) | (16ull))
 198/* 16B ENTROPY - for select */
 199#define L2_16B_ENTROPY_OFFSET     ((1 << QW_SHIFT) | (32ull))
 200
 201/* defines to build power on SC2VL table */
 202#define SC2VL_VAL( \
 203        num, \
 204        sc0, sc0val, \
 205        sc1, sc1val, \
 206        sc2, sc2val, \
 207        sc3, sc3val, \
 208        sc4, sc4val, \
 209        sc5, sc5val, \
 210        sc6, sc6val, \
 211        sc7, sc7val) \
 212( \
 213        ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
 214        ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
 215        ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
 216        ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
 217        ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
 218        ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
 219        ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
 220        ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
 221)
 222
 223#define DC_SC_VL_VAL( \
 224        range, \
 225        e0, e0val, \
 226        e1, e1val, \
 227        e2, e2val, \
 228        e3, e3val, \
 229        e4, e4val, \
 230        e5, e5val, \
 231        e6, e6val, \
 232        e7, e7val, \
 233        e8, e8val, \
 234        e9, e9val, \
 235        e10, e10val, \
 236        e11, e11val, \
 237        e12, e12val, \
 238        e13, e13val, \
 239        e14, e14val, \
 240        e15, e15val) \
 241( \
 242        ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
 243        ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
 244        ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
 245        ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
 246        ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
 247        ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
 248        ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
 249        ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
 250        ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
 251        ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
 252        ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
 253        ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
 254        ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
 255        ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
 256        ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
 257        ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
 258)
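/*
 * Illustrative use (a hypothetical invocation, not copied from elsewhere in
 * this file): SC2VL_VAL builds one SendSC2VLT<num> register value from
 * (sc, vl) pairs, e.g. an identity SC0-SC7 -> VL0-VL7 mapping for register 0:
 *
 *	SC2VL_VAL(0,
 *		  0, 0, 1, 1, 2, 2, 3, 3,
 *		  4, 4, 5, 5, 6, 6, 7, 7)
 *
 * which shifts each value to its SEND_SC2VLT0_SC<n>_SHIFT position (those
 * shift constants are assumed to come from the chip register headers).
 * DC_SC_VL_VAL follows the same pattern for the DCC SC-to-VL table, taking
 * sixteen (entry, value) pairs per register range.
 */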
 259
 260/* all CceStatus sub-block freeze bits */
 261#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
 262                        | CCE_STATUS_RXE_FROZE_SMASK \
 263                        | CCE_STATUS_TXE_FROZE_SMASK \
 264                        | CCE_STATUS_TXE_PIO_FROZE_SMASK)
 265/* all CceStatus sub-block TXE pause bits */
 266#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
 267                        | CCE_STATUS_TXE_PAUSED_SMASK \
 268                        | CCE_STATUS_SDMA_PAUSED_SMASK)
 269/* all CceStatus sub-block RXE pause bits */
 270#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
 271
 272#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
 273#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
 274
 275/*
 276 * CCE Error flags.
 277 */
 278static struct flag_table cce_err_status_flags[] = {
 279/* 0*/  FLAG_ENTRY0("CceCsrParityErr",
 280                CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
 281/* 1*/  FLAG_ENTRY0("CceCsrReadBadAddrErr",
 282                CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
 283/* 2*/  FLAG_ENTRY0("CceCsrWriteBadAddrErr",
 284                CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
 285/* 3*/  FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
 286                CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
 287/* 4*/  FLAG_ENTRY0("CceTrgtAccessErr",
 288                CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
 289/* 5*/  FLAG_ENTRY0("CceRspdDataParityErr",
 290                CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
 291/* 6*/  FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
 292                CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
 293/* 7*/  FLAG_ENTRY0("CceCsrCfgBusParityErr",
 294                CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
 295/* 8*/  FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
 296                CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
 297/* 9*/  FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
 298            CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
  299/*10*/  FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
 300            CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
 301/*11*/  FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
 302            CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
 303/*12*/  FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
 304                CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
 305/*13*/  FLAG_ENTRY0("PcicRetryMemCorErr",
 306                CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
  307/*14*/  FLAG_ENTRY0("PcicRetrySotMemCorErr",
 308                CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
 309/*15*/  FLAG_ENTRY0("PcicPostHdQCorErr",
 310                CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
  311/*16*/  FLAG_ENTRY0("PcicPostDatQCorErr",
 312                CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
  313/*17*/  FLAG_ENTRY0("PcicCplHdQCorErr",
 314                CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
 315/*18*/  FLAG_ENTRY0("PcicCplDatQCorErr",
 316                CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
 317/*19*/  FLAG_ENTRY0("PcicNPostHQParityErr",
 318                CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
 319/*20*/  FLAG_ENTRY0("PcicNPostDatQParityErr",
 320                CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
 321/*21*/  FLAG_ENTRY0("PcicRetryMemUncErr",
 322                CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
 323/*22*/  FLAG_ENTRY0("PcicRetrySotMemUncErr",
 324                CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
 325/*23*/  FLAG_ENTRY0("PcicPostHdQUncErr",
 326                CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
 327/*24*/  FLAG_ENTRY0("PcicPostDatQUncErr",
 328                CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
 329/*25*/  FLAG_ENTRY0("PcicCplHdQUncErr",
 330                CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
 331/*26*/  FLAG_ENTRY0("PcicCplDatQUncErr",
 332                CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
 333/*27*/  FLAG_ENTRY0("PcicTransmitFrontParityErr",
 334                CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
 335/*28*/  FLAG_ENTRY0("PcicTransmitBackParityErr",
 336                CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
 337/*29*/  FLAG_ENTRY0("PcicReceiveParityErr",
 338                CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
 339/*30*/  FLAG_ENTRY0("CceTrgtCplTimeoutErr",
 340                CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
 341/*31*/  FLAG_ENTRY0("LATriggered",
 342                CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
 343/*32*/  FLAG_ENTRY0("CceSegReadBadAddrErr",
 344                CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
 345/*33*/  FLAG_ENTRY0("CceSegWriteBadAddrErr",
 346                CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
 347/*34*/  FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
 348                CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
 349/*35*/  FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
 350                CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
 351/*36*/  FLAG_ENTRY0("CceMsixTableCorErr",
 352                CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
 353/*37*/  FLAG_ENTRY0("CceMsixTableUncErr",
 354                CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
 355/*38*/  FLAG_ENTRY0("CceIntMapCorErr",
 356                CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
 357/*39*/  FLAG_ENTRY0("CceIntMapUncErr",
 358                CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
 359/*40*/  FLAG_ENTRY0("CceMsixCsrParityErr",
 360                CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
 361/*41-63 reserved*/
 362};
 363
 364/*
 365 * Misc Error flags
 366 */
 367#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
 368static struct flag_table misc_err_status_flags[] = {
 369/* 0*/  FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
 370/* 1*/  FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
 371/* 2*/  FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
 372/* 3*/  FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
 373/* 4*/  FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
 374/* 5*/  FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
 375/* 6*/  FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
 376/* 7*/  FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
 377/* 8*/  FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
 378/* 9*/  FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
 379/*10*/  FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
 380/*11*/  FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
 381/*12*/  FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
 382};
 383
 384/*
 385 * TXE PIO Error flags and consequences
 386 */
 387static struct flag_table pio_err_status_flags[] = {
 388/* 0*/  FLAG_ENTRY("PioWriteBadCtxt",
 389        SEC_WRITE_DROPPED,
 390        SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
 391/* 1*/  FLAG_ENTRY("PioWriteAddrParity",
 392        SEC_SPC_FREEZE,
 393        SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
 394/* 2*/  FLAG_ENTRY("PioCsrParity",
 395        SEC_SPC_FREEZE,
 396        SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
 397/* 3*/  FLAG_ENTRY("PioSbMemFifo0",
 398        SEC_SPC_FREEZE,
 399        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
 400/* 4*/  FLAG_ENTRY("PioSbMemFifo1",
 401        SEC_SPC_FREEZE,
 402        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
 403/* 5*/  FLAG_ENTRY("PioPccFifoParity",
 404        SEC_SPC_FREEZE,
 405        SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
 406/* 6*/  FLAG_ENTRY("PioPecFifoParity",
 407        SEC_SPC_FREEZE,
 408        SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
 409/* 7*/  FLAG_ENTRY("PioSbrdctlCrrelParity",
 410        SEC_SPC_FREEZE,
 411        SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
 412/* 8*/  FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
 413        SEC_SPC_FREEZE,
 414        SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
 415/* 9*/  FLAG_ENTRY("PioPktEvictFifoParityErr",
 416        SEC_SPC_FREEZE,
 417        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
 418/*10*/  FLAG_ENTRY("PioSmPktResetParity",
 419        SEC_SPC_FREEZE,
 420        SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
 421/*11*/  FLAG_ENTRY("PioVlLenMemBank0Unc",
 422        SEC_SPC_FREEZE,
 423        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
 424/*12*/  FLAG_ENTRY("PioVlLenMemBank1Unc",
 425        SEC_SPC_FREEZE,
 426        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
 427/*13*/  FLAG_ENTRY("PioVlLenMemBank0Cor",
 428        0,
 429        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
 430/*14*/  FLAG_ENTRY("PioVlLenMemBank1Cor",
 431        0,
 432        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
 433/*15*/  FLAG_ENTRY("PioCreditRetFifoParity",
 434        SEC_SPC_FREEZE,
 435        SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
 436/*16*/  FLAG_ENTRY("PioPpmcPblFifo",
 437        SEC_SPC_FREEZE,
 438        SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
 439/*17*/  FLAG_ENTRY("PioInitSmIn",
 440        0,
 441        SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
 442/*18*/  FLAG_ENTRY("PioPktEvictSmOrArbSm",
 443        SEC_SPC_FREEZE,
 444        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
 445/*19*/  FLAG_ENTRY("PioHostAddrMemUnc",
 446        SEC_SPC_FREEZE,
 447        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
 448/*20*/  FLAG_ENTRY("PioHostAddrMemCor",
 449        0,
 450        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
 451/*21*/  FLAG_ENTRY("PioWriteDataParity",
 452        SEC_SPC_FREEZE,
 453        SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
 454/*22*/  FLAG_ENTRY("PioStateMachine",
 455        SEC_SPC_FREEZE,
 456        SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
 457/*23*/  FLAG_ENTRY("PioWriteQwValidParity",
 458        SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
 459        SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
 460/*24*/  FLAG_ENTRY("PioBlockQwCountParity",
 461        SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
 462        SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
 463/*25*/  FLAG_ENTRY("PioVlfVlLenParity",
 464        SEC_SPC_FREEZE,
 465        SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
 466/*26*/  FLAG_ENTRY("PioVlfSopParity",
 467        SEC_SPC_FREEZE,
 468        SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
 469/*27*/  FLAG_ENTRY("PioVlFifoParity",
 470        SEC_SPC_FREEZE,
 471        SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
 472/*28*/  FLAG_ENTRY("PioPpmcBqcMemParity",
 473        SEC_SPC_FREEZE,
 474        SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
 475/*29*/  FLAG_ENTRY("PioPpmcSopLen",
 476        SEC_SPC_FREEZE,
 477        SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
 478/*30-31 reserved*/
 479/*32*/  FLAG_ENTRY("PioCurrentFreeCntParity",
 480        SEC_SPC_FREEZE,
 481        SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
 482/*33*/  FLAG_ENTRY("PioLastReturnedCntParity",
 483        SEC_SPC_FREEZE,
 484        SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
 485/*34*/  FLAG_ENTRY("PioPccSopHeadParity",
 486        SEC_SPC_FREEZE,
 487        SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
 488/*35*/  FLAG_ENTRY("PioPecSopHeadParityErr",
 489        SEC_SPC_FREEZE,
 490        SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
 491/*36-63 reserved*/
 492};
 493
 494/* TXE PIO errors that cause an SPC freeze */
 495#define ALL_PIO_FREEZE_ERR \
 496        (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
 497        | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
 498        | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
 499        | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
 500        | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
 501        | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
 502        | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
 503        | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
 504        | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
 505        | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
 506        | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
 507        | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
 508        | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
 509        | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
 510        | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
 511        | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
 512        | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
 513        | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
 514        | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
 515        | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
 516        | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
 517        | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
 518        | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
 519        | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
 520        | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
 521        | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
 522        | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
 523        | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
 524        | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
 525
 526/*
 527 * TXE SDMA Error flags
 528 */
 529static struct flag_table sdma_err_status_flags[] = {
 530/* 0*/  FLAG_ENTRY0("SDmaRpyTagErr",
 531                SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
 532/* 1*/  FLAG_ENTRY0("SDmaCsrParityErr",
 533                SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
 534/* 2*/  FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
 535                SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
 536/* 3*/  FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
 537                SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
 538/*04-63 reserved*/
 539};
 540
 541/* TXE SDMA errors that cause an SPC freeze */
 542#define ALL_SDMA_FREEZE_ERR  \
 543                (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
 544                | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
 545                | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
 546
 547/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
 548#define PORT_DISCARD_EGRESS_ERRS \
 549        (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
 550        | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
 551        | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
 552
 553/*
 554 * TXE Egress Error flags
 555 */
 556#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
 557static struct flag_table egress_err_status_flags[] = {
 558/* 0*/  FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
 559/* 1*/  FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
 560/* 2 reserved */
 561/* 3*/  FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
 562                SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
 563/* 4*/  FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
 564/* 5*/  FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
 565/* 6 reserved */
 566/* 7*/  FLAG_ENTRY0("TxPioLaunchIntfParityErr",
 567                SEES(TX_PIO_LAUNCH_INTF_PARITY)),
 568/* 8*/  FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
 569                SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
 570/* 9-10 reserved */
 571/*11*/  FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
 572                SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
 573/*12*/  FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
 574/*13*/  FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
 575/*14*/  FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
 576/*15*/  FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
 577/*16*/  FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
 578                SEES(TX_SDMA0_DISALLOWED_PACKET)),
 579/*17*/  FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
 580                SEES(TX_SDMA1_DISALLOWED_PACKET)),
 581/*18*/  FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
 582                SEES(TX_SDMA2_DISALLOWED_PACKET)),
 583/*19*/  FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
 584                SEES(TX_SDMA3_DISALLOWED_PACKET)),
 585/*20*/  FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
 586                SEES(TX_SDMA4_DISALLOWED_PACKET)),
 587/*21*/  FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
 588                SEES(TX_SDMA5_DISALLOWED_PACKET)),
 589/*22*/  FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
 590                SEES(TX_SDMA6_DISALLOWED_PACKET)),
 591/*23*/  FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
 592                SEES(TX_SDMA7_DISALLOWED_PACKET)),
 593/*24*/  FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
 594                SEES(TX_SDMA8_DISALLOWED_PACKET)),
 595/*25*/  FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
 596                SEES(TX_SDMA9_DISALLOWED_PACKET)),
 597/*26*/  FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
 598                SEES(TX_SDMA10_DISALLOWED_PACKET)),
 599/*27*/  FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
 600                SEES(TX_SDMA11_DISALLOWED_PACKET)),
 601/*28*/  FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
 602                SEES(TX_SDMA12_DISALLOWED_PACKET)),
 603/*29*/  FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
 604                SEES(TX_SDMA13_DISALLOWED_PACKET)),
 605/*30*/  FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
 606                SEES(TX_SDMA14_DISALLOWED_PACKET)),
 607/*31*/  FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
 608                SEES(TX_SDMA15_DISALLOWED_PACKET)),
 609/*32*/  FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
 610                SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
 611/*33*/  FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
 612                SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
 613/*34*/  FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
 614                SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
 615/*35*/  FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
 616                SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
 617/*36*/  FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
 618                SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
 619/*37*/  FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
 620                SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
 621/*38*/  FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
 622                SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
 623/*39*/  FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
 624                SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
 625/*40*/  FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
 626                SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
 627/*41*/  FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
 628/*42*/  FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
 629/*43*/  FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
 630/*44*/  FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
 631/*45*/  FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
 632/*46*/  FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
 633/*47*/  FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
 634/*48*/  FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
 635/*49*/  FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
 636/*50*/  FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
 637/*51*/  FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
 638/*52*/  FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
 639/*53*/  FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
 640/*54*/  FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
 641/*55*/  FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
 642/*56*/  FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
 643/*57*/  FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
 644/*58*/  FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
 645/*59*/  FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
 646/*60*/  FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
 647/*61*/  FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
 648/*62*/  FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
 649                SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
 650/*63*/  FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
 651                SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
 652};
 653
 654/*
 655 * TXE Egress Error Info flags
 656 */
 657#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
 658static struct flag_table egress_err_info_flags[] = {
 659/* 0*/  FLAG_ENTRY0("Reserved", 0ull),
 660/* 1*/  FLAG_ENTRY0("VLErr", SEEI(VL)),
 661/* 2*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
 662/* 3*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
 663/* 4*/  FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
 664/* 5*/  FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
 665/* 6*/  FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
 666/* 7*/  FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
 667/* 8*/  FLAG_ENTRY0("RawErr", SEEI(RAW)),
 668/* 9*/  FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
 669/*10*/  FLAG_ENTRY0("GRHErr", SEEI(GRH)),
 670/*11*/  FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
 671/*12*/  FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
 672/*13*/  FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
 673/*14*/  FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
 674/*15*/  FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
 675/*16*/  FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
 676/*17*/  FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
 677/*18*/  FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
 678/*19*/  FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
 679/*20*/  FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
 680/*21*/  FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
 681};
 682
 683/* TXE Egress errors that cause an SPC freeze */
 684#define ALL_TXE_EGRESS_FREEZE_ERR \
 685        (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
 686        | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
 687        | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
 688        | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
 689        | SEES(TX_LAUNCH_CSR_PARITY) \
 690        | SEES(TX_SBRD_CTL_CSR_PARITY) \
 691        | SEES(TX_CONFIG_PARITY) \
 692        | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
 693        | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
 694        | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
 695        | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
 696        | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
 697        | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
 698        | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
 699        | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
 700        | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
 701        | SEES(TX_CREDIT_RETURN_PARITY))
 702
 703/*
 704 * TXE Send error flags
 705 */
 706#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
 707static struct flag_table send_err_status_flags[] = {
 708/* 0*/  FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
 709/* 1*/  FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
 710/* 2*/  FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
 711};
 712
 713/*
 714 * TXE Send Context Error flags and consequences
 715 */
 716static struct flag_table sc_err_status_flags[] = {
 717/* 0*/  FLAG_ENTRY("InconsistentSop",
 718                SEC_PACKET_DROPPED | SEC_SC_HALTED,
 719                SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
 720/* 1*/  FLAG_ENTRY("DisallowedPacket",
 721                SEC_PACKET_DROPPED | SEC_SC_HALTED,
 722                SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
 723/* 2*/  FLAG_ENTRY("WriteCrossesBoundary",
 724                SEC_WRITE_DROPPED | SEC_SC_HALTED,
 725                SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
 726/* 3*/  FLAG_ENTRY("WriteOverflow",
 727                SEC_WRITE_DROPPED | SEC_SC_HALTED,
 728                SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
 729/* 4*/  FLAG_ENTRY("WriteOutOfBounds",
 730                SEC_WRITE_DROPPED | SEC_SC_HALTED,
 731                SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
 732/* 5-63 reserved*/
 733};
 734
 735/*
 736 * RXE Receive Error flags
 737 */
 738#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
 739static struct flag_table rxe_err_status_flags[] = {
 740/* 0*/  FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
 741/* 1*/  FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
 742/* 2*/  FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
 743/* 3*/  FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
 744/* 4*/  FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
 745/* 5*/  FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
 746/* 6*/  FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
 747/* 7*/  FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
 748/* 8*/  FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
 749/* 9*/  FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
 750/*10*/  FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
 751/*11*/  FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
 752/*12*/  FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
 753/*13*/  FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
 754/*14*/  FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
 755/*15*/  FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
 756/*16*/  FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
 757                RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
 758/*17*/  FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
 759/*18*/  FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
 760/*19*/  FLAG_ENTRY0("RxRbufBlockListReadUncErr",
 761                RXES(RBUF_BLOCK_LIST_READ_UNC)),
 762/*20*/  FLAG_ENTRY0("RxRbufBlockListReadCorErr",
 763                RXES(RBUF_BLOCK_LIST_READ_COR)),
 764/*21*/  FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
 765                RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
 766/*22*/  FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
 767                RXES(RBUF_CSR_QENT_CNT_PARITY)),
 768/*23*/  FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
 769                RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
 770/*24*/  FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
 771                RXES(RBUF_CSR_QVLD_BIT_PARITY)),
 772/*25*/  FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
 773/*26*/  FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
 774/*27*/  FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
 775                RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
 776/*28*/  FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
 777/*29*/  FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
 778/*30*/  FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
 779/*31*/  FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
 780/*32*/  FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
 781/*33*/  FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
 782/*34*/  FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
 783/*35*/  FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
 784                RXES(RBUF_FL_INITDONE_PARITY)),
 785/*36*/  FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
 786                RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
 787/*37*/  FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
 788/*38*/  FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
 789/*39*/  FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
 790/*40*/  FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
 791                RXES(LOOKUP_DES_PART1_UNC_COR)),
 792/*41*/  FLAG_ENTRY0("RxLookupDesPart2ParityErr",
 793                RXES(LOOKUP_DES_PART2_PARITY)),
 794/*42*/  FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
 795/*43*/  FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
 796/*44*/  FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
 797/*45*/  FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
 798/*46*/  FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
 799/*47*/  FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
 800/*48*/  FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
 801/*49*/  FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
 802/*50*/  FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
 803/*51*/  FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
 804/*52*/  FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
 805/*53*/  FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
 806/*54*/  FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
 807/*55*/  FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
 808/*56*/  FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
 809/*57*/  FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
 810/*58*/  FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
 811/*59*/  FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
 812/*60*/  FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
 813/*61*/  FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
 814/*62*/  FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
 815/*63*/  FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
 816};
 817
 818/* RXE errors that will trigger an SPC freeze */
 819#define ALL_RXE_FREEZE_ERR  \
 820        (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
 821        | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
 822        | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
 823        | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
 824        | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
 825        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
 826        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
 827        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
 828        | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
 829        | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
 830        | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
 831        | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
 832        | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
 833        | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
 834        | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
 835        | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
 836        | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
 837        | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
 838        | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
 839        | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
 840        | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
 841        | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
 842        | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
 843        | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
 844        | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
 845        | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
 846        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
 847        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
 848        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
 849        | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
 850        | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
 851        | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
 852        | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
 853        | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
 854        | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
 855        | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
 856        | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
 857        | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
 858        | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
 859        | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
 860        | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
 861        | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
 862        | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
 863        | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
 864
 865#define RXE_FREEZE_ABORT_MASK \
 866        (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
 867        RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
 868        RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
 869
 870/*
 871 * DCC Error Flags
 872 */
 873#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
 874static struct flag_table dcc_err_flags[] = {
 875        FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
 876        FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
 877        FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
 878        FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
 879        FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
 880        FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
 881        FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
 882        FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
 883        FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
 884        FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
 885        FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
 886        FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
 887        FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
 888        FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
 889        FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
 890        FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
 891        FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
 892        FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
 893        FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
 894        FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
 895        FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
 896        FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
 897        FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
 898        FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
 899        FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
 900        FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
 901        FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
 902        FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
 903        FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
 904        FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
 905        FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
 906        FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
 907        FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
 908        FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
 909        FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
 910        FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
 911        FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
 912        FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
 913        FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
 914        FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
 915        FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
 916        FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
 917        FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
 918        FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
 919        FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
 920        FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
 921};
 922
 923/*
 924 * LCB error flags
 925 */
 926#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
 927static struct flag_table lcb_err_flags[] = {
 928/* 0*/  FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
 929/* 1*/  FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
 930/* 2*/  FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
 931/* 3*/  FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
 932                LCBE(ALL_LNS_FAILED_REINIT_TEST)),
 933/* 4*/  FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
 934/* 5*/  FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
 935/* 6*/  FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
 936/* 7*/  FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
 937/* 8*/  FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
 938/* 9*/  FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
 939/*10*/  FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
 940/*11*/  FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
 941/*12*/  FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
 942/*13*/  FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
 943                LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
 944/*14*/  FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
 945/*15*/  FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
 946/*16*/  FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
 947/*17*/  FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
 948/*18*/  FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
 949/*19*/  FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
 950                LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
 951/*20*/  FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
 952/*21*/  FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
 953/*22*/  FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
 954/*23*/  FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
 955/*24*/  FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
 956/*25*/  FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
 957/*26*/  FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
 958                LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
 959/*27*/  FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
 960/*28*/  FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
 961                LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
 962/*29*/  FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
 963                LCBE(REDUNDANT_FLIT_PARITY_ERR))
 964};
 965
 966/*
 967 * DC8051 Error Flags
 968 */
 969#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
 970static struct flag_table dc8051_err_flags[] = {
 971        FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
 972        FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
 973        FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
 974        FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
 975        FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
 976        FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
 977        FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
 978        FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
 979        FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
 980                    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
 981        FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
 982};
 983
 984/*
 985 * DC8051 Information Error flags
 986 *
 987 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 988 */
 989static struct flag_table dc8051_info_err_flags[] = {
 990        FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
 991        FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
 992        FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
 993        FLAG_ENTRY0("Serdes internal loopback failure",
 994                    FAILED_SERDES_INTERNAL_LOOPBACK),
 995        FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
 996        FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
 997        FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
 998        FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
 999        FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
1000        FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
1001        FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
1002        FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
1003        FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT),
1004        FLAG_ENTRY0("External Device Request Timeout",
1005                    EXTERNAL_DEVICE_REQ_TIMEOUT),
1006};
1007
1008/*
1009 * DC8051 Information Host Information flags
1010 *
1011 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
1012 */
1013static struct flag_table dc8051_info_host_msg_flags[] = {
1014        FLAG_ENTRY0("Host request done", 0x0001),
1015        FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
1016        FLAG_ENTRY0("BC SMA message", 0x0004),
1017        FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
1018        FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
1019        FLAG_ENTRY0("External device config request", 0x0020),
1020        FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
1021        FLAG_ENTRY0("LinkUp achieved", 0x0080),
1022        FLAG_ENTRY0("Link going down", 0x0100),
1023        FLAG_ENTRY0("Link width downgraded", 0x0200),
1024};
1025
1026static u32 encoded_size(u32 size);
1027static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
1028static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
1029static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
1030                               u8 *continuous);
1031static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
1032                                  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
1033static void read_vc_remote_link_width(struct hfi1_devdata *dd,
1034                                      u8 *remote_tx_rate, u16 *link_widths);
1035static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
1036                                     u8 *flag_bits, u16 *link_widths);
1037static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1038                                  u8 *device_rev);
1039static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
1040static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1041static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1042                            u8 *tx_polarity_inversion,
1043                            u8 *rx_polarity_inversion, u8 *max_rate);
1044static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1045                                unsigned int context, u64 err_status);
1046static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1047static void handle_dcc_err(struct hfi1_devdata *dd,
1048                           unsigned int context, u64 err_status);
1049static void handle_lcb_err(struct hfi1_devdata *dd,
1050                           unsigned int context, u64 err_status);
1051static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1052static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1053static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1054static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1055static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1056static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1057static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1058static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1059static void set_partition_keys(struct hfi1_pportdata *ppd);
1060static const char *link_state_name(u32 state);
1061static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1062                                          u32 state);
1063static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1064                           u64 *out_data);
1065static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1066static int thermal_init(struct hfi1_devdata *dd);
1067
1068static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
1069static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
1070                                            int msecs);
1071static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1072                                  int msecs);
1073static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
1074static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
1075static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1076                                   int msecs);
1077static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1078static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1079static void handle_temp_err(struct hfi1_devdata *dd);
1080static void dc_shutdown(struct hfi1_devdata *dd);
1081static void dc_start(struct hfi1_devdata *dd);
1082static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1083                           unsigned int *np);
1084static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1085static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
1086static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
1087
1088/*
1089 * Error interrupt table entry.  This is used as input to the interrupt
1090 * "clear down" routine used for all second tier error interrupt register.
1091 * Second tier interrupt registers have a single bit representing them
1092 * in the top-level CceIntStatus.
1093 */
1094struct err_reg_info {
1095        u32 status;             /* status CSR offset */
1096        u32 clear;              /* clear CSR offset */
1097        u32 mask;               /* mask CSR offset */
1098        void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1099        const char *desc;
1100};
1101
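/*
 * Illustrative sketch only (not the driver's routine): how one of these
 * entries could drive a generic "clear down" pass.  Per-context register
 * strides are ignored here; it uses the read_csr()/write_csr() helpers
 * defined later in this file.
 */
#if 0   /* example only */
static void example_clear_down(struct hfi1_devdata *dd, u32 source,
                               const struct err_reg_info *eri)
{
        u64 reg = read_csr(dd, eri->status);    /* which error bits fired? */

        if (!reg)
                return;
        write_csr(dd, eri->clear, reg);         /* acknowledge those bits */
        if (eri->handler)
                eri->handler(dd, source, reg);  /* decode and report them */
        else
                dd_dev_info(dd, "%s: no handler\n", eri->desc);
}
#endif
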
1102#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1103#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1104#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1105
1106/*
1107 * Helpers for building HFI and DC error interrupt table entries.  Different
1108 * helpers are needed because of inconsistent register names.
1109 */
1110#define EE(reg, handler, desc) \
1111        { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1112                handler, desc }
1113#define DC_EE1(reg, handler, desc) \
1114        { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1115#define DC_EE2(reg, handler, desc) \
1116        { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
1117
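/*
 * Expansion sketch: EE(CCE_ERR, handle_cce_err, "CceErr") pastes the
 * register-name suffixes and yields
 *
 *      { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 *        handle_cce_err, "CceErr" }
 *
 * while DC_EE1/DC_EE2 paste the _FLG/_FLG_CLR/_FLG_EN and _FLG/_CLR/_EN
 * suffixes used by the DC register sets.
 */
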
1118/*
1119 * Table of the "misc" grouping of error interrupts.  Each entry refers to
1120 * another register containing more information.
1121 */
1122static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1123/* 0*/  EE(CCE_ERR,             handle_cce_err,    "CceErr"),
1124/* 1*/  EE(RCV_ERR,             handle_rxe_err,    "RxeErr"),
1125/* 2*/  EE(MISC_ERR,    handle_misc_err,   "MiscErr"),
1126/* 3*/  { 0, 0, 0, NULL }, /* reserved */
1127/* 4*/  EE(SEND_PIO_ERR,    handle_pio_err,    "PioErr"),
1128/* 5*/  EE(SEND_DMA_ERR,    handle_sdma_err,   "SDmaErr"),
1129/* 6*/  EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1130/* 7*/  EE(SEND_ERR,    handle_txe_err,    "TxeErr")
1131        /* the rest are reserved */
1132};
1133
1134/*
1135 * Index into the Various section of the interrupt sources
1136 * corresponding to the Critical Temperature interrupt.
1137 */
1138#define TCRIT_INT_SOURCE 4
1139
1140/*
1141 * SDMA error interrupt entry - refers to another register containing more
1142 * information.
1143 */
1144static const struct err_reg_info sdma_eng_err =
1145        EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1146
1147static const struct err_reg_info various_err[NUM_VARIOUS] = {
1148/* 0*/  { 0, 0, 0, NULL }, /* PbcInt */
1149/* 1*/  { 0, 0, 0, NULL }, /* GpioAssertInt */
1150/* 2*/  EE(ASIC_QSFP1,  handle_qsfp_int,        "QSFP1"),
1151/* 3*/  EE(ASIC_QSFP2,  handle_qsfp_int,        "QSFP2"),
1152/* 4*/  { 0, 0, 0, NULL }, /* TCritInt */
1153        /* rest are reserved */
1154};
1155
1156/*
1157 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1158 * register cannot be derived from the MTU value because 10K is not
1159 * a power of 2. Therefore, we need a constant. Everything else can
1160 * be calculated.
1161 */
1162#define DCC_CFG_PORT_MTU_CAP_10240 7
1163
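/*
 * Illustrative sketch (not necessarily the driver's exact derivation):
 * for the power-of-2 MTUs the cap encoding can be computed, e.g.
 * 256 -> 1, 2048 -> 4, 8192 -> 6, while 10240 must use the constant above.
 */
#if 0   /* example only */
static u8 example_mtu_cap(u32 mtu)
{
        return mtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
                              ilog2(mtu >> 8) + 1;
}
#endif
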
1164/*
1165 * Table of the DC grouping of error interrupts.  Each entry refers to
1166 * another register containing more information.
1167 */
1168static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1169/* 0*/  DC_EE1(DCC_ERR,         handle_dcc_err,        "DCC Err"),
1170/* 1*/  DC_EE2(DC_LCB_ERR,      handle_lcb_err,        "LCB Err"),
1171/* 2*/  DC_EE2(DC_DC8051_ERR,   handle_8051_interrupt, "DC8051 Interrupt"),
1172/* 3*/  /* dc_lbm_int - special, see is_dc_int() */
1173        /* the rest are reserved */
1174};
1175
1176struct cntr_entry {
1177        /*
1178         * counter name
1179         */
1180        char *name;
1181
1182        /*
1183         * csr to read for name (if applicable)
1184         */
1185        u64 csr;
1186
1187        /*
1188         * offset into dd or ppd to store the counter's value
1189         */
1190        int offset;
1191
1192        /*
1193         * flags
1194         */
1195        u8 flags;
1196
1197        /*
1198         * accessor for the stat element; context is either dd or ppd
1199         */
1200        u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1201                       int mode, u64 data);
1202};
1203
1204#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1205#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1206
1207#define CNTR_ELEM(name, csr, offset, flags, accessor) \
1208{ \
1209        name, \
1210        csr, \
1211        offset, \
1212        flags, \
1213        accessor \
1214}
1215
1216/* 32bit RXE */
1217#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1218CNTR_ELEM(#name, \
1219          (counter * 8 + RCV_COUNTER_ARRAY32), \
1220          0, flags | CNTR_32BIT, \
1221          port_access_u32_csr)
1222
1223#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1224CNTR_ELEM(#name, \
1225          (counter * 8 + RCV_COUNTER_ARRAY32), \
1226          0, flags | CNTR_32BIT, \
1227          dev_access_u32_csr)
1228
1229/* 64bit RXE */
1230#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1231CNTR_ELEM(#name, \
1232          (counter * 8 + RCV_COUNTER_ARRAY64), \
1233          0, flags, \
1234          port_access_u64_csr)
1235
1236#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1237CNTR_ELEM(#name, \
1238          (counter * 8 + RCV_COUNTER_ARRAY64), \
1239          0, flags, \
1240          dev_access_u64_csr)
1241
1242#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1243#define OVR_ELM(ctx) \
1244CNTR_ELEM("RcvHdrOvr" #ctx, \
1245          (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1246          0, CNTR_NORMAL, port_access_u64_csr)
1247
1248/* 32bit TXE */
1249#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1250CNTR_ELEM(#name, \
1251          (counter * 8 + SEND_COUNTER_ARRAY32), \
1252          0, flags | CNTR_32BIT, \
1253          port_access_u32_csr)
1254
1255/* 64bit TXE */
1256#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1257CNTR_ELEM(#name, \
1258          (counter * 8 + SEND_COUNTER_ARRAY64), \
1259          0, flags, \
1260          port_access_u64_csr)
1261
1262#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1263CNTR_ELEM(#name, \
1264          counter * 8 + SEND_COUNTER_ARRAY64, \
1265          0, \
1266          flags, \
1267          dev_access_u64_csr)
1268
1269/* CCE */
1270#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1271CNTR_ELEM(#name, \
1272          (counter * 8 + CCE_COUNTER_ARRAY32), \
1273          0, flags | CNTR_32BIT, \
1274          dev_access_u32_csr)
1275
1276#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1277CNTR_ELEM(#name, \
1278          (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1279          0, flags | CNTR_32BIT, \
1280          dev_access_u32_csr)
1281
1282/* DC */
1283#define DC_PERF_CNTR(name, counter, flags) \
1284CNTR_ELEM(#name, \
1285          counter, \
1286          0, \
1287          flags, \
1288          dev_access_u64_csr)
1289
1290#define DC_PERF_CNTR_LCB(name, counter, flags) \
1291CNTR_ELEM(#name, \
1292          counter, \
1293          0, \
1294          flags, \
1295          dc_access_lcb_cntr)
1296
1297/* ibp counters */
1298#define SW_IBP_CNTR(name, cntr) \
1299CNTR_ELEM(#name, \
1300          0, \
1301          0, \
1302          CNTR_SYNTH, \
1303          access_ibp_##cntr)
1304
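/*
 * Expansion sketch with placeholder names: RXE32_DEV_CNTR_ELEM(Foo, FOO_CNT,
 * CNTR_NORMAL) would produce a cntr_entry of the form
 *
 *      { "Foo", FOO_CNT * 8 + RCV_COUNTER_ARRAY32, 0,
 *        CNTR_NORMAL | CNTR_32BIT, dev_access_u32_csr }
 *
 * i.e. the counter index selects an 8-byte slot in the RXE 32-bit counter
 * array, and CNTR_32BIT marks the value as a 32-bit counter for the read
 * path.
 */
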
1305/**
1306 * hfi1_addr_from_offset - return addr for readq/writeq
1307 * @dd - the dd device
1308 * @offset - the offset of the CSR within bar0
1309 *
1310 * This routine selects the appropriate base address
1311 * based on the indicated offset.
1312 */
1313static inline void __iomem *hfi1_addr_from_offset(
1314        const struct hfi1_devdata *dd,
1315        u32 offset)
1316{
1317        if (offset >= dd->base2_start)
1318                return dd->kregbase2 + (offset - dd->base2_start);
1319        return dd->kregbase1 + offset;
1320}
1321
1322/**
1323 * read_csr - read CSR at the indicated offset
1324 * @dd - the dd device
1325 * @offset - the offset of the CSR within bar0
1326 *
1327 * Return: the value read or all FF's if there
1328 * is no mapping
1329 */
1330u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1331{
1332        if (dd->flags & HFI1_PRESENT)
1333                return readq(hfi1_addr_from_offset(dd, offset));
1334        return -1;
1335}
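
/*
 * Note: when HFI1_PRESENT is not set there is no mapping and the read
 * reports all ones, so a caller could (illustratively) detect a vanished
 * device with: if (read_csr(dd, offset) == ~(u64)0) ...
 */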
1336
1337/**
1338 * write_csr - write CSR at the indicated offset
1339 * @dd - the dd device
1340 * @offset - the offset of the CSR within bar0
1341 * @value - value to write
1342 */
1343void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1344{
1345        if (dd->flags & HFI1_PRESENT) {
1346                void __iomem *base = hfi1_addr_from_offset(dd, offset);
1347
1348                /* avoid write to RcvArray */
1349                if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
1350                        return;
1351                writeq(value, base);
1352        }
1353}
1354
1355/**
1356 * get_csr_addr - return the iomem address for offset
1357 * @dd - the dd device
1358 * @offset - the offset of the CSR within bar0
1359 *
1360 * Return: The iomem address to use in subsequent
1361 * writeq/readq operations.
1362 */
1363void __iomem *get_csr_addr(
1364        const struct hfi1_devdata *dd,
1365        u32 offset)
1366{
1367        if (dd->flags & HFI1_PRESENT)
1368                return hfi1_addr_from_offset(dd, offset);
1369        return NULL;
1370}
1371
1372static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1373                                 int mode, u64 value)
1374{
1375        u64 ret;
1376
1377        if (mode == CNTR_MODE_R) {
1378                ret = read_csr(dd, csr);
1379        } else if (mode == CNTR_MODE_W) {
1380                write_csr(dd, csr, value);
1381                ret = value;
1382        } else {
1383                dd_dev_err(dd, "Invalid cntr register access mode");
1384                return 0;
1385        }
1386
1387        hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1388        return ret;
1389}
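
/*
 * Illustrative call (not driver code): the counter code reads a CSR-backed
 * device counter by invoking the entry's accessor, e.g.
 *
 *      val = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
 *
 * and for plain CSR counters that call lands in read_write_csr() above.
 */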
1390
1391/* Dev Access */
1392static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1393                              void *context, int vl, int mode, u64 data)
1394{
1395        struct hfi1_devdata *dd = context;
1396        u64 csr = entry->csr;
1397
1398        if (entry->flags & CNTR_SDMA) {
1399                if (vl == CNTR_INVALID_VL)
1400                        return 0;
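                /*
                 * For CNTR_SDMA counters "vl" carries the SDMA engine
                 * index; per-engine copies of the CSR are 0x100 apart.
                 */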
1401                csr += 0x100 * vl;
1402        } else {
1403                if (vl != CNTR_INVALID_VL)
1404                        return 0;
1405        }
1406        return read_write_csr(dd, csr, mode, data);
1407}
1408
1409static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1410                              void *context, int idx, int mode, u64 data)
1411{
1412        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1413
1414        if (dd->per_sdma && idx < dd->num_sdma)
1415                return dd->per_sdma[idx].err_cnt;
1416        return 0;
1417}
1418
1419static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1420                              void *context, int idx, int mode, u64 data)
1421{
1422        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1423
1424        if (dd->per_sdma && idx < dd->num_sdma)
1425                return dd->per_sdma[idx].sdma_int_cnt;
1426        return 0;
1427}
1428
1429static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1430                                   void *context, int idx, int mode, u64 data)
1431{
1432        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1433
1434        if (dd->per_sdma && idx < dd->num_sdma)
1435                return dd->per_sdma[idx].idle_int_cnt;
1436        return 0;
1437}
1438
1439static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1440                                       void *context, int idx, int mode,
1441                                       u64 data)
1442{
1443        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1444
1445        if (dd->per_sdma && idx < dd->num_sdma)
1446                return dd->per_sdma[idx].progress_int_cnt;
1447        return 0;
1448}
1449
1450static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1451                              int vl, int mode, u64 data)
1452{
1453        struct hfi1_devdata *dd = context;
1454
1455        u64 val = 0;
1456        u64 csr = entry->csr;
1457
1458        if (entry->flags & CNTR_VL) {
1459                if (vl == CNTR_INVALID_VL)
1460                        return 0;
1461                csr += 8 * vl;
1462        } else {
1463                if (vl != CNTR_INVALID_VL)
1464                        return 0;
1465        }
1466
1467        val = read_write_csr(dd, csr, mode, data);
1468        return val;
1469}
1470
1471static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1472                              int vl, int mode, u64 data)
1473{
1474        struct hfi1_devdata *dd = context;
1475        u32 csr = entry->csr;
1476        int ret = 0;
1477
1478        if (vl != CNTR_INVALID_VL)
1479                return 0;
1480        if (mode == CNTR_MODE_R)
1481                ret = read_lcb_csr(dd, csr, &data);
1482        else if (mode == CNTR_MODE_W)
1483                ret = write_lcb_csr(dd, csr, data);
1484
1485        if (ret) {
1486                dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1487                return 0;
1488        }
1489
1490        hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1491        return data;
1492}
1493
1494/* Port Access */
1495static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1496                               int vl, int mode, u64 data)
1497{
1498        struct hfi1_pportdata *ppd = context;
1499
1500        if (vl != CNTR_INVALID_VL)
1501                return 0;
1502        return read_write_csr(ppd->dd, entry->csr, mode, data);
1503}
1504
1505static u64 port_access_u64_csr(const struct cntr_entry *entry,
1506                               void *context, int vl, int mode, u64 data)
1507{
1508        struct hfi1_pportdata *ppd = context;
1509        u64 val;
1510        u64 csr = entry->csr;
1511
1512        if (entry->flags & CNTR_VL) {
1513                if (vl == CNTR_INVALID_VL)
1514                        return 0;
1515                csr += 8 * vl;
1516        } else {
1517                if (vl != CNTR_INVALID_VL)
1518                        return 0;
1519        }
1520        val = read_write_csr(ppd->dd, csr, mode, data);
1521        return val;
1522}
1523
1524/* Software defined */
1525static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1526                                u64 data)
1527{
1528        u64 ret;
1529
1530        if (mode == CNTR_MODE_R) {
1531                ret = *cntr;
1532        } else if (mode == CNTR_MODE_W) {
1533                *cntr = data;
1534                ret = data;
1535        } else {
1536                dd_dev_err(dd, "Invalid cntr sw access mode");
1537                return 0;
1538        }
1539
1540        hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1541
1542        return ret;
1543}
1544
1545static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1546                                 int vl, int mode, u64 data)
1547{
1548        struct hfi1_pportdata *ppd = context;
1549
1550        if (vl != CNTR_INVALID_VL)
1551                return 0;
1552        return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1553}
1554
1555static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1556                                 int vl, int mode, u64 data)
1557{
1558        struct hfi1_pportdata *ppd = context;
1559
1560        if (vl != CNTR_INVALID_VL)
1561                return 0;
1562        return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1563}
1564
1565static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1566                                       void *context, int vl, int mode,
1567                                       u64 data)
1568{
1569        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1570
1571        if (vl != CNTR_INVALID_VL)
1572                return 0;
1573        return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1574}
1575
1576static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1577                                   void *context, int vl, int mode, u64 data)
1578{
1579        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1580        u64 zero = 0;
1581        u64 *counter;
1582
1583        if (vl == CNTR_INVALID_VL)
1584                counter = &ppd->port_xmit_discards;
1585        else if (vl >= 0 && vl < C_VL_COUNT)
1586                counter = &ppd->port_xmit_discards_vl[vl];
1587        else
1588                counter = &zero;
1589
1590        return read_write_sw(ppd->dd, counter, mode, data);
1591}
1592
1593static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1594                                       void *context, int vl, int mode,
1595                                       u64 data)
1596{
1597        struct hfi1_pportdata *ppd = context;
1598
1599        if (vl != CNTR_INVALID_VL)
1600                return 0;
1601
1602        return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1603                             mode, data);
1604}
1605
1606static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1607                                      void *context, int vl, int mode, u64 data)
1608{
1609        struct hfi1_pportdata *ppd = context;
1610
1611        if (vl != CNTR_INVALID_VL)
1612                return 0;
1613
1614        return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1615                             mode, data);
1616}
1617
1618u64 get_all_cpu_total(u64 __percpu *cntr)
1619{
1620        int cpu;
1621        u64 counter = 0;
1622
1623        for_each_possible_cpu(cpu)
1624                counter += *per_cpu_ptr(cntr, cpu);
1625        return counter;
1626}
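
/*
 * Sketch of the per-CPU counter pattern used by these counters (the actual
 * allocation happens in driver init code elsewhere; shown only to
 * illustrate the fast/slow paths):
 */
#if 0   /* example only */
static void example_percpu_counter(void)
{
        u64 __percpu *cntr = alloc_percpu(u64); /* one slot per CPU */
        u64 total;

        if (!cntr)
                return;
        this_cpu_inc(*cntr);                 /* lock-free fast-path update */
        total = get_all_cpu_total(cntr);     /* slow path: sum every CPU */
        free_percpu(cntr);
}
#endif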
1627
1628static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1629                          u64 __percpu *cntr,
1630                          int vl, int mode, u64 data)
1631{
1632        u64 ret = 0;
1633
1634        if (vl != CNTR_INVALID_VL)
1635                return 0;
1636
1637        if (mode == CNTR_MODE_R) {
1638                ret = get_all_cpu_total(cntr) - *z_val;
1639        } else if (mode == CNTR_MODE_W) {
1640                /* A write can only zero the counter */
1641                if (data == 0)
1642                        *z_val = get_all_cpu_total(cntr);
1643                else
1644                        dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1645        } else {
1646                dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1647                return 0;
1648        }
1649
1650        return ret;
1651}
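
/*
 * Note: "zeroing" never touches the per-CPU slots themselves; a write of 0
 * just snapshots the current total into *z_val, and later reads report the
 * delta (current total - snapshot).
 */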
1652
1653static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1654                              void *context, int vl, int mode, u64 data)
1655{
1656        struct hfi1_devdata *dd = context;
1657
1658        return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1659                              mode, data);
1660}
1661
1662static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1663                                   void *context, int vl, int mode, u64 data)
1664{
1665        struct hfi1_devdata *dd = context;
1666
1667        return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1668                              mode, data);
1669}
1670
1671static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1672                              void *context, int vl, int mode, u64 data)
1673{
1674        struct hfi1_devdata *dd = context;
1675
1676        return dd->verbs_dev.n_piowait;
1677}
1678
1679static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1680                               void *context, int vl, int mode, u64 data)
1681{
1682        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1683
1684        return dd->verbs_dev.n_piodrain;
1685}
1686
1687static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1688                              void *context, int vl, int mode, u64 data)
1689{
1690        struct hfi1_devdata *dd = context;
1691
1692        return dd->verbs_dev.n_txwait;
1693}
1694
1695static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1696                               void *context, int vl, int mode, u64 data)
1697{
1698        struct hfi1_devdata *dd = context;
1699
1700        return dd->verbs_dev.n_kmem_wait;
1701}
1702
1703static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1704                                   void *context, int vl, int mode, u64 data)
1705{
1706        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1707
1708        return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1709                              mode, data);
1710}
1711
1712/* Software counters for the error status bits within MISC_ERR_STATUS */
1713static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1714                                             void *context, int vl, int mode,
1715                                             u64 data)
1716{
1717        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1718
1719        return dd->misc_err_status_cnt[12];
1720}
1721
1722static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1723                                          void *context, int vl, int mode,
1724                                          u64 data)
1725{
1726        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1727
1728        return dd->misc_err_status_cnt[11];
1729}
1730
1731static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1732                                               void *context, int vl, int mode,
1733                                               u64 data)
1734{
1735        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1736
1737        return dd->misc_err_status_cnt[10];
1738}
1739
1740static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1741                                                 void *context, int vl,
1742                                                 int mode, u64 data)
1743{
1744        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1745
1746        return dd->misc_err_status_cnt[9];
1747}
1748
1749static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1750                                           void *context, int vl, int mode,
1751                                           u64 data)
1752{
1753        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1754
1755        return dd->misc_err_status_cnt[8];
1756}
1757
1758static u64 access_misc_efuse_read_bad_addr_err_cnt(
1759                                const struct cntr_entry *entry,
1760                                void *context, int vl, int mode, u64 data)
1761{
1762        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1763
1764        return dd->misc_err_status_cnt[7];
1765}
1766
1767static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1768                                                void *context, int vl,
1769                                                int mode, u64 data)
1770{
1771        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1772
1773        return dd->misc_err_status_cnt[6];
1774}
1775
1776static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1777                                              void *context, int vl, int mode,
1778                                              u64 data)
1779{
1780        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1781
1782        return dd->misc_err_status_cnt[5];
1783}
1784
1785static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1786                                            void *context, int vl, int mode,
1787                                            u64 data)
1788{
1789        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1790
1791        return dd->misc_err_status_cnt[4];
1792}
1793
1794static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1795                                                 void *context, int vl,
1796                                                 int mode, u64 data)
1797{
1798        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1799
1800        return dd->misc_err_status_cnt[3];
1801}
1802
1803static u64 access_misc_csr_write_bad_addr_err_cnt(
1804                                const struct cntr_entry *entry,
1805                                void *context, int vl, int mode, u64 data)
1806{
1807        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1808
1809        return dd->misc_err_status_cnt[2];
1810}
1811
1812static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1813                                                 void *context, int vl,
1814                                                 int mode, u64 data)
1815{
1816        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1817
1818        return dd->misc_err_status_cnt[1];
1819}
1820
1821static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1822                                          void *context, int vl, int mode,
1823                                          u64 data)
1824{
1825        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1826
1827        return dd->misc_err_status_cnt[0];
1828}
1829
1830/*
1831 * Software counter for the aggregate of
1832 * individual CceErrStatus counters
1833 */
1834static u64 access_sw_cce_err_status_aggregated_cnt(
1835                                const struct cntr_entry *entry,
1836                                void *context, int vl, int mode, u64 data)
1837{
1838        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1839
1840        return dd->sw_cce_err_status_aggregate;
1841}
1842
1843/*
1844 * Software counters corresponding to each of the
1845 * error status bits within CceErrStatus
1846 */
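/*
 * (Counter index N here is taken to track CceErrStatus bit N: the error
 * handler is expected to bump dd->cce_err_status_cnt[bit] for each status
 * bit it observes, and these accessors simply report those totals.)
 */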
1847static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1848                                              void *context, int vl, int mode,
1849                                              u64 data)
1850{
1851        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1852
1853        return dd->cce_err_status_cnt[40];
1854}
1855
1856static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1857                                          void *context, int vl, int mode,
1858                                          u64 data)
1859{
1860        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1861
1862        return dd->cce_err_status_cnt[39];
1863}
1864
1865static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1866                                          void *context, int vl, int mode,
1867                                          u64 data)
1868{
1869        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1870
1871        return dd->cce_err_status_cnt[38];
1872}
1873
1874static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1875                                             void *context, int vl, int mode,
1876                                             u64 data)
1877{
1878        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1879
1880        return dd->cce_err_status_cnt[37];
1881}
1882
1883static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1884                                             void *context, int vl, int mode,
1885                                             u64 data)
1886{
1887        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1888
1889        return dd->cce_err_status_cnt[36];
1890}
1891
1892static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1893                                const struct cntr_entry *entry,
1894                                void *context, int vl, int mode, u64 data)
1895{
1896        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1897
1898        return dd->cce_err_status_cnt[35];
1899}
1900
1901static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1902                                const struct cntr_entry *entry,
1903                                void *context, int vl, int mode, u64 data)
1904{
1905        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1906
1907        return dd->cce_err_status_cnt[34];
1908}
1909
1910static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1911                                                 void *context, int vl,
1912                                                 int mode, u64 data)
1913{
1914        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1915
1916        return dd->cce_err_status_cnt[33];
1917}
1918
1919static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1920                                                void *context, int vl, int mode,
1921                                                u64 data)
1922{
1923        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1924
1925        return dd->cce_err_status_cnt[32];
1926}
1927
1928static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1929                                   void *context, int vl, int mode, u64 data)
1930{
1931        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1932
1933        return dd->cce_err_status_cnt[31];
1934}
1935
1936static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1937                                               void *context, int vl, int mode,
1938                                               u64 data)
1939{
1940        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1941
1942        return dd->cce_err_status_cnt[30];
1943}
1944
1945static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1946                                              void *context, int vl, int mode,
1947                                              u64 data)
1948{
1949        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1950
1951        return dd->cce_err_status_cnt[29];
1952}
1953
1954static u64 access_pcic_transmit_back_parity_err_cnt(
1955                                const struct cntr_entry *entry,
1956                                void *context, int vl, int mode, u64 data)
1957{
1958        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1959
1960        return dd->cce_err_status_cnt[28];
1961}
1962
1963static u64 access_pcic_transmit_front_parity_err_cnt(
1964                                const struct cntr_entry *entry,
1965                                void *context, int vl, int mode, u64 data)
1966{
1967        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1968
1969        return dd->cce_err_status_cnt[27];
1970}
1971
1972static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1973                                             void *context, int vl, int mode,
1974                                             u64 data)
1975{
1976        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1977
1978        return dd->cce_err_status_cnt[26];
1979}
1980
1981static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1982                                            void *context, int vl, int mode,
1983                                            u64 data)
1984{
1985        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1986
1987        return dd->cce_err_status_cnt[25];
1988}
1989
1990static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1991                                              void *context, int vl, int mode,
1992                                              u64 data)
1993{
1994        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1995
1996        return dd->cce_err_status_cnt[24];
1997}
1998
1999static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
2000                                             void *context, int vl, int mode,
2001                                             u64 data)
2002{
2003        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2004
2005        return dd->cce_err_status_cnt[23];
2006}
2007
2008static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
2009                                                 void *context, int vl,
2010                                                 int mode, u64 data)
2011{
2012        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2013
2014        return dd->cce_err_status_cnt[22];
2015}
2016
2017static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
2018                                         void *context, int vl, int mode,
2019                                         u64 data)
2020{
2021        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2022
2023        return dd->cce_err_status_cnt[21];
2024}
2025
2026static u64 access_pcic_n_post_dat_q_parity_err_cnt(
2027                                const struct cntr_entry *entry,
2028                                void *context, int vl, int mode, u64 data)
2029{
2030        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2031
2032        return dd->cce_err_status_cnt[20];
2033}
2034
2035static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
2036                                                 void *context, int vl,
2037                                                 int mode, u64 data)
2038{
2039        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2040
2041        return dd->cce_err_status_cnt[19];
2042}
2043
2044static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2045                                             void *context, int vl, int mode,
2046                                             u64 data)
2047{
2048        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2049
2050        return dd->cce_err_status_cnt[18];
2051}
2052
2053static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2054                                            void *context, int vl, int mode,
2055                                            u64 data)
2056{
2057        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2058
2059        return dd->cce_err_status_cnt[17];
2060}
2061
2062static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2063                                              void *context, int vl, int mode,
2064                                              u64 data)
2065{
2066        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2067
2068        return dd->cce_err_status_cnt[16];
2069}
2070
2071static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2072                                             void *context, int vl, int mode,
2073                                             u64 data)
2074{
2075        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2076
2077        return dd->cce_err_status_cnt[15];
2078}
2079
2080static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
2081                                                 void *context, int vl,
2082                                                 int mode, u64 data)
2083{
2084        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2085
2086        return dd->cce_err_status_cnt[14];
2087}
2088
2089static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2090                                             void *context, int vl, int mode,
2091                                             u64 data)
2092{
2093        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2094
2095        return dd->cce_err_status_cnt[13];
2096}
2097
2098static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2099                                const struct cntr_entry *entry,
2100                                void *context, int vl, int mode, u64 data)
2101{
2102        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2103
2104        return dd->cce_err_status_cnt[12];
2105}
2106
2107static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2108                                const struct cntr_entry *entry,
2109                                void *context, int vl, int mode, u64 data)
2110{
2111        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2112
2113        return dd->cce_err_status_cnt[11];
2114}
2115
2116static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2117                                const struct cntr_entry *entry,
2118                                void *context, int vl, int mode, u64 data)
2119{
2120        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2121
2122        return dd->cce_err_status_cnt[10];
2123}
2124
2125static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2126                                const struct cntr_entry *entry,
2127                                void *context, int vl, int mode, u64 data)
2128{
2129        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2130
2131        return dd->cce_err_status_cnt[9];
2132}
2133
2134static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2135                                const struct cntr_entry *entry,
2136                                void *context, int vl, int mode, u64 data)
2137{
2138        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2139
2140        return dd->cce_err_status_cnt[8];
2141}
2142
2143static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2144                                                 void *context, int vl,
2145                                                 int mode, u64 data)
2146{
2147        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2148
2149        return dd->cce_err_status_cnt[7];
2150}
2151
2152static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2153                                const struct cntr_entry *entry,
2154                                void *context, int vl, int mode, u64 data)
2155{
2156        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2157
2158        return dd->cce_err_status_cnt[6];
2159}
2160
2161static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2162                                               void *context, int vl, int mode,
2163                                               u64 data)
2164{
2165        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2166
2167        return dd->cce_err_status_cnt[5];
2168}
2169
2170static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2171                                          void *context, int vl, int mode,
2172                                          u64 data)
2173{
2174        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2175
2176        return dd->cce_err_status_cnt[4];
2177}
2178
2179static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2180                                const struct cntr_entry *entry,
2181                                void *context, int vl, int mode, u64 data)
2182{
2183        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2184
2185        return dd->cce_err_status_cnt[3];
2186}
2187
2188static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2189                                                 void *context, int vl,
2190                                                 int mode, u64 data)
2191{
2192        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2193
2194        return dd->cce_err_status_cnt[2];
2195}
2196
2197static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2198                                                void *context, int vl,
2199                                                int mode, u64 data)
2200{
2201        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2202
2203        return dd->cce_err_status_cnt[1];
2204}
2205
2206static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2207                                         void *context, int vl, int mode,
2208                                         u64 data)
2209{
2210        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2211
2212        return dd->cce_err_status_cnt[0];
2213}
2214
2215/*
2216 * Software counters corresponding to each of the
2217 * error status bits within RcvErrStatus
2218 */
2219static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2220                                        void *context, int vl, int mode,
2221                                        u64 data)
2222{
2223        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2224
2225        return dd->rcv_err_status_cnt[63];
2226}
2227
2228static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2229                                                void *context, int vl,
2230                                                int mode, u64 data)
2231{
2232        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2233
2234        return dd->rcv_err_status_cnt[62];
2235}
2236
2237static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2238                                               void *context, int vl, int mode,
2239                                               u64 data)
2240{
2241        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2242
2243        return dd->rcv_err_status_cnt[61];
2244}
2245
2246static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2247                                         void *context, int vl, int mode,
2248                                         u64 data)
2249{
2250        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2251
2252        return dd->rcv_err_status_cnt[60];
2253}
2254
2255static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2256                                                 void *context, int vl,
2257                                                 int mode, u64 data)
2258{
2259        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2260
2261        return dd->rcv_err_status_cnt[59];
2262}
2263
2264static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2265                                                 void *context, int vl,
2266                                                 int mode, u64 data)
2267{
2268        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2269
2270        return dd->rcv_err_status_cnt[58];
2271}
2272
2273static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2274                                            void *context, int vl, int mode,
2275                                            u64 data)
2276{
2277        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2278
2279        return dd->rcv_err_status_cnt[57];
2280}
2281
2282static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2283                                           void *context, int vl, int mode,
2284                                           u64 data)
2285{
2286        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2287
2288        return dd->rcv_err_status_cnt[56];
2289}
2290
2291static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2292                                           void *context, int vl, int mode,
2293                                           u64 data)
2294{
2295        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2296
2297        return dd->rcv_err_status_cnt[55];
2298}
2299
2300static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2301                                const struct cntr_entry *entry,
2302                                void *context, int vl, int mode, u64 data)
2303{
2304        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2305
2306        return dd->rcv_err_status_cnt[54];
2307}
2308
2309static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2310                                const struct cntr_entry *entry,
2311                                void *context, int vl, int mode, u64 data)
2312{
2313        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2314
2315        return dd->rcv_err_status_cnt[53];
2316}
2317
2318static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2319                                                 void *context, int vl,
2320                                                 int mode, u64 data)
2321{
2322        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2323
2324        return dd->rcv_err_status_cnt[52];
2325}
2326
2327static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2328                                                 void *context, int vl,
2329                                                 int mode, u64 data)
2330{
2331        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2332
2333        return dd->rcv_err_status_cnt[51];
2334}
2335
2336static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2337                                                 void *context, int vl,
2338                                                 int mode, u64 data)
2339{
2340        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2341
2342        return dd->rcv_err_status_cnt[50];
2343}
2344
2345static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2346                                                 void *context, int vl,
2347                                                 int mode, u64 data)
2348{
2349        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2350
2351        return dd->rcv_err_status_cnt[49];
2352}
2353
2354static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2355                                                 void *context, int vl,
2356                                                 int mode, u64 data)
2357{
2358        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2359
2360        return dd->rcv_err_status_cnt[48];
2361}
2362
2363static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2364                                                 void *context, int vl,
2365                                                 int mode, u64 data)
2366{
2367        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2368
2369        return dd->rcv_err_status_cnt[47];
2370}
2371
2372static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2373                                         void *context, int vl, int mode,
2374                                         u64 data)
2375{
2376        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2377
2378        return dd->rcv_err_status_cnt[46];
2379}
2380
2381static u64 access_rx_hq_intr_csr_parity_err_cnt(
2382                                const struct cntr_entry *entry,
2383                                void *context, int vl, int mode, u64 data)
2384{
2385        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2386
2387        return dd->rcv_err_status_cnt[45];
2388}
2389
2390static u64 access_rx_lookup_csr_parity_err_cnt(
2391                                const struct cntr_entry *entry,
2392                                void *context, int vl, int mode, u64 data)
2393{
2394        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2395
2396        return dd->rcv_err_status_cnt[44];
2397}
2398
2399static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2400                                const struct cntr_entry *entry,
2401                                void *context, int vl, int mode, u64 data)
2402{
2403        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2404
2405        return dd->rcv_err_status_cnt[43];
2406}
2407
2408static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2409                                const struct cntr_entry *entry,
2410                                void *context, int vl, int mode, u64 data)
2411{
2412        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2413
2414        return dd->rcv_err_status_cnt[42];
2415}
2416
2417static u64 access_rx_lookup_des_part2_parity_err_cnt(
2418                                const struct cntr_entry *entry,
2419                                void *context, int vl, int mode, u64 data)
2420{
2421        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2422
2423        return dd->rcv_err_status_cnt[41];
2424}
2425
2426static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2427                                const struct cntr_entry *entry,
2428                                void *context, int vl, int mode, u64 data)
2429{
2430        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2431
2432        return dd->rcv_err_status_cnt[40];
2433}
2434
2435static u64 access_rx_lookup_des_part1_unc_err_cnt(
2436                                const struct cntr_entry *entry,
2437                                void *context, int vl, int mode, u64 data)
2438{
2439        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2440
2441        return dd->rcv_err_status_cnt[39];
2442}
2443
2444static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2445                                const struct cntr_entry *entry,
2446                                void *context, int vl, int mode, u64 data)
2447{
2448        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2449
2450        return dd->rcv_err_status_cnt[38];
2451}
2452
2453static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2454                                const struct cntr_entry *entry,
2455                                void *context, int vl, int mode, u64 data)
2456{
2457        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2458
2459        return dd->rcv_err_status_cnt[37];
2460}
2461
2462static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2463                                const struct cntr_entry *entry,
2464                                void *context, int vl, int mode, u64 data)
2465{
2466        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2467
2468        return dd->rcv_err_status_cnt[36];
2469}
2470
2471static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2472                                const struct cntr_entry *entry,
2473                                void *context, int vl, int mode, u64 data)
2474{
2475        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2476
2477        return dd->rcv_err_status_cnt[35];
2478}
2479
2480static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2481                                const struct cntr_entry *entry,
2482                                void *context, int vl, int mode, u64 data)
2483{
2484        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2485
2486        return dd->rcv_err_status_cnt[34];
2487}
2488
2489static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2490                                const struct cntr_entry *entry,
2491                                void *context, int vl, int mode, u64 data)
2492{
2493        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2494
2495        return dd->rcv_err_status_cnt[33];
2496}
2497
2498static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2499                                        void *context, int vl, int mode,
2500                                        u64 data)
2501{
2502        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2503
2504        return dd->rcv_err_status_cnt[32];
2505}
2506
2507static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2508                                       void *context, int vl, int mode,
2509                                       u64 data)
2510{
2511        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2512
2513        return dd->rcv_err_status_cnt[31];
2514}
2515
2516static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2517                                          void *context, int vl, int mode,
2518                                          u64 data)
2519{
2520        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2521
2522        return dd->rcv_err_status_cnt[30];
2523}
2524
2525static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2526                                             void *context, int vl, int mode,
2527                                             u64 data)
2528{
2529        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2530
2531        return dd->rcv_err_status_cnt[29];
2532}
2533
2534static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2535                                                 void *context, int vl,
2536                                                 int mode, u64 data)
2537{
2538        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2539
2540        return dd->rcv_err_status_cnt[28];
2541}
2542
2543static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2544                                const struct cntr_entry *entry,
2545                                void *context, int vl, int mode, u64 data)
2546{
2547        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2548
2549        return dd->rcv_err_status_cnt[27];
2550}
2551
2552static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2553                                const struct cntr_entry *entry,
2554                                void *context, int vl, int mode, u64 data)
2555{
2556        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2557
2558        return dd->rcv_err_status_cnt[26];
2559}
2560
2561static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2562                                const struct cntr_entry *entry,
2563                                void *context, int vl, int mode, u64 data)
2564{
2565        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2566
2567        return dd->rcv_err_status_cnt[25];
2568}
2569
2570static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2571                                const struct cntr_entry *entry,
2572                                void *context, int vl, int mode, u64 data)
2573{
2574        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2575
2576        return dd->rcv_err_status_cnt[24];
2577}
2578
2579static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2580                                const struct cntr_entry *entry,
2581                                void *context, int vl, int mode, u64 data)
2582{
2583        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2584
2585        return dd->rcv_err_status_cnt[23];
2586}
2587
2588static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2589                                const struct cntr_entry *entry,
2590                                void *context, int vl, int mode, u64 data)
2591{
2592        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2593
2594        return dd->rcv_err_status_cnt[22];
2595}
2596
2597static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2598                                const struct cntr_entry *entry,
2599                                void *context, int vl, int mode, u64 data)
2600{
2601        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2602
2603        return dd->rcv_err_status_cnt[21];
2604}
2605
2606static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2607                                const struct cntr_entry *entry,
2608                                void *context, int vl, int mode, u64 data)
2609{
2610        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2611
2612        return dd->rcv_err_status_cnt[20];
2613}
2614
2615static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2616                                const struct cntr_entry *entry,
2617                                void *context, int vl, int mode, u64 data)
2618{
2619        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2620
2621        return dd->rcv_err_status_cnt[19];
2622}
2623
2624static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2625                                                 void *context, int vl,
2626                                                 int mode, u64 data)
2627{
2628        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2629
2630        return dd->rcv_err_status_cnt[18];
2631}
2632
2633static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2634                                                 void *context, int vl,
2635                                                 int mode, u64 data)
2636{
2637        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2638
2639        return dd->rcv_err_status_cnt[17];
2640}
2641
2642static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2643                                const struct cntr_entry *entry,
2644                                void *context, int vl, int mode, u64 data)
2645{
2646        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2647
2648        return dd->rcv_err_status_cnt[16];
2649}
2650
2651static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2652                                const struct cntr_entry *entry,
2653                                void *context, int vl, int mode, u64 data)
2654{
2655        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2656
2657        return dd->rcv_err_status_cnt[15];
2658}
2659
2660static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2661                                                void *context, int vl,
2662                                                int mode, u64 data)
2663{
2664        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2665
2666        return dd->rcv_err_status_cnt[14];
2667}
2668
2669static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2670                                                void *context, int vl,
2671                                                int mode, u64 data)
2672{
2673        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2674
2675        return dd->rcv_err_status_cnt[13];
2676}
2677
2678static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2679                                              void *context, int vl, int mode,
2680                                              u64 data)
2681{
2682        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2683
2684        return dd->rcv_err_status_cnt[12];
2685}
2686
2687static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2688                                          void *context, int vl, int mode,
2689                                          u64 data)
2690{
2691        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2692
2693        return dd->rcv_err_status_cnt[11];
2694}
2695
2696static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2697                                          void *context, int vl, int mode,
2698                                          u64 data)
2699{
2700        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2701
2702        return dd->rcv_err_status_cnt[10];
2703}
2704
2705static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2706                                               void *context, int vl, int mode,
2707                                               u64 data)
2708{
2709        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2710
2711        return dd->rcv_err_status_cnt[9];
2712}
2713
2714static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2715                                            void *context, int vl, int mode,
2716                                            u64 data)
2717{
2718        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2719
2720        return dd->rcv_err_status_cnt[8];
2721}
2722
2723static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2724                                const struct cntr_entry *entry,
2725                                void *context, int vl, int mode, u64 data)
2726{
2727        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2728
2729        return dd->rcv_err_status_cnt[7];
2730}
2731
2732static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2733                                const struct cntr_entry *entry,
2734                                void *context, int vl, int mode, u64 data)
2735{
2736        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2737
2738        return dd->rcv_err_status_cnt[6];
2739}
2740
2741static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2742                                          void *context, int vl, int mode,
2743                                          u64 data)
2744{
2745        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2746
2747        return dd->rcv_err_status_cnt[5];
2748}
2749
2750static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2751                                          void *context, int vl, int mode,
2752                                          u64 data)
2753{
2754        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2755
2756        return dd->rcv_err_status_cnt[4];
2757}
2758
2759static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2760                                         void *context, int vl, int mode,
2761                                         u64 data)
2762{
2763        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2764
2765        return dd->rcv_err_status_cnt[3];
2766}
2767
2768static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2769                                         void *context, int vl, int mode,
2770                                         u64 data)
2771{
2772        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2773
2774        return dd->rcv_err_status_cnt[2];
2775}
2776
2777static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2778                                            void *context, int vl, int mode,
2779                                            u64 data)
2780{
2781        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2782
2783        return dd->rcv_err_status_cnt[1];
2784}
2785
2786static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2787                                         void *context, int vl, int mode,
2788                                         u64 data)
2789{
2790        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2791
2792        return dd->rcv_err_status_cnt[0];
2793}
2794
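/*
 * Each accessor above maps one bit position of the RcvErrStatus register
 * onto the software counter kept at the same index of
 * dd->rcv_err_status_cnt[], so bit N is always read back from slot N.
 * As a hedged illustration only (not the driver's code; the handler name
 * below is made up), such an array is typically kept current by an
 * error-interrupt handler that walks the set bits of the status value it
 * just read:
 *
 *      static void count_rcv_errs(struct hfi1_devdata *dd, u64 reg)
 *      {
 *              unsigned int i;
 *
 *              // bump the per-bit software counter for every error
 *              // reported in this interrupt
 *              for (i = 0; i < ARRAY_SIZE(dd->rcv_err_status_cnt); i++)
 *                      if (reg & (1ull << i))
 *                              dd->rcv_err_status_cnt[i]++;
 *      }
 *
 * The accessors then only need to return the accumulated totals when the
 * counter framework asks for them.
 */
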
2795/*
2796 * Software counters corresponding to each of the
2797 * error status bits within SendPioErrStatus
2798 */
2799static u64 access_pio_pec_sop_head_parity_err_cnt(
2800                                const struct cntr_entry *entry,
2801                                void *context, int vl, int mode, u64 data)
2802{
2803        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2804
2805        return dd->send_pio_err_status_cnt[35];
2806}
2807
2808static u64 access_pio_pcc_sop_head_parity_err_cnt(
2809                                const struct cntr_entry *entry,
2810                                void *context, int vl, int mode, u64 data)
2811{
2812        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2813
2814        return dd->send_pio_err_status_cnt[34];
2815}
2816
2817static u64 access_pio_last_returned_cnt_parity_err_cnt(
2818                                const struct cntr_entry *entry,
2819                                void *context, int vl, int mode, u64 data)
2820{
2821        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2822
2823        return dd->send_pio_err_status_cnt[33];
2824}
2825
2826static u64 access_pio_current_free_cnt_parity_err_cnt(
2827                                const struct cntr_entry *entry,
2828                                void *context, int vl, int mode, u64 data)
2829{
2830        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2831
2832        return dd->send_pio_err_status_cnt[32];
2833}
2834
2835static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2836                                          void *context, int vl, int mode,
2837                                          u64 data)
2838{
2839        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2840
2841        return dd->send_pio_err_status_cnt[31];
2842}
2843
2844static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2845                                          void *context, int vl, int mode,
2846                                          u64 data)
2847{
2848        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2849
2850        return dd->send_pio_err_status_cnt[30];
2851}
2852
2853static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2854                                           void *context, int vl, int mode,
2855                                           u64 data)
2856{
2857        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2858
2859        return dd->send_pio_err_status_cnt[29];
2860}
2861
2862static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2863                                const struct cntr_entry *entry,
2864                                void *context, int vl, int mode, u64 data)
2865{
2866        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2867
2868        return dd->send_pio_err_status_cnt[28];
2869}
2870
2871static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2872                                             void *context, int vl, int mode,
2873                                             u64 data)
2874{
2875        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2876
2877        return dd->send_pio_err_status_cnt[27];
2878}
2879
2880static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2881                                             void *context, int vl, int mode,
2882                                             u64 data)
2883{
2884        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2885
2886        return dd->send_pio_err_status_cnt[26];
2887}
2888
2889static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2890                                                void *context, int vl,
2891                                                int mode, u64 data)
2892{
2893        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2894
2895        return dd->send_pio_err_status_cnt[25];
2896}
2897
2898static u64 access_pio_block_qw_count_parity_err_cnt(
2899                                const struct cntr_entry *entry,
2900                                void *context, int vl, int mode, u64 data)
2901{
2902        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2903
2904        return dd->send_pio_err_status_cnt[24];
2905}
2906
2907static u64 access_pio_write_qw_valid_parity_err_cnt(
2908                                const struct cntr_entry *entry,
2909                                void *context, int vl, int mode, u64 data)
2910{
2911        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2912
2913        return dd->send_pio_err_status_cnt[23];
2914}
2915
2916static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2917                                            void *context, int vl, int mode,
2918                                            u64 data)
2919{
2920        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2921
2922        return dd->send_pio_err_status_cnt[22];
2923}
2924
2925static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2926                                                void *context, int vl,
2927                                                int mode, u64 data)
2928{
2929        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2930
2931        return dd->send_pio_err_status_cnt[21];
2932}
2933
2934static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2935                                                void *context, int vl,
2936                                                int mode, u64 data)
2937{
2938        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2939
2940        return dd->send_pio_err_status_cnt[20];
2941}
2942
2943static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2944                                                void *context, int vl,
2945                                                int mode, u64 data)
2946{
2947        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2948
2949        return dd->send_pio_err_status_cnt[19];
2950}
2951
2952static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2953                                const struct cntr_entry *entry,
2954                                void *context, int vl, int mode, u64 data)
2955{
2956        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2957
2958        return dd->send_pio_err_status_cnt[18];
2959}
2960
2961static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2962                                         void *context, int vl, int mode,
2963                                         u64 data)
2964{
2965        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2966
2967        return dd->send_pio_err_status_cnt[17];
2968}
2969
2970static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2971                                            void *context, int vl, int mode,
2972                                            u64 data)
2973{
2974        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2975
2976        return dd->send_pio_err_status_cnt[16];
2977}
2978
2979static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2980                                const struct cntr_entry *entry,
2981                                void *context, int vl, int mode, u64 data)
2982{
2983        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2984
2985        return dd->send_pio_err_status_cnt[15];
2986}
2987
2988static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2989                                const struct cntr_entry *entry,
2990                                void *context, int vl, int mode, u64 data)
2991{
2992        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2993
2994        return dd->send_pio_err_status_cnt[14];
2995}
2996
2997static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2998                                const struct cntr_entry *entry,
2999                                void *context, int vl, int mode, u64 data)
3000{
3001        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3002
3003        return dd->send_pio_err_status_cnt[13];
3004}
3005
3006static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
3007                                const struct cntr_entry *entry,
3008                                void *context, int vl, int mode, u64 data)
3009{
3010        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3011
3012        return dd->send_pio_err_status_cnt[12];
3013}
3014
3015static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
3016                                const struct cntr_entry *entry,
3017                                void *context, int vl, int mode, u64 data)
3018{
3019        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3020
3021        return dd->send_pio_err_status_cnt[11];
3022}
3023
3024static u64 access_pio_sm_pkt_reset_parity_err_cnt(
3025                                const struct cntr_entry *entry,
3026                                void *context, int vl, int mode, u64 data)
3027{
3028        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3029
3030        return dd->send_pio_err_status_cnt[10];
3031}
3032
3033static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
3034                                const struct cntr_entry *entry,
3035                                void *context, int vl, int mode, u64 data)
3036{
3037        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3038
3039        return dd->send_pio_err_status_cnt[9];
3040}
3041
3042static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
3043                                const struct cntr_entry *entry,
3044                                void *context, int vl, int mode, u64 data)
3045{
3046        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3047
3048        return dd->send_pio_err_status_cnt[8];
3049}
3050
3051static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
3052                                const struct cntr_entry *entry,
3053                                void *context, int vl, int mode, u64 data)
3054{
3055        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3056
3057        return dd->send_pio_err_status_cnt[7];
3058}
3059
3060static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
3061                                              void *context, int vl, int mode,
3062                                              u64 data)
3063{
3064        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3065
3066        return dd->send_pio_err_status_cnt[6];
3067}
3068
3069static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
3070                                              void *context, int vl, int mode,
3071                                              u64 data)
3072{
3073        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3074
3075        return dd->send_pio_err_status_cnt[5];
3076}
3077
3078static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
3079                                           void *context, int vl, int mode,
3080                                           u64 data)
3081{
3082        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3083
3084        return dd->send_pio_err_status_cnt[4];
3085}
3086
3087static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3088                                           void *context, int vl, int mode,
3089                                           u64 data)
3090{
3091        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3092
3093        return dd->send_pio_err_status_cnt[3];
3094}
3095
3096static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3097                                         void *context, int vl, int mode,
3098                                         u64 data)
3099{
3100        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3101
3102        return dd->send_pio_err_status_cnt[2];
3103}
3104
3105static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3106                                                void *context, int vl,
3107                                                int mode, u64 data)
3108{
3109        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3110
3111        return dd->send_pio_err_status_cnt[1];
3112}
3113
3114static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3115                                             void *context, int vl, int mode,
3116                                             u64 data)
3117{
3118        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3119
3120        return dd->send_pio_err_status_cnt[0];
3121}
3122
3123/*
3124 * Software counters corresponding to each of the
3125 * error status bits within SendDmaErrStatus
3126 */
3127static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3128                                const struct cntr_entry *entry,
3129                                void *context, int vl, int mode, u64 data)
3130{
3131        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3132
3133        return dd->send_dma_err_status_cnt[3];
3134}
3135
3136static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3137                                const struct cntr_entry *entry,
3138                                void *context, int vl, int mode, u64 data)
3139{
3140        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3141
3142        return dd->send_dma_err_status_cnt[2];
3143}
3144
3145static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3146                                          void *context, int vl, int mode,
3147                                          u64 data)
3148{
3149        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3150
3151        return dd->send_dma_err_status_cnt[1];
3152}
3153
3154static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3155                                       void *context, int vl, int mode,
3156                                       u64 data)
3157{
3158        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3159
3160        return dd->send_dma_err_status_cnt[0];
3161}
3162
3163/*
3164 * Software counters corresponding to each of the
3165 * error status bits within SendEgressErrStatus
3166 */
3167static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3168                                const struct cntr_entry *entry,
3169                                void *context, int vl, int mode, u64 data)
3170{
3171        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3172
3173        return dd->send_egress_err_status_cnt[63];
3174}
3175
3176static u64 access_tx_read_sdma_memory_csr_err_cnt(
3177                                const struct cntr_entry *entry,
3178                                void *context, int vl, int mode, u64 data)
3179{
3180        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3181
3182        return dd->send_egress_err_status_cnt[62];
3183}
3184
3185static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3186                                             void *context, int vl, int mode,
3187                                             u64 data)
3188{
3189        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3190
3191        return dd->send_egress_err_status_cnt[61];
3192}
3193
3194static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3195                                                 void *context, int vl,
3196                                                 int mode, u64 data)
3197{
3198        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3199
3200        return dd->send_egress_err_status_cnt[60];
3201}
3202
3203static u64 access_tx_read_sdma_memory_cor_err_cnt(
3204                                const struct cntr_entry *entry,
3205                                void *context, int vl, int mode, u64 data)
3206{
3207        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3208
3209        return dd->send_egress_err_status_cnt[59];
3210}
3211
3212static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3213                                        void *context, int vl, int mode,
3214                                        u64 data)
3215{
3216        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3217
3218        return dd->send_egress_err_status_cnt[58];
3219}
3220
3221static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3222                                            void *context, int vl, int mode,
3223                                            u64 data)
3224{
3225        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3226
3227        return dd->send_egress_err_status_cnt[57];
3228}
3229
3230static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3231                                              void *context, int vl, int mode,
3232                                              u64 data)
3233{
3234        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3235
3236        return dd->send_egress_err_status_cnt[56];
3237}
3238
3239static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3240                                              void *context, int vl, int mode,
3241                                              u64 data)
3242{
3243        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3244
3245        return dd->send_egress_err_status_cnt[55];
3246}
3247
3248static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3249                                              void *context, int vl, int mode,
3250                                              u64 data)
3251{
3252        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3253
3254        return dd->send_egress_err_status_cnt[54];
3255}
3256
3257static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3258                                              void *context, int vl, int mode,
3259                                              u64 data)
3260{
3261        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3262
3263        return dd->send_egress_err_status_cnt[53];
3264}
3265
3266static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3267                                              void *context, int vl, int mode,
3268                                              u64 data)
3269{
3270        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3271
3272        return dd->send_egress_err_status_cnt[52];
3273}
3274
3275static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3276                                              void *context, int vl, int mode,
3277                                              u64 data)
3278{
3279        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3280
3281        return dd->send_egress_err_status_cnt[51];
3282}
3283
3284static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3285                                              void *context, int vl, int mode,
3286                                              u64 data)
3287{
3288        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3289
3290        return dd->send_egress_err_status_cnt[50];
3291}
3292
3293static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3294                                              void *context, int vl, int mode,
3295                                              u64 data)
3296{
3297        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3298
3299        return dd->send_egress_err_status_cnt[49];
3300}
3301
3302static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3303                                              void *context, int vl, int mode,
3304                                              u64 data)
3305{
3306        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3307
3308        return dd->send_egress_err_status_cnt[48];
3309}
3310
3311static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3312                                              void *context, int vl, int mode,
3313                                              u64 data)
3314{
3315        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3316
3317        return dd->send_egress_err_status_cnt[47];
3318}
3319
3320static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3321                                            void *context, int vl, int mode,
3322                                            u64 data)
3323{
3324        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3325
3326        return dd->send_egress_err_status_cnt[46];
3327}
3328
3329static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3330                                             void *context, int vl, int mode,
3331                                             u64 data)
3332{
3333        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3334
3335        return dd->send_egress_err_status_cnt[45];
3336}
3337
3338static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3339                                                 void *context, int vl,
3340                                                 int mode, u64 data)
3341{
3342        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3343
3344        return dd->send_egress_err_status_cnt[44];
3345}
3346
3347static u64 access_tx_read_sdma_memory_unc_err_cnt(
3348                                const struct cntr_entry *entry,
3349                                void *context, int vl, int mode, u64 data)
3350{
3351        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3352
3353        return dd->send_egress_err_status_cnt[43];
3354}
3355
3356static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3357                                        void *context, int vl, int mode,
3358                                        u64 data)
3359{
3360        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3361
3362        return dd->send_egress_err_status_cnt[42];
3363}
3364
3365static u64 access_tx_credit_return_partiy_err_cnt(
3366                                const struct cntr_entry *entry,
3367                                void *context, int vl, int mode, u64 data)
3368{
3369        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3370
3371        return dd->send_egress_err_status_cnt[41];
3372}
3373
3374static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3375                                const struct cntr_entry *entry,
3376                                void *context, int vl, int mode, u64 data)
3377{
3378        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3379
3380        return dd->send_egress_err_status_cnt[40];
3381}
3382
3383static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3384                                const struct cntr_entry *entry,
3385                                void *context, int vl, int mode, u64 data)
3386{
3387        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3388
3389        return dd->send_egress_err_status_cnt[39];
3390}
3391
3392static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3393                                const struct cntr_entry *entry,
3394                                void *context, int vl, int mode, u64 data)
3395{
3396        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3397
3398        return dd->send_egress_err_status_cnt[38];
3399}
3400
3401static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3402                                const struct cntr_entry *entry,
3403                                void *context, int vl, int mode, u64 data)
3404{
3405        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3406
3407        return dd->send_egress_err_status_cnt[37];
3408}
3409
3410static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3411                                const struct cntr_entry *entry,
3412                                void *context, int vl, int mode, u64 data)
3413{
3414        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3415
3416        return dd->send_egress_err_status_cnt[36];
3417}
3418
3419static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3420                                const struct cntr_entry *entry,
3421                                void *context, int vl, int mode, u64 data)
3422{
3423        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3424
3425        return dd->send_egress_err_status_cnt[35];
3426}
3427
3428static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3429                                const struct cntr_entry *entry,
3430                                void *context, int vl, int mode, u64 data)
3431{
3432        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3433
3434        return dd->send_egress_err_status_cnt[34];
3435}
3436
3437static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3438                                const struct cntr_entry *entry,
3439                                void *context, int vl, int mode, u64 data)
3440{
3441        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3442
3443        return dd->send_egress_err_status_cnt[33];
3444}
3445
3446static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3447                                const struct cntr_entry *entry,
3448                                void *context, int vl, int mode, u64 data)
3449{
3450        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3451
3452        return dd->send_egress_err_status_cnt[32];
3453}
3454
3455static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3456                                const struct cntr_entry *entry,
3457                                void *context, int vl, int mode, u64 data)
3458{
3459        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3460
3461        return dd->send_egress_err_status_cnt[31];
3462}
3463
3464static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3465                                const struct cntr_entry *entry,
3466                                void *context, int vl, int mode, u64 data)
3467{
3468        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3469
3470        return dd->send_egress_err_status_cnt[30];
3471}
3472
3473static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3474                                const struct cntr_entry *entry,
3475                                void *context, int vl, int mode, u64 data)
3476{
3477        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3478
3479        return dd->send_egress_err_status_cnt[29];
3480}
3481
3482static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3483                                const struct cntr_entry *entry,
3484                                void *context, int vl, int mode, u64 data)
3485{
3486        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3487
3488        return dd->send_egress_err_status_cnt[28];
3489}
3490
3491static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3492                                const struct cntr_entry *entry,
3493                                void *context, int vl, int mode, u64 data)
3494{
3495        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3496
3497        return dd->send_egress_err_status_cnt[27];
3498}
3499
3500static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3501                                const struct cntr_entry *entry,
3502                                void *context, int vl, int mode, u64 data)
3503{
3504        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3505
3506        return dd->send_egress_err_status_cnt[26];
3507}
3508
3509static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3510                                const struct cntr_entry *entry,
3511                                void *context, int vl, int mode, u64 data)
3512{
3513        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3514
3515        return dd->send_egress_err_status_cnt[25];
3516}
3517
3518static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3519                                const struct cntr_entry *entry,
3520                                void *context, int vl, int mode, u64 data)
3521{
3522        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3523
3524        return dd->send_egress_err_status_cnt[24];
3525}
3526
3527static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3528                                const struct cntr_entry *entry,
3529                                void *context, int vl, int mode, u64 data)
3530{
3531        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3532
3533        return dd->send_egress_err_status_cnt[23];
3534}
3535
3536static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3537                                const struct cntr_entry *entry,
3538                                void *context, int vl, int mode, u64 data)
3539{
3540        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3541
3542        return dd->send_egress_err_status_cnt[22];
3543}
3544
3545static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3546                                const struct cntr_entry *entry,
3547                                void *context, int vl, int mode, u64 data)
3548{
3549        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3550
3551        return dd->send_egress_err_status_cnt[21];
3552}
3553
3554static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3555                                const struct cntr_entry *entry,
3556                                void *context, int vl, int mode, u64 data)
3557{
3558        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3559
3560        return dd->send_egress_err_status_cnt[20];
3561}
3562
3563static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3564                                const struct cntr_entry *entry,
3565                                void *context, int vl, int mode, u64 data)
3566{
3567        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3568
3569        return dd->send_egress_err_status_cnt[19];
3570}
3571
3572static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3573                                const struct cntr_entry *entry,
3574                                void *context, int vl, int mode, u64 data)
3575{
3576        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3577
3578        return dd->send_egress_err_status_cnt[18];
3579}
3580
3581static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3582                                const struct cntr_entry *entry,
3583                                void *context, int vl, int mode, u64 data)
3584{
3585        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3586
3587        return dd->send_egress_err_status_cnt[17];
3588}
3589
3590static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3591                                const struct cntr_entry *entry,
3592                                void *context, int vl, int mode, u64 data)
3593{
3594        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3595
3596        return dd->send_egress_err_status_cnt[16];
3597}
3598
3599static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3600                                           void *context, int vl, int mode,
3601                                           u64 data)
3602{
3603        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3604
3605        return dd->send_egress_err_status_cnt[15];
3606}
3607
3608static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3609                                                 void *context, int vl,
3610                                                 int mode, u64 data)
3611{
3612        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3613
3614        return dd->send_egress_err_status_cnt[14];
3615}
3616
3617static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3618                                               void *context, int vl, int mode,
3619                                               u64 data)
3620{
3621        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3622
3623        return dd->send_egress_err_status_cnt[13];
3624}
3625
3626static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3627                                        void *context, int vl, int mode,
3628                                        u64 data)
3629{
3630        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3631
3632        return dd->send_egress_err_status_cnt[12];
3633}
3634
3635static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3636                                const struct cntr_entry *entry,
3637                                void *context, int vl, int mode, u64 data)
3638{
3639        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3640
3641        return dd->send_egress_err_status_cnt[11];
3642}
3643
3644static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3645                                             void *context, int vl, int mode,
3646                                             u64 data)
3647{
3648        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3649
3650        return dd->send_egress_err_status_cnt[10];
3651}
3652
3653static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3654                                            void *context, int vl, int mode,
3655                                            u64 data)
3656{
3657        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3658
3659        return dd->send_egress_err_status_cnt[9];
3660}
3661
3662static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3663                                const struct cntr_entry *entry,
3664                                void *context, int vl, int mode, u64 data)
3665{
3666        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3667
3668        return dd->send_egress_err_status_cnt[8];
3669}
3670
3671static u64 access_tx_pio_launch_intf_parity_err_cnt(
3672                                const struct cntr_entry *entry,
3673                                void *context, int vl, int mode, u64 data)
3674{
3675        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3676
3677        return dd->send_egress_err_status_cnt[7];
3678}
3679
3680static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3681                                            void *context, int vl, int mode,
3682                                            u64 data)
3683{
3684        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3685
3686        return dd->send_egress_err_status_cnt[6];
3687}
3688
3689static u64 access_tx_incorrect_link_state_err_cnt(
3690                                const struct cntr_entry *entry,
3691                                void *context, int vl, int mode, u64 data)
3692{
3693        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3694
3695        return dd->send_egress_err_status_cnt[5];
3696}
3697
3698static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3699                                      void *context, int vl, int mode,
3700                                      u64 data)
3701{
3702        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3703
3704        return dd->send_egress_err_status_cnt[4];
3705}
3706
3707static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3708                                const struct cntr_entry *entry,
3709                                void *context, int vl, int mode, u64 data)
3710{
3711        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3712
3713        return dd->send_egress_err_status_cnt[3];
3714}
3715
3716static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3717                                            void *context, int vl, int mode,
3718                                            u64 data)
3719{
3720        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3721
3722        return dd->send_egress_err_status_cnt[2];
3723}
3724
3725static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3726                                const struct cntr_entry *entry,
3727                                void *context, int vl, int mode, u64 data)
3728{
3729        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3730
3731        return dd->send_egress_err_status_cnt[1];
3732}
3733
3734static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3735                                const struct cntr_entry *entry,
3736                                void *context, int vl, int mode, u64 data)
3737{
3738        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3739
3740        return dd->send_egress_err_status_cnt[0];
3741}
3742
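/*
 * The SendEgressErrStatus accessors above are structurally identical and
 * differ only in the array index they return.  Purely as an illustrative
 * sketch (this is not how the driver defines these particular counters,
 * although the same idea is used further below for the per-port
 * counters), the boilerplate could be generated by a macro:
 *
 *      #define def_access_egress_err_cnt(name, idx)                        \
 *      static u64 access_##name##_err_cnt(const struct cntr_entry *entry,  \
 *                                         void *context, int vl, int mode, \
 *                                         u64 data)                        \
 *      {                                                                    \
 *              struct hfi1_devdata *dd = (struct hfi1_devdata *)context;   \
 *                                                                           \
 *              return dd->send_egress_err_status_cnt[idx];                  \
 *      }
 *
 *      def_access_egress_err_cnt(tx_linkdown, 4);
 *
 * Writing each function out by hand, as above, keeps every counter name
 * directly greppable, at the cost of the repetition seen here.
 */
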
3743/*
3744 * Software counters corresponding to each of the
3745 * error status bits within SendErrStatus
3746 */
3747static u64 access_send_csr_write_bad_addr_err_cnt(
3748                                const struct cntr_entry *entry,
3749                                void *context, int vl, int mode, u64 data)
3750{
3751        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3752
3753        return dd->send_err_status_cnt[2];
3754}
3755
3756static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3757                                                 void *context, int vl,
3758                                                 int mode, u64 data)
3759{
3760        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3761
3762        return dd->send_err_status_cnt[1];
3763}
3764
3765static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3766                                      void *context, int vl, int mode,
3767                                      u64 data)
3768{
3769        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3770
3771        return dd->send_err_status_cnt[0];
3772}
3773
3774/*
3775 * Software counters corresponding to each of the
3776 * error status bits within SendCtxtErrStatus
3777 */
3778static u64 access_pio_write_out_of_bounds_err_cnt(
3779                                const struct cntr_entry *entry,
3780                                void *context, int vl, int mode, u64 data)
3781{
3782        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3783
3784        return dd->sw_ctxt_err_status_cnt[4];
3785}
3786
3787static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3788                                             void *context, int vl, int mode,
3789                                             u64 data)
3790{
3791        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3792
3793        return dd->sw_ctxt_err_status_cnt[3];
3794}
3795
3796static u64 access_pio_write_crosses_boundary_err_cnt(
3797                                const struct cntr_entry *entry,
3798                                void *context, int vl, int mode, u64 data)
3799{
3800        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3801
3802        return dd->sw_ctxt_err_status_cnt[2];
3803}
3804
3805static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3806                                                void *context, int vl,
3807                                                int mode, u64 data)
3808{
3809        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3810
3811        return dd->sw_ctxt_err_status_cnt[1];
3812}
3813
3814static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3815                                               void *context, int vl, int mode,
3816                                               u64 data)
3817{
3818        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3819
3820        return dd->sw_ctxt_err_status_cnt[0];
3821}
3822
3823/*
3824 * Software counters corresponding to each of the
3825 * error status bits within SendDmaEngErrStatus
3826 */
3827static u64 access_sdma_header_request_fifo_cor_err_cnt(
3828                                const struct cntr_entry *entry,
3829                                void *context, int vl, int mode, u64 data)
3830{
3831        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3832
3833        return dd->sw_send_dma_eng_err_status_cnt[23];
3834}
3835
3836static u64 access_sdma_header_storage_cor_err_cnt(
3837                                const struct cntr_entry *entry,
3838                                void *context, int vl, int mode, u64 data)
3839{
3840        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3841
3842        return dd->sw_send_dma_eng_err_status_cnt[22];
3843}
3844
3845static u64 access_sdma_packet_tracking_cor_err_cnt(
3846                                const struct cntr_entry *entry,
3847                                void *context, int vl, int mode, u64 data)
3848{
3849        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3850
3851        return dd->sw_send_dma_eng_err_status_cnt[21];
3852}
3853
3854static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3855                                            void *context, int vl, int mode,
3856                                            u64 data)
3857{
3858        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3859
3860        return dd->sw_send_dma_eng_err_status_cnt[20];
3861}
3862
3863static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3864                                              void *context, int vl, int mode,
3865                                              u64 data)
3866{
3867        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3868
3869        return dd->sw_send_dma_eng_err_status_cnt[19];
3870}
3871
3872static u64 access_sdma_header_request_fifo_unc_err_cnt(
3873                                const struct cntr_entry *entry,
3874                                void *context, int vl, int mode, u64 data)
3875{
3876        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3877
3878        return dd->sw_send_dma_eng_err_status_cnt[18];
3879}
3880
3881static u64 access_sdma_header_storage_unc_err_cnt(
3882                                const struct cntr_entry *entry,
3883                                void *context, int vl, int mode, u64 data)
3884{
3885        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3886
3887        return dd->sw_send_dma_eng_err_status_cnt[17];
3888}
3889
3890static u64 access_sdma_packet_tracking_unc_err_cnt(
3891                                const struct cntr_entry *entry,
3892                                void *context, int vl, int mode, u64 data)
3893{
3894        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3895
3896        return dd->sw_send_dma_eng_err_status_cnt[16];
3897}
3898
3899static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3900                                            void *context, int vl, int mode,
3901                                            u64 data)
3902{
3903        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3904
3905        return dd->sw_send_dma_eng_err_status_cnt[15];
3906}
3907
3908static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3909                                              void *context, int vl, int mode,
3910                                              u64 data)
3911{
3912        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3913
3914        return dd->sw_send_dma_eng_err_status_cnt[14];
3915}
3916
3917static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3918                                       void *context, int vl, int mode,
3919                                       u64 data)
3920{
3921        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3922
3923        return dd->sw_send_dma_eng_err_status_cnt[13];
3924}
3925
3926static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3927                                             void *context, int vl, int mode,
3928                                             u64 data)
3929{
3930        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3931
3932        return dd->sw_send_dma_eng_err_status_cnt[12];
3933}
3934
3935static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3936                                              void *context, int vl, int mode,
3937                                              u64 data)
3938{
3939        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3940
3941        return dd->sw_send_dma_eng_err_status_cnt[11];
3942}
3943
3944static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3945                                             void *context, int vl, int mode,
3946                                             u64 data)
3947{
3948        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3949
3950        return dd->sw_send_dma_eng_err_status_cnt[10];
3951}
3952
3953static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3954                                          void *context, int vl, int mode,
3955                                          u64 data)
3956{
3957        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3958
3959        return dd->sw_send_dma_eng_err_status_cnt[9];
3960}
3961
3962static u64 access_sdma_packet_desc_overflow_err_cnt(
3963                                const struct cntr_entry *entry,
3964                                void *context, int vl, int mode, u64 data)
3965{
3966        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3967
3968        return dd->sw_send_dma_eng_err_status_cnt[8];
3969}
3970
3971static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3972                                               void *context, int vl,
3973                                               int mode, u64 data)
3974{
3975        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3976
3977        return dd->sw_send_dma_eng_err_status_cnt[7];
3978}
3979
3980static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3981                                    void *context, int vl, int mode, u64 data)
3982{
3983        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3984
3985        return dd->sw_send_dma_eng_err_status_cnt[6];
3986}
3987
3988static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3989                                        void *context, int vl, int mode,
3990                                        u64 data)
3991{
3992        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3993
3994        return dd->sw_send_dma_eng_err_status_cnt[5];
3995}
3996
3997static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3998                                          void *context, int vl, int mode,
3999                                          u64 data)
4000{
4001        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4002
4003        return dd->sw_send_dma_eng_err_status_cnt[4];
4004}
4005
4006static u64 access_sdma_tail_out_of_bounds_err_cnt(
4007                                const struct cntr_entry *entry,
4008                                void *context, int vl, int mode, u64 data)
4009{
4010        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4011
4012        return dd->sw_send_dma_eng_err_status_cnt[3];
4013}
4014
4015static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
4016                                        void *context, int vl, int mode,
4017                                        u64 data)
4018{
4019        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4020
4021        return dd->sw_send_dma_eng_err_status_cnt[2];
4022}
4023
4024static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
4025                                            void *context, int vl, int mode,
4026                                            u64 data)
4027{
4028        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4029
4030        return dd->sw_send_dma_eng_err_status_cnt[1];
4031}
4032
4033static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
4034                                        void *context, int vl, int mode,
4035                                        u64 data)
4036{
4037        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4038
4039        return dd->sw_send_dma_eng_err_status_cnt[0];
4040}
4041
4042static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
4043                                 void *context, int vl, int mode,
4044                                 u64 data)
4045{
4046        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4047
4048        u64 val = 0;
4049        u64 csr = entry->csr;
4050
4051        val = read_write_csr(dd, csr, mode, data);
4052        if (mode == CNTR_MODE_R) {
4053                val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
4054                        CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
4055        } else if (mode == CNTR_MODE_W) {
4056                dd->sw_rcv_bypass_packet_errors = 0;
4057        } else {
4058                dd_dev_err(dd, "Invalid cntr register access mode");
4059                return 0;
4060        }
4061        return val;
4062}
4063
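/*
 * Generate accessors for the per-CPU RC software counters kept in the
 * port's ibport_data; read_write_cpu() applies the request against the
 * per-CPU counter and its z_<cntr> baseline.
 */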
4064#define def_access_sw_cpu(cntr) \
4065static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,               \
4066                              void *context, int vl, int mode, u64 data)      \
4067{                                                                             \
4068        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
4069        return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,       \
4070                              ppd->ibport_data.rvp.cntr, vl,                  \
4071                              mode, data);                                    \
4072}
4073
4074def_access_sw_cpu(rc_acks);
4075def_access_sw_cpu(rc_qacks);
4076def_access_sw_cpu(rc_delayed_comp);
4077
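/*
 * Generate accessors for the software IB-protocol (n_<cntr>) port
 * counters.  These have no per-VL breakdown, so per-VL requests
 * return 0.
 */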
4078#define def_access_ibp_counter(cntr) \
4079static u64 access_ibp_##cntr(const struct cntr_entry *entry,                  \
4080                                void *context, int vl, int mode, u64 data)    \
4081{                                                                             \
4082        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
4083                                                                              \
4084        if (vl != CNTR_INVALID_VL)                                            \
4085                return 0;                                                     \
4086                                                                              \
4087        return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,        \
4088                             mode, data);                                     \
4089}
4090
4091def_access_ibp_counter(loop_pkts);
4092def_access_ibp_counter(rc_resends);
4093def_access_ibp_counter(rnr_naks);
4094def_access_ibp_counter(other_naks);
4095def_access_ibp_counter(rc_timeouts);
4096def_access_ibp_counter(pkt_drops);
4097def_access_ibp_counter(dmawait);
4098def_access_ibp_counter(rc_seqnak);
4099def_access_ibp_counter(rc_dupreq);
4100def_access_ibp_counter(rdma_seq);
4101def_access_ibp_counter(unaligned);
4102def_access_ibp_counter(seq_naks);
4103
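/*
 * Device counter table, indexed by the C_* device counter enums.  Each
 * entry gives the counter name, the backing CSR (0 for software-only
 * counters), the counter flags, and the routine used to access it.
 */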
4104static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4105[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4106[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4107                        CNTR_NORMAL),
4108[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4109                        CNTR_NORMAL),
4110[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4111                        RCV_TID_FLOW_GEN_MISMATCH_CNT,
4112                        CNTR_NORMAL),
4113[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4114                        CNTR_NORMAL),
4115[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4116                        RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4117[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4118                        CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4119[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4120                        CNTR_NORMAL),
4121[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4122                        CNTR_NORMAL),
4123[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4124                        CNTR_NORMAL),
4125[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4126                        CNTR_NORMAL),
4127[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4128                        CNTR_NORMAL),
4129[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4130                        CNTR_NORMAL),
4131[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4132                        CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4133[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4134                        CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
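/* DC block counters (DCC error/performance and LCB) */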
4135[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4136                              CNTR_SYNTH),
4137[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4138                            access_dc_rcv_err_cnt),
4139[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4140                                 CNTR_SYNTH),
4141[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4142                                  CNTR_SYNTH),
4143[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4144                                  CNTR_SYNTH),
4145[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4146                                   DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4147[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4148                                  DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4149                                  CNTR_SYNTH),
4150[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4151                                DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4152[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4153                               CNTR_SYNTH),
4154[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4155                              CNTR_SYNTH),
4156[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4157                               CNTR_SYNTH),
4158[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4159                                 CNTR_SYNTH),
4160[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4161                                CNTR_SYNTH),
4162[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4163                                CNTR_SYNTH),
4164[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4165                               CNTR_SYNTH),
4166[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4167                                 CNTR_SYNTH | CNTR_VL),
4168[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4169                                CNTR_SYNTH | CNTR_VL),
4170[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4171[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4172                                 CNTR_SYNTH | CNTR_VL),
4173[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4174[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4175                                 CNTR_SYNTH | CNTR_VL),
4176[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4177                              CNTR_SYNTH),
4178[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4179                                 CNTR_SYNTH | CNTR_VL),
4180[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4181                                CNTR_SYNTH),
4182[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4183                                   CNTR_SYNTH | CNTR_VL),
4184[C_DC_TOTAL_CRC] =
4185        DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4186                         CNTR_SYNTH),
4187[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4188                                  CNTR_SYNTH),
4189[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4190                                  CNTR_SYNTH),
4191[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4192                                  CNTR_SYNTH),
4193[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4194                                  CNTR_SYNTH),
4195[C_DC_CRC_MULT_LN] =
4196        DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4197                         CNTR_SYNTH),
4198[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4199                                    CNTR_SYNTH),
4200[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4201                                    CNTR_SYNTH),
4202[C_DC_SEQ_CRC_CNT] =
4203        DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4204                         CNTR_SYNTH),
4205[C_DC_ESC0_ONLY_CNT] =
4206        DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4207                         CNTR_SYNTH),
4208[C_DC_ESC0_PLUS1_CNT] =
4209        DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4210                         CNTR_SYNTH),
4211[C_DC_ESC0_PLUS2_CNT] =
4212        DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4213                         CNTR_SYNTH),
4214[C_DC_REINIT_FROM_PEER_CNT] =
4215        DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4216                         CNTR_SYNTH),
4217[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4218                                  CNTR_SYNTH),
4219[C_DC_MISC_FLG_CNT] =
4220        DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4221                         CNTR_SYNTH),
4222[C_DC_PRF_GOOD_LTP_CNT] =
4223        DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4224[C_DC_PRF_ACCEPTED_LTP_CNT] =
4225        DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4226                         CNTR_SYNTH),
4227[C_DC_PRF_RX_FLIT_CNT] =
4228        DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4229[C_DC_PRF_TX_FLIT_CNT] =
4230        DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4231[C_DC_PRF_CLK_CNTR] =
4232        DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4233[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4234        DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4235[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4236        DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4237                         CNTR_SYNTH),
4238[C_DC_PG_STS_TX_SBE_CNT] =
4239        DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4240[C_DC_PG_STS_TX_MBE_CNT] =
4241        DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4242                         CNTR_SYNTH),
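/* software device counters (no backing CSR) */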
4243[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4244                            access_sw_cpu_intr),
4245[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4246                            access_sw_cpu_rcv_limit),
4247[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4248                            access_sw_vtx_wait),
4249[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4250                            access_sw_pio_wait),
4251[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4252                            access_sw_pio_drain),
4253[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4254                            access_sw_kmem_wait),
4255[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4256                            access_sw_send_schedule),
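/* per-SDMA-engine counters (CNTR_SDMA) */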
4257[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4258                                      SEND_DMA_DESC_FETCHED_CNT, 0,
4259                                      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4260                                      dev_access_u32_csr),
4261[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4262                             CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4263                             access_sde_int_cnt),
4264[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4265                             CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4266                             access_sde_err_cnt),
4267[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4268                                  CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4269                                  access_sde_idle_int_cnt),
4270[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4271                                      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4272                                      access_sde_progress_int_cnt),
4273/* MISC_ERR_STATUS */
4274[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4275                                CNTR_NORMAL,
4276                                access_misc_pll_lock_fail_err_cnt),
4277[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4278                                CNTR_NORMAL,
4279                                access_misc_mbist_fail_err_cnt),
4280[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4281                                CNTR_NORMAL,
4282                                access_misc_invalid_eep_cmd_err_cnt),
4283[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4284                                CNTR_NORMAL,
4285                                access_misc_efuse_done_parity_err_cnt),
4286[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4287                                CNTR_NORMAL,
4288                                access_misc_efuse_write_err_cnt),
4289[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4290                                0, CNTR_NORMAL,
4291                                access_misc_efuse_read_bad_addr_err_cnt),
4292[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4293                                CNTR_NORMAL,
4294                                access_misc_efuse_csr_parity_err_cnt),
4295[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4296                                CNTR_NORMAL,
4297                                access_misc_fw_auth_failed_err_cnt),
4298[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4299                                CNTR_NORMAL,
4300                                access_misc_key_mismatch_err_cnt),
4301[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4302                                CNTR_NORMAL,
4303                                access_misc_sbus_write_failed_err_cnt),
4304[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4305                                CNTR_NORMAL,
4306                                access_misc_csr_write_bad_addr_err_cnt),
4307[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4308                                CNTR_NORMAL,
4309                                access_misc_csr_read_bad_addr_err_cnt),
4310[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4311                                CNTR_NORMAL,
4312                                access_misc_csr_parity_err_cnt),
4313/* CceErrStatus */
4314[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4315                                CNTR_NORMAL,
4316                                access_sw_cce_err_status_aggregated_cnt),
4317[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4318                                CNTR_NORMAL,
4319                                access_cce_msix_csr_parity_err_cnt),
4320[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4321                                CNTR_NORMAL,
4322                                access_cce_int_map_unc_err_cnt),
4323[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4324                                CNTR_NORMAL,
4325                                access_cce_int_map_cor_err_cnt),
4326[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4327                                CNTR_NORMAL,
4328                                access_cce_msix_table_unc_err_cnt),
4329[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4330                                CNTR_NORMAL,
4331                                access_cce_msix_table_cor_err_cnt),
4332[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4333                                0, CNTR_NORMAL,
4334                                access_cce_rxdma_conv_fifo_parity_err_cnt),
4335[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4336                                0, CNTR_NORMAL,
4337                                access_cce_rcpl_async_fifo_parity_err_cnt),
4338[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4339                                CNTR_NORMAL,
4340                                access_cce_seg_write_bad_addr_err_cnt),
4341[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4342                                CNTR_NORMAL,
4343                                access_cce_seg_read_bad_addr_err_cnt),
4344[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4345                                CNTR_NORMAL,
4346                                access_la_triggered_cnt),
4347[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4348                                CNTR_NORMAL,
4349                                access_cce_trgt_cpl_timeout_err_cnt),
4350[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4351                                CNTR_NORMAL,
4352                                access_pcic_receive_parity_err_cnt),
4353[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4354                                CNTR_NORMAL,
4355                                access_pcic_transmit_back_parity_err_cnt),
4356[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4357                                0, CNTR_NORMAL,
4358                                access_pcic_transmit_front_parity_err_cnt),
4359[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4360                                CNTR_NORMAL,
4361                                access_pcic_cpl_dat_q_unc_err_cnt),
4362[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4363                                CNTR_NORMAL,
4364                                access_pcic_cpl_hd_q_unc_err_cnt),
4365[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4366                                CNTR_NORMAL,
4367                                access_pcic_post_dat_q_unc_err_cnt),
4368[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4369                                CNTR_NORMAL,
4370                                access_pcic_post_hd_q_unc_err_cnt),
4371[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4372                                CNTR_NORMAL,
4373                                access_pcic_retry_sot_mem_unc_err_cnt),
4374[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4375                                CNTR_NORMAL,
4376                                access_pcic_retry_mem_unc_err),
4377[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4378                                CNTR_NORMAL,
4379                                access_pcic_n_post_dat_q_parity_err_cnt),
4380[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4381                                CNTR_NORMAL,
4382                                access_pcic_n_post_h_q_parity_err_cnt),
4383[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4384                                CNTR_NORMAL,
4385                                access_pcic_cpl_dat_q_cor_err_cnt),
4386[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4387                                CNTR_NORMAL,
4388                                access_pcic_cpl_hd_q_cor_err_cnt),
4389[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4390                                CNTR_NORMAL,
4391                                access_pcic_post_dat_q_cor_err_cnt),
4392[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4393                                CNTR_NORMAL,
4394                                access_pcic_post_hd_q_cor_err_cnt),
4395[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4396                                CNTR_NORMAL,
4397                                access_pcic_retry_sot_mem_cor_err_cnt),
4398[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4399                                CNTR_NORMAL,
4400                                access_pcic_retry_mem_cor_err_cnt),
4401[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4402                                "CceCli1AsyncFifoDbgParityError", 0, 0,
4403                                CNTR_NORMAL,
4404                                access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4405[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4406                                "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4407                                CNTR_NORMAL,
4408                                access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4409                                ),
4410[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4411                        "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4412                        CNTR_NORMAL,
4413                        access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4414[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4415                        "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4416                        CNTR_NORMAL,
4417                        access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4418[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4419                        0, CNTR_NORMAL,
4420                        access_cce_cli2_async_fifo_parity_err_cnt),
4421[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4422                        CNTR_NORMAL,
4423                        access_cce_csr_cfg_bus_parity_err_cnt),
4424[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4425                        0, CNTR_NORMAL,
4426                        access_cce_cli0_async_fifo_parity_err_cnt),
4427[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4428                        CNTR_NORMAL,
4429                        access_cce_rspd_data_parity_err_cnt),
4430[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4431                        CNTR_NORMAL,
4432                        access_cce_trgt_access_err_cnt),
4433[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4434                        0, CNTR_NORMAL,
4435                        access_cce_trgt_async_fifo_parity_err_cnt),
4436[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4437                        CNTR_NORMAL,
4438                        access_cce_csr_write_bad_addr_err_cnt),
4439[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4440                        CNTR_NORMAL,
4441                        access_cce_csr_read_bad_addr_err_cnt),
4442[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4443                        CNTR_NORMAL,
4444                        access_ccs_csr_parity_err_cnt),
4445
4446/* RcvErrStatus */
4447[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4448                        CNTR_NORMAL,
4449                        access_rx_csr_parity_err_cnt),
4450[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4451                        CNTR_NORMAL,
4452                        access_rx_csr_write_bad_addr_err_cnt),
4453[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4454                        CNTR_NORMAL,
4455                        access_rx_csr_read_bad_addr_err_cnt),
4456[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4457                        CNTR_NORMAL,
4458                        access_rx_dma_csr_unc_err_cnt),
4459[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4460                        CNTR_NORMAL,
4461                        access_rx_dma_dq_fsm_encoding_err_cnt),
4462[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4463                        CNTR_NORMAL,
4464                        access_rx_dma_eq_fsm_encoding_err_cnt),
4465[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4466                        CNTR_NORMAL,
4467                        access_rx_dma_csr_parity_err_cnt),
4468[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4469                        CNTR_NORMAL,
4470                        access_rx_rbuf_data_cor_err_cnt),
4471[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4472                        CNTR_NORMAL,
4473                        access_rx_rbuf_data_unc_err_cnt),
4474[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4475                        CNTR_NORMAL,
4476                        access_rx_dma_data_fifo_rd_cor_err_cnt),
4477[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4478                        CNTR_NORMAL,
4479                        access_rx_dma_data_fifo_rd_unc_err_cnt),
4480[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4481                        CNTR_NORMAL,
4482                        access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4483[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4484                        CNTR_NORMAL,
4485                        access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4486[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4487                        CNTR_NORMAL,
4488                        access_rx_rbuf_desc_part2_cor_err_cnt),
4489[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4490                        CNTR_NORMAL,
4491                        access_rx_rbuf_desc_part2_unc_err_cnt),
4492[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4493                        CNTR_NORMAL,
4494                        access_rx_rbuf_desc_part1_cor_err_cnt),
4495[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4496                        CNTR_NORMAL,
4497                        access_rx_rbuf_desc_part1_unc_err_cnt),
4498[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4499                        CNTR_NORMAL,
4500                        access_rx_hq_intr_fsm_err_cnt),
4501[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4502                        CNTR_NORMAL,
4503                        access_rx_hq_intr_csr_parity_err_cnt),
4504[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4505                        CNTR_NORMAL,
4506                        access_rx_lookup_csr_parity_err_cnt),
4507[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4508                        CNTR_NORMAL,
4509                        access_rx_lookup_rcv_array_cor_err_cnt),
4510[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4511                        CNTR_NORMAL,
4512                        access_rx_lookup_rcv_array_unc_err_cnt),
4513[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4514                        0, CNTR_NORMAL,
4515                        access_rx_lookup_des_part2_parity_err_cnt),
4516[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4517                        0, CNTR_NORMAL,
4518                        access_rx_lookup_des_part1_unc_cor_err_cnt),
4519[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4520                        CNTR_NORMAL,
4521                        access_rx_lookup_des_part1_unc_err_cnt),
4522[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4523                        CNTR_NORMAL,
4524                        access_rx_rbuf_next_free_buf_cor_err_cnt),
4525[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4526                        CNTR_NORMAL,
4527                        access_rx_rbuf_next_free_buf_unc_err_cnt),
4528[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4529                        "RxRbufFlInitWrAddrParityErr", 0, 0,
4530                        CNTR_NORMAL,
4531                        access_rbuf_fl_init_wr_addr_parity_err_cnt),
4532[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4533                        0, CNTR_NORMAL,
4534                        access_rx_rbuf_fl_initdone_parity_err_cnt),
4535[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4536                        0, CNTR_NORMAL,
4537                        access_rx_rbuf_fl_write_addr_parity_err_cnt),
4538[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4539                        CNTR_NORMAL,
4540                        access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4541[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4542                        CNTR_NORMAL,
4543                        access_rx_rbuf_empty_err_cnt),
4544[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4545                        CNTR_NORMAL,
4546                        access_rx_rbuf_full_err_cnt),
4547[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4548                        CNTR_NORMAL,
4549                        access_rbuf_bad_lookup_err_cnt),
4550[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4551                        CNTR_NORMAL,
4552                        access_rbuf_ctx_id_parity_err_cnt),
4553[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4554                        CNTR_NORMAL,
4555                        access_rbuf_csr_qeopdw_parity_err_cnt),
4556[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4557                        "RxRbufCsrQNumOfPktParityErr", 0, 0,
4558                        CNTR_NORMAL,
4559                        access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4560[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4561                        "RxRbufCsrQTlPtrParityErr", 0, 0,
4562                        CNTR_NORMAL,
4563                        access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4564[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4565                        0, CNTR_NORMAL,
4566                        access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4567[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4568                        0, CNTR_NORMAL,
4569                        access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4570[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4571                        0, 0, CNTR_NORMAL,
4572                        access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4573[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4574                        0, CNTR_NORMAL,
4575                        access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4576[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4577                        "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4578                        CNTR_NORMAL,
4579                        access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4580[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4581                        0, CNTR_NORMAL,
4582                        access_rx_rbuf_block_list_read_cor_err_cnt),
4583[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4584                        0, CNTR_NORMAL,
4585                        access_rx_rbuf_block_list_read_unc_err_cnt),
4586[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4587                        CNTR_NORMAL,
4588                        access_rx_rbuf_lookup_des_cor_err_cnt),
4589[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4590                        CNTR_NORMAL,
4591                        access_rx_rbuf_lookup_des_unc_err_cnt),
4592[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4593                        "RxRbufLookupDesRegUncCorErr", 0, 0,
4594                        CNTR_NORMAL,
4595                        access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4596[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4597                        CNTR_NORMAL,
4598                        access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4599[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4600                        CNTR_NORMAL,
4601                        access_rx_rbuf_free_list_cor_err_cnt),
4602[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4603                        CNTR_NORMAL,
4604                        access_rx_rbuf_free_list_unc_err_cnt),
4605[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4606                        CNTR_NORMAL,
4607                        access_rx_rcv_fsm_encoding_err_cnt),
4608[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4609                        CNTR_NORMAL,
4610                        access_rx_dma_flag_cor_err_cnt),
4611[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4612                        CNTR_NORMAL,
4613                        access_rx_dma_flag_unc_err_cnt),
4614[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4615                        CNTR_NORMAL,
4616                        access_rx_dc_sop_eop_parity_err_cnt),
4617[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4618                        CNTR_NORMAL,
4619                        access_rx_rcv_csr_parity_err_cnt),
4620[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4621                        CNTR_NORMAL,
4622                        access_rx_rcv_qp_map_table_cor_err_cnt),
4623[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4624                        CNTR_NORMAL,
4625                        access_rx_rcv_qp_map_table_unc_err_cnt),
4626[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4627                        CNTR_NORMAL,
4628                        access_rx_rcv_data_cor_err_cnt),
4629[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4630                        CNTR_NORMAL,
4631                        access_rx_rcv_data_unc_err_cnt),
4632[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4633                        CNTR_NORMAL,
4634                        access_rx_rcv_hdr_cor_err_cnt),
4635[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4636                        CNTR_NORMAL,
4637                        access_rx_rcv_hdr_unc_err_cnt),
4638[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4639                        CNTR_NORMAL,
4640                        access_rx_dc_intf_parity_err_cnt),
4641[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4642                        CNTR_NORMAL,
4643                        access_rx_dma_csr_cor_err_cnt),
4644/* SendPioErrStatus */
4645[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4646                        CNTR_NORMAL,
4647                        access_pio_pec_sop_head_parity_err_cnt),
4648[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4649                        CNTR_NORMAL,
4650                        access_pio_pcc_sop_head_parity_err_cnt),
4651[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4652                        0, 0, CNTR_NORMAL,
4653                        access_pio_last_returned_cnt_parity_err_cnt),
4654[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4655                        0, CNTR_NORMAL,
4656                        access_pio_current_free_cnt_parity_err_cnt),
4657[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4658                        CNTR_NORMAL,
4659                        access_pio_reserved_31_err_cnt),
4660[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4661                        CNTR_NORMAL,
4662                        access_pio_reserved_30_err_cnt),
4663[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4664                        CNTR_NORMAL,
4665                        access_pio_ppmc_sop_len_err_cnt),
4666[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4667                        CNTR_NORMAL,
4668                        access_pio_ppmc_bqc_mem_parity_err_cnt),
4669[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4670                        CNTR_NORMAL,
4671                        access_pio_vl_fifo_parity_err_cnt),
4672[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4673                        CNTR_NORMAL,
4674                        access_pio_vlf_sop_parity_err_cnt),
4675[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4676                        CNTR_NORMAL,
4677                        access_pio_vlf_v1_len_parity_err_cnt),
4678[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4679                        CNTR_NORMAL,
4680                        access_pio_block_qw_count_parity_err_cnt),
4681[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4682                        CNTR_NORMAL,
4683                        access_pio_write_qw_valid_parity_err_cnt),
4684[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4685                        CNTR_NORMAL,
4686                        access_pio_state_machine_err_cnt),
4687[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4688                        CNTR_NORMAL,
4689                        access_pio_write_data_parity_err_cnt),
4690[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4691                        CNTR_NORMAL,
4692                        access_pio_host_addr_mem_cor_err_cnt),
4693[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4694                        CNTR_NORMAL,
4695                        access_pio_host_addr_mem_unc_err_cnt),
4696[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4697                        CNTR_NORMAL,
4698                        access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4699[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4700                        CNTR_NORMAL,
4701                        access_pio_init_sm_in_err_cnt),
4702[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4703                        CNTR_NORMAL,
4704                        access_pio_ppmc_pbl_fifo_err_cnt),
4705[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4706                        0, CNTR_NORMAL,
4707                        access_pio_credit_ret_fifo_parity_err_cnt),
4708[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4709                        CNTR_NORMAL,
4710                        access_pio_v1_len_mem_bank1_cor_err_cnt),
4711[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4712                        CNTR_NORMAL,
4713                        access_pio_v1_len_mem_bank0_cor_err_cnt),
4714[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4715                        CNTR_NORMAL,
4716                        access_pio_v1_len_mem_bank1_unc_err_cnt),
4717[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4718                        CNTR_NORMAL,
4719                        access_pio_v1_len_mem_bank0_unc_err_cnt),
4720[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4721                        CNTR_NORMAL,
4722                        access_pio_sm_pkt_reset_parity_err_cnt),
4723[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4724                        CNTR_NORMAL,
4725                        access_pio_pkt_evict_fifo_parity_err_cnt),
4726[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4727                        "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4728                        CNTR_NORMAL,
4729                        access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4730[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4731                        CNTR_NORMAL,
4732                        access_pio_sbrdctl_crrel_parity_err_cnt),
4733[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4734                        CNTR_NORMAL,
4735                        access_pio_pec_fifo_parity_err_cnt),
4736[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4737                        CNTR_NORMAL,
4738                        access_pio_pcc_fifo_parity_err_cnt),
4739[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4740                        CNTR_NORMAL,
4741                        access_pio_sb_mem_fifo1_err_cnt),
4742[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4743                        CNTR_NORMAL,
4744                        access_pio_sb_mem_fifo0_err_cnt),
4745[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4746                        CNTR_NORMAL,
4747                        access_pio_csr_parity_err_cnt),
4748[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4749                        CNTR_NORMAL,
4750                        access_pio_write_addr_parity_err_cnt),
4751[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4752                        CNTR_NORMAL,
4753                        access_pio_write_bad_ctxt_err_cnt),
4754/* SendDmaErrStatus */
4755[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4756                        0, CNTR_NORMAL,
4757                        access_sdma_pcie_req_tracking_cor_err_cnt),
4758[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4759                        0, CNTR_NORMAL,
4760                        access_sdma_pcie_req_tracking_unc_err_cnt),
4761[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4762                        CNTR_NORMAL,
4763                        access_sdma_csr_parity_err_cnt),
4764[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4765                        CNTR_NORMAL,
4766                        access_sdma_rpy_tag_err_cnt),
4767/* SendEgressErrStatus */
4768[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4769                        CNTR_NORMAL,
4770                        access_tx_read_pio_memory_csr_unc_err_cnt),
4771[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4772                        0, CNTR_NORMAL,
4773                        access_tx_read_sdma_memory_csr_err_cnt),
4774[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4775                        CNTR_NORMAL,
4776                        access_tx_egress_fifo_cor_err_cnt),
4777[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4778                        CNTR_NORMAL,
4779                        access_tx_read_pio_memory_cor_err_cnt),
4780[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4781                        CNTR_NORMAL,
4782                        access_tx_read_sdma_memory_cor_err_cnt),
4783[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4784                        CNTR_NORMAL,
4785                        access_tx_sb_hdr_cor_err_cnt),
4786[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4787                        CNTR_NORMAL,
4788                        access_tx_credit_overrun_err_cnt),
4789[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4790                        CNTR_NORMAL,
4791                        access_tx_launch_fifo8_cor_err_cnt),
4792[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4793                        CNTR_NORMAL,
4794                        access_tx_launch_fifo7_cor_err_cnt),
4795[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4796                        CNTR_NORMAL,
4797                        access_tx_launch_fifo6_cor_err_cnt),
4798[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4799                        CNTR_NORMAL,
4800                        access_tx_launch_fifo5_cor_err_cnt),
4801[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4802                        CNTR_NORMAL,
4803                        access_tx_launch_fifo4_cor_err_cnt),
4804[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4805                        CNTR_NORMAL,
4806                        access_tx_launch_fifo3_cor_err_cnt),
4807[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4808                        CNTR_NORMAL,
4809                        access_tx_launch_fifo2_cor_err_cnt),
4810[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4811                        CNTR_NORMAL,
4812                        access_tx_launch_fifo1_cor_err_cnt),
4813[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4814                        CNTR_NORMAL,
4815                        access_tx_launch_fifo0_cor_err_cnt),
4816[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4817                        CNTR_NORMAL,
4818                        access_tx_credit_return_vl_err_cnt),
4819[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4820                        CNTR_NORMAL,
4821                        access_tx_hcrc_insertion_err_cnt),
4822[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4823                        CNTR_NORMAL,
4824                        access_tx_egress_fifo_unc_err_cnt),
4825[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4826                        CNTR_NORMAL,
4827                        access_tx_read_pio_memory_unc_err_cnt),
4828[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4829                        CNTR_NORMAL,
4830                        access_tx_read_sdma_memory_unc_err_cnt),
4831[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4832                        CNTR_NORMAL,
4833                        access_tx_sb_hdr_unc_err_cnt),
4834[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4835                        CNTR_NORMAL,
4836                        access_tx_credit_return_partiy_err_cnt),
4837[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4838                        0, 0, CNTR_NORMAL,
4839                        access_tx_launch_fifo8_unc_or_parity_err_cnt),
4840[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4841                        0, 0, CNTR_NORMAL,
4842                        access_tx_launch_fifo7_unc_or_parity_err_cnt),
4843[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4844                        0, 0, CNTR_NORMAL,
4845                        access_tx_launch_fifo6_unc_or_parity_err_cnt),
4846[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4847                        0, 0, CNTR_NORMAL,
4848                        access_tx_launch_fifo5_unc_or_parity_err_cnt),
4849[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4850                        0, 0, CNTR_NORMAL,
4851                        access_tx_launch_fifo4_unc_or_parity_err_cnt),
4852[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4853                        0, 0, CNTR_NORMAL,
4854                        access_tx_launch_fifo3_unc_or_parity_err_cnt),
4855[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4856                        0, 0, CNTR_NORMAL,
4857                        access_tx_launch_fifo2_unc_or_parity_err_cnt),
4858[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4859                        0, 0, CNTR_NORMAL,
4860                        access_tx_launch_fifo1_unc_or_parity_err_cnt),
4861[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4862                        0, 0, CNTR_NORMAL,
4863                        access_tx_launch_fifo0_unc_or_parity_err_cnt),
4864[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4865                        0, 0, CNTR_NORMAL,
4866                        access_tx_sdma15_disallowed_packet_err_cnt),
4867[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4868                        0, 0, CNTR_NORMAL,
4869                        access_tx_sdma14_disallowed_packet_err_cnt),
4870[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4871                        0, 0, CNTR_NORMAL,
4872                        access_tx_sdma13_disallowed_packet_err_cnt),
4873[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4874                        0, 0, CNTR_NORMAL,
4875                        access_tx_sdma12_disallowed_packet_err_cnt),
4876[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4877                        0, 0, CNTR_NORMAL,
4878                        access_tx_sdma11_disallowed_packet_err_cnt),
4879[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4880                        0, 0, CNTR_NORMAL,
4881                        access_tx_sdma10_disallowed_packet_err_cnt),
4882[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4883                        0, 0, CNTR_NORMAL,
4884                        access_tx_sdma9_disallowed_packet_err_cnt),
4885[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4886                        0, 0, CNTR_NORMAL,
4887                        access_tx_sdma8_disallowed_packet_err_cnt),
4888[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4889                        0, 0, CNTR_NORMAL,
4890                        access_tx_sdma7_disallowed_packet_err_cnt),
4891[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4892                        0, 0, CNTR_NORMAL,
4893                        access_tx_sdma6_disallowed_packet_err_cnt),
4894[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4895                        0, 0, CNTR_NORMAL,
4896                        access_tx_sdma5_disallowed_packet_err_cnt),
4897[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4898                        0, 0, CNTR_NORMAL,
4899                        access_tx_sdma4_disallowed_packet_err_cnt),
4900[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4901                        0, 0, CNTR_NORMAL,
4902                        access_tx_sdma3_disallowed_packet_err_cnt),
4903[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4904                        0, 0, CNTR_NORMAL,
4905                        access_tx_sdma2_disallowed_packet_err_cnt),
4906[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4907                        0, 0, CNTR_NORMAL,
4908                        access_tx_sdma1_disallowed_packet_err_cnt),
4909[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4910                        0, 0, CNTR_NORMAL,
4911                        access_tx_sdma0_disallowed_packet_err_cnt),
4912[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4913                        CNTR_NORMAL,
4914                        access_tx_config_parity_err_cnt),
4915[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4916                        CNTR_NORMAL,
4917                        access_tx_sbrd_ctl_csr_parity_err_cnt),
4918[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4919                        CNTR_NORMAL,
4920                        access_tx_launch_csr_parity_err_cnt),
4921[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4922                        CNTR_NORMAL,
4923                        access_tx_illegal_vl_err_cnt),
4924[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4925                        "TxSbrdCtlStateMachineParityErr", 0, 0,
4926                        CNTR_NORMAL,
4927                        access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4928[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4929                        CNTR_NORMAL,
4930                        access_egress_reserved_10_err_cnt),
4931[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4932                        CNTR_NORMAL,
4933                        access_egress_reserved_9_err_cnt),
4934[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4935                        0, 0, CNTR_NORMAL,
4936                        access_tx_sdma_launch_intf_parity_err_cnt),
4937[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4938                        CNTR_NORMAL,
4939                        access_tx_pio_launch_intf_parity_err_cnt),
4940[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4941                        CNTR_NORMAL,
4942                        access_egress_reserved_6_err_cnt),
4943[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4944                        CNTR_NORMAL,
4945                        access_tx_incorrect_link_state_err_cnt),
4946[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4947                        CNTR_NORMAL,
4948                        access_tx_linkdown_err_cnt),
4949[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4950                        "EgressFifoUnderrunOrParityErr", 0, 0,
4951                        CNTR_NORMAL,
4952                        access_tx_egress_fifi_underrun_or_parity_err_cnt),
4953[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4954                        CNTR_NORMAL,
4955                        access_egress_reserved_2_err_cnt),
4956[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4957                        CNTR_NORMAL,
4958                        access_tx_pkt_integrity_mem_unc_err_cnt),
4959[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4960                        CNTR_NORMAL,
4961                        access_tx_pkt_integrity_mem_cor_err_cnt),
4962/* SendErrStatus */
4963[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4964                        CNTR_NORMAL,
4965                        access_send_csr_write_bad_addr_err_cnt),
4966[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4967                        CNTR_NORMAL,
4968                        access_send_csr_read_bad_addr_err_cnt),
4969[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4970                        CNTR_NORMAL,
4971                        access_send_csr_parity_cnt),
4972/* SendCtxtErrStatus */
4973[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4974                        CNTR_NORMAL,
4975                        access_pio_write_out_of_bounds_err_cnt),
4976[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4977                        CNTR_NORMAL,
4978                        access_pio_write_overflow_err_cnt),
4979[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4980                        0, 0, CNTR_NORMAL,
4981                        access_pio_write_crosses_boundary_err_cnt),
4982[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4983                        CNTR_NORMAL,
4984                        access_pio_disallowed_packet_err_cnt),
4985[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4986                        CNTR_NORMAL,
4987                        access_pio_inconsistent_sop_err_cnt),
4988/* SendDmaEngErrStatus */
4989[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4990                        0, 0, CNTR_NORMAL,
4991                        access_sdma_header_request_fifo_cor_err_cnt),
4992[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4993                        CNTR_NORMAL,
4994                        access_sdma_header_storage_cor_err_cnt),
4995[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4996                        CNTR_NORMAL,
4997                        access_sdma_packet_tracking_cor_err_cnt),
4998[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4999                        CNTR_NORMAL,
5000                        access_sdma_assembly_cor_err_cnt),
5001[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
5002                        CNTR_NORMAL,
5003                        access_sdma_desc_table_cor_err_cnt),
5004[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
5005                        0, 0, CNTR_NORMAL,
5006                        access_sdma_header_request_fifo_unc_err_cnt),
5007[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
5008                        CNTR_NORMAL,
5009                        access_sdma_header_storage_unc_err_cnt),
5010[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
5011                        CNTR_NORMAL,
5012                        access_sdma_packet_tracking_unc_err_cnt),
5013[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
5014                        CNTR_NORMAL,
5015                        access_sdma_assembly_unc_err_cnt),
5016[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
5017                        CNTR_NORMAL,
5018                        access_sdma_desc_table_unc_err_cnt),
5019[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
5020                        CNTR_NORMAL,
5021                        access_sdma_timeout_err_cnt),
5022[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
5023                        CNTR_NORMAL,
5024                        access_sdma_header_length_err_cnt),
5025[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
5026                        CNTR_NORMAL,
5027                        access_sdma_header_address_err_cnt),
5028[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
5029                        CNTR_NORMAL,
5030                        access_sdma_header_select_err_cnt),
5031[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
5032                        CNTR_NORMAL,
5033                        access_sdma_reserved_9_err_cnt),
5034[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
5035                        CNTR_NORMAL,
5036                        access_sdma_packet_desc_overflow_err_cnt),
5037[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
5038                        CNTR_NORMAL,
5039                        access_sdma_length_mismatch_err_cnt),
5040[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
5041                        CNTR_NORMAL,
5042                        access_sdma_halt_err_cnt),
5043[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
5044                        CNTR_NORMAL,
5045                        access_sdma_mem_read_err_cnt),
5046[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
5047                        CNTR_NORMAL,
5048                        access_sdma_first_desc_err_cnt),
5049[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
5050                        CNTR_NORMAL,
5051                        access_sdma_tail_out_of_bounds_err_cnt),
5052[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
5053                        CNTR_NORMAL,
5054                        access_sdma_too_long_err_cnt),
5055[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
5056                        CNTR_NORMAL,
5057                        access_sdma_gen_mismatch_err_cnt),
5058[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
5059                        CNTR_NORMAL,
5060                        access_sdma_wrong_dw_err_cnt),
5061};
5062
5063static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
5064[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
5065                        CNTR_NORMAL),
5066[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
5067                        CNTR_NORMAL),
5068[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
5069                        CNTR_NORMAL),
5070[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
5071                        CNTR_NORMAL),
5072[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
5073                        CNTR_NORMAL),
5074[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
5075                        CNTR_NORMAL),
5076[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
5077                        CNTR_NORMAL),
5078[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
5079[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
5080[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
5081[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
5082                                      CNTR_SYNTH | CNTR_VL),
5083[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
5084                                     CNTR_SYNTH | CNTR_VL),
5085[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
5086                                      CNTR_SYNTH | CNTR_VL),
5087[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5088[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5089[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5090                             access_sw_link_dn_cnt),
5091[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5092                           access_sw_link_up_cnt),
5093[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5094                                 access_sw_unknown_frame_cnt),
5095[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5096                             access_sw_xmit_discards),
5097[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
5098                                CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5099                                access_sw_xmit_discards),
5100[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
5101                                 access_xmit_constraint_errs),
5102[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
5103                                access_rcv_constraint_errs),
5104[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5105[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5106[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5107[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5108[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5109[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5110[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5111[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5112[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5113[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5114[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5115[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5116[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5117                               access_sw_cpu_rc_acks),
5118[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5119                                access_sw_cpu_rc_qacks),
5120[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5121                                       access_sw_cpu_rc_delayed_comp),
5122[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5123[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5124[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5125[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5126[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5127[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5128[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5129[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5130[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5131[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5132[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5133[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5134[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5135[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5136[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5137[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5138[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5139[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5140[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5141[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5142[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5143[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5144[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5145[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5146[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5147[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5148[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5149[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5150[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5151[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5152[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5153[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5154[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5155[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5156[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5157[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5158[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5159[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5160[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5161[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5162[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5163[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5164[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5165[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5166[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5167[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5168[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5169[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5170[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5171[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5172[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5173[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5174[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5175[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5176[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5177[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5178[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5179[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5180[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5181[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5182[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5183[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5184[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5185[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5186[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5187[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5188[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5189[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5190[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5191[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5192[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5193[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5194[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5195[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5196[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5197[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5198[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5199[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5200[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5201[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5202};
5203
5204/* ======================================================================== */
5205
5206/* return true if this is chip revision A */
5207int is_ax(struct hfi1_devdata *dd)
5208{
5209        u8 chip_rev_minor =
5210                dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5211                        & CCE_REVISION_CHIP_REV_MINOR_MASK;
5212        return (chip_rev_minor & 0xf0) == 0;
5213}
5214
5215/* return true if this is chip revision B */
5216int is_bx(struct hfi1_devdata *dd)
5217{
5218        u8 chip_rev_minor =
5219                dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5220                        & CCE_REVISION_CHIP_REV_MINOR_MASK;
5221        return (chip_rev_minor & 0xF0) == 0x10;
5222}
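
/*
 * Illustrative note (not additional driver code): the minor revision
 * field of CCE_REVISION encodes the chip step in its high nibble, so a
 * minor revision of 0x02 reads as an A-step part (is_ax() is true)
 * while 0x11 reads as a B-step part (is_bx() is true).
 */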
5223
5224/*
5225 * Append string s to buffer buf.  Arguments curp and len are the current
5226 * position and remaining length, respectively.
5227 *
5228 * return 0 on success, 1 on out of room
5229 */
5230static int append_str(char *buf, char **curp, int *lenp, const char *s)
5231{
5232        char *p = *curp;
5233        int len = *lenp;
5234        int result = 0; /* success */
5235        char c;
5236
5237        /* add a comma if this is not the first string in the buffer */
5238        if (p != buf) {
5239                if (len == 0) {
5240                        result = 1; /* out of room */
5241                        goto done;
5242                }
5243                *p++ = ',';
5244                len--;
5245        }
5246
5247        /* copy the string */
5248        while ((c = *s++) != 0) {
5249                if (len == 0) {
5250                        result = 1; /* out of room */
5251                        goto done;
5252                }
5253                *p++ = c;
5254                len--;
5255        }
5256
5257done:
5258        /* write return values */
5259        *curp = p;
5260        *lenp = len;
5261
5262        return result;
5263}
5264
5265/*
5266 * Using the given flag table, print a comma separated string into
5267 * the buffer.  End in '*' if the buffer is too short.
5268 */
5269static char *flag_string(char *buf, int buf_len, u64 flags,
5270                         struct flag_table *table, int table_size)
5271{
5272        char extra[32];
5273        char *p = buf;
5274        int len = buf_len;
5275        int no_room = 0;
5276        int i;
5277
5278        /* make sure there are at least 2 bytes so we can form "*" */
5279        if (len < 2)
5280                return "";
5281
5282        len--;  /* leave room for a nul */
5283        for (i = 0; i < table_size; i++) {
5284                if (flags & table[i].flag) {
5285                        no_room = append_str(buf, &p, &len, table[i].str);
5286                        if (no_room)
5287                                break;
5288                        flags &= ~table[i].flag;
5289                }
5290        }
5291
5292        /* any undocumented bits left? */
5293        if (!no_room && flags) {
5294                snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5295                no_room = append_str(buf, &p, &len, extra);
5296        }
5297
5298        /* add '*' if we ran out of room */
5299        if (no_room) {
5300                /* may need to back up to add space for a '*' */
5301                if (len == 0)
5302                        --p;
5303                *p++ = '*';
5304        }
5305
5306        /* add final nul - space already allocated above */
5307        *p = 0;
5308        return buf;
5309}
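
/*
 * Illustrative sketch (assumed values, not driver code): given a
 * hypothetical two-entry flag table mapping bit 0x1 to "FlagA" and bit
 * 0x2 to "FlagB", a call such as
 *
 *	char buf[64];
 *
 *	flag_string(buf, sizeof(buf), 0x7, table, ARRAY_SIZE(table));
 *
 * fills buf with "FlagA,FlagB,bits 0x4"; if the buffer is too small,
 * the output is truncated and ends in '*'.
 */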
5310
5311/* first 8 CCE error interrupt source names */
5312static const char * const cce_misc_names[] = {
5313        "CceErrInt",            /* 0 */
5314        "RxeErrInt",            /* 1 */
5315        "MiscErrInt",           /* 2 */
5316        "Reserved3",            /* 3 */
5317        "PioErrInt",            /* 4 */
5318        "SDmaErrInt",           /* 5 */
5319        "EgressErrInt",         /* 6 */
5320        "TxeErrInt"             /* 7 */
5321};
5322
5323/*
5324 * Return the miscellaneous error interrupt name.
5325 */
5326static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5327{
5328        if (source < ARRAY_SIZE(cce_misc_names))
5329                strncpy(buf, cce_misc_names[source], bsize);
5330        else
5331                snprintf(buf, bsize, "Reserved%u",
5332                         source + IS_GENERAL_ERR_START);
5333
5334        return buf;
5335}
5336
5337/*
5338 * Return the SDMA engine error interrupt name.
5339 */
5340static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5341{
5342        snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5343        return buf;
5344}
5345
5346/*
5347 * Return the send context error interrupt name.
5348 */
5349static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5350{
5351        snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5352        return buf;
5353}
5354
5355static const char * const various_names[] = {
5356        "PbcInt",
5357        "GpioAssertInt",
5358        "Qsfp1Int",
5359        "Qsfp2Int",
5360        "TCritInt"
5361};
5362
5363/*
5364 * Return the various interrupt name.
5365 */
5366static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5367{
5368        if (source < ARRAY_SIZE(various_names))
5369                strncpy(buf, various_names[source], bsize);
5370        else
5371                snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5372        return buf;
5373}
5374
5375/*
5376 * Return the DC interrupt name.
5377 */
5378static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5379{
5380        static const char * const dc_int_names[] = {
5381                "common",
5382                "lcb",
5383                "8051",
5384                "lbm"   /* local block merge */
5385        };
5386
5387        if (source < ARRAY_SIZE(dc_int_names))
5388                snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5389        else
5390                snprintf(buf, bsize, "DCInt%u", source);
5391        return buf;
5392}
5393
5394static const char * const sdma_int_names[] = {
5395        "SDmaInt",
5396        "SdmaIdleInt",
5397        "SdmaProgressInt",
5398};
5399
5400/*
5401 * Return the SDMA engine interrupt name.
5402 */
5403static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5404{
5405        /* what interrupt */
5406        unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
5407        /* which engine */
5408        unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5409
5410        if (likely(what < 3))
5411                snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5412        else
5413                snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5414        return buf;
5415}
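
/*
 * Illustrative note (not additional driver code): the second-tier SDMA
 * sources are laid out as three banks of TXE_NUM_SDMA_ENGINES, so with
 * 16 engines a source value of 17 decodes to what = 1, which = 1 and
 * is reported as "SdmaIdleInt1"; any source of 3 * 16 = 48 or above is
 * reported as an invalid SDMA interrupt.
 */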
5416
5417/*
5418 * Return the receive available interrupt name.
5419 */
5420static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5421{
5422        snprintf(buf, bsize, "RcvAvailInt%u", source);
5423        return buf;
5424}
5425
5426/*
5427 * Return the receive urgent interrupt name.
5428 */
5429static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5430{
5431        snprintf(buf, bsize, "RcvUrgentInt%u", source);
5432        return buf;
5433}
5434
5435/*
5436 * Return the send credit interrupt name.
5437 */
5438static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5439{
5440        snprintf(buf, bsize, "SendCreditInt%u", source);
5441        return buf;
5442}
5443
5444/*
5445 * Return the reserved interrupt name.
5446 */
5447static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5448{
5449        snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5450        return buf;
5451}
5452
5453static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5454{
5455        return flag_string(buf, buf_len, flags,
5456                           cce_err_status_flags,
5457                           ARRAY_SIZE(cce_err_status_flags));
5458}
5459
5460static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5461{
5462        return flag_string(buf, buf_len, flags,
5463                           rxe_err_status_flags,
5464                           ARRAY_SIZE(rxe_err_status_flags));
5465}
5466
5467static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5468{
5469        return flag_string(buf, buf_len, flags, misc_err_status_flags,
5470                           ARRAY_SIZE(misc_err_status_flags));
5471}
5472
5473static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5474{
5475        return flag_string(buf, buf_len, flags,
5476                           pio_err_status_flags,
5477                           ARRAY_SIZE(pio_err_status_flags));
5478}
5479
5480static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5481{
5482        return flag_string(buf, buf_len, flags,
5483                           sdma_err_status_flags,
5484                           ARRAY_SIZE(sdma_err_status_flags));
5485}
5486
5487static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5488{
5489        return flag_string(buf, buf_len, flags,
5490                           egress_err_status_flags,
5491                           ARRAY_SIZE(egress_err_status_flags));
5492}
5493
5494static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5495{
5496        return flag_string(buf, buf_len, flags,
5497                           egress_err_info_flags,
5498                           ARRAY_SIZE(egress_err_info_flags));
5499}
5500
5501static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5502{
5503        return flag_string(buf, buf_len, flags,
5504                           send_err_status_flags,
5505                           ARRAY_SIZE(send_err_status_flags));
5506}
5507
5508static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5509{
5510        char buf[96];
5511        int i = 0;
5512
5513        /*
5514         * For most of these errors, there is nothing that can be done except
5515         * to report or record them.
5516         */
5517        dd_dev_info(dd, "CCE Error: %s\n",
5518                    cce_err_status_string(buf, sizeof(buf), reg));
5519
5520        if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5521            is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5522                /* this error requires a manual drop into SPC freeze mode */
5523                /* then a fix up */
5524                start_freeze_handling(dd->pport, FREEZE_SELF);
5525        }
5526
5527        for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5528                if (reg & (1ull << i)) {
5529                        incr_cntr64(&dd->cce_err_status_cnt[i]);
5530                        /* maintain a counter over all cce_err_status errors */
5531                        incr_cntr64(&dd->sw_cce_err_status_aggregate);
5532                }
5533        }
5534}
5535
5536/*
5537 * Check counters for receive errors that do not have an interrupt
5538 * associated with them.
5539 */
5540#define RCVERR_CHECK_TIME 10
5541static void update_rcverr_timer(unsigned long opaque)
5542{
5543        struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5544        struct hfi1_pportdata *ppd = dd->pport;
5545        u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5546
5547        if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5548            ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5549                dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5550                set_link_down_reason(
5551                ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5552                OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5553                queue_work(ppd->link_wq, &ppd->link_bounce_work);
5554        }
5555        dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5556
5557        mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5558}
5559
5560static int init_rcverr(struct hfi1_devdata *dd)
5561{
5562        setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5563        /* Assume the hardware counter has been reset */
5564        dd->rcv_ovfl_cnt = 0;
5565        return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5566}
5567
5568static void free_rcverr(struct hfi1_devdata *dd)
5569{
5570        if (dd->rcverr_timer.data)
5571                del_timer_sync(&dd->rcverr_timer);
5572        dd->rcverr_timer.data = 0;
5573}
5574
5575static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5576{
5577        char buf[96];
5578        int i = 0;
5579
5580        dd_dev_info(dd, "Receive Error: %s\n",
5581                    rxe_err_status_string(buf, sizeof(buf), reg));
5582
5583        if (reg & ALL_RXE_FREEZE_ERR) {
5584                int flags = 0;
5585
5586                /*
5587                 * Freeze mode recovery is disabled for the errors
5588                 * in RXE_FREEZE_ABORT_MASK
5589                 */
5590                if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5591                        flags = FREEZE_ABORT;
5592
5593                start_freeze_handling(dd->pport, flags);
5594        }
5595
5596        for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5597                if (reg & (1ull << i))
5598                        incr_cntr64(&dd->rcv_err_status_cnt[i]);
5599        }
5600}
5601
5602static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5603{
5604        char buf[96];
5605        int i = 0;
5606
5607        dd_dev_info(dd, "Misc Error: %s",
5608                    misc_err_status_string(buf, sizeof(buf), reg));
5609        for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5610                if (reg & (1ull << i))
5611                        incr_cntr64(&dd->misc_err_status_cnt[i]);
5612        }
5613}
5614
5615static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5616{
5617        char buf[96];
5618        int i = 0;
5619
5620        dd_dev_info(dd, "PIO Error: %s\n",
5621                    pio_err_status_string(buf, sizeof(buf), reg));
5622
5623        if (reg & ALL_PIO_FREEZE_ERR)
5624                start_freeze_handling(dd->pport, 0);
5625
5626        for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5627                if (reg & (1ull << i))
5628                        incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5629        }
5630}
5631
5632static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5633{
5634        char buf[96];
5635        int i = 0;
5636
5637        dd_dev_info(dd, "SDMA Error: %s\n",
5638                    sdma_err_status_string(buf, sizeof(buf), reg));
5639
5640        if (reg & ALL_SDMA_FREEZE_ERR)
5641                start_freeze_handling(dd->pport, 0);
5642
5643        for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5644                if (reg & (1ull << i))
5645                        incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5646        }
5647}
5648
5649static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5650{
5651        incr_cntr64(&ppd->port_xmit_discards);
5652}
5653
5654static void count_port_inactive(struct hfi1_devdata *dd)
5655{
5656        __count_port_discards(dd->pport);
5657}
5658
5659/*
5660 * We have had a "disallowed packet" error during egress. Determine the
5661 * integrity check that failed, and update the relevant error counter, etc.
5662 *
5663 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5664 * bit of state per integrity check, and so we can miss the reason for an
5665 * egress error if more than one packet fails the same integrity check
5666 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5667 */
5668static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5669                                        int vl)
5670{
5671        struct hfi1_pportdata *ppd = dd->pport;
5672        u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5673        u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5674        char buf[96];
5675
5676        /* clear down all observed info as quickly as possible after read */
5677        write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5678
5679        dd_dev_info(dd,
5680                    "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5681                    info, egress_err_info_string(buf, sizeof(buf), info), src);
5682
5683        /* Eventually add other counters for each bit */
5684        if (info & PORT_DISCARD_EGRESS_ERRS) {
5685                int weight, i;
5686
5687                /*
5688                 * Count all applicable bits as individual errors and
5689                 * attribute them to the packet that triggered this handler.
5690                 * This may not be completely accurate due to limitations
5691                 * on the available hardware error information.  There is
5692                 * a single information register and any number of error
5693                 * packets may have occurred and contributed to it before
5694                 * this routine is called.  This means that:
5695                 * a) If multiple packets with the same error occur before
5696                 *    this routine is called, earlier packets are missed.
5697                 *    There is only a single bit for each error type.
5698                 * b) Errors may not be attributed to the correct VL.
5699                 *    The driver is attributing all bits in the info register
5700                 *    to the packet that triggered this call, but bits
5701                 *    could be an accumulation of different packets with
5702                 *    different VLs.
5703                 * c) A single error packet may have multiple counts attached
5704                 *    to it.  There is no way for the driver to know if
5705                 *    multiple bits set in the info register are due to a
5706                 *    single packet or multiple packets.  The driver assumes
5707                 *    multiple packets.
5708                 */
5709                weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5710                for (i = 0; i < weight; i++) {
5711                        __count_port_discards(ppd);
5712                        if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5713                                incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5714                        else if (vl == 15)
5715                                incr_cntr64(&ppd->port_xmit_discards_vl
5716                                            [C_VL_15]);
5717                }
5718        }
5719}
5720
5721/*
5722 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5723 * register. Does it represent a 'port inactive' error?
5724 */
5725static inline int port_inactive_err(u64 posn)
5726{
5727        return (posn >= SEES(TX_LINKDOWN) &&
5728                posn <= SEES(TX_INCORRECT_LINK_STATE));
5729}
5730
5731/*
5732 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5733 * register. Does it represent a 'disallowed packet' error?
5734 */
5735static inline int disallowed_pkt_err(int posn)
5736{
5737        return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5738                posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5739}
5740
5741/*
5742 * Input value is a bit position of one of the SDMA engine disallowed
5743 * packet errors.  Return which engine.  Use of this must be guarded by
5744 * disallowed_pkt_err().
5745 */
5746static inline int disallowed_pkt_engine(int posn)
5747{
5748        return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5749}
5750
5751/*
5752 * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
5753 * be done.
5754 */
5755static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5756{
5757        struct sdma_vl_map *m;
5758        int vl;
5759
5760        /* range check */
5761        if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5762                return -1;
5763
5764        rcu_read_lock();
5765        m = rcu_dereference(dd->sdma_map);
5766        vl = m->engine_to_vl[engine];
5767        rcu_read_unlock();
5768
5769        return vl;
5770}
5771
5772/*
5773 * Translate the send context (software index) into a VL.  Return -1 if the
5774 * translation cannot be done.
5775 */
5776static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5777{
5778        struct send_context_info *sci;
5779        struct send_context *sc;
5780        int i;
5781
5782        sci = &dd->send_contexts[sw_index];
5783
5784        /* there is no information for user (PSM) and ack contexts */
5785        if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5786                return -1;
5787
5788        sc = sci->sc;
5789        if (!sc)
5790                return -1;
5791        if (dd->vld[15].sc == sc)
5792                return 15;
5793        for (i = 0; i < num_vls; i++)
5794                if (dd->vld[i].sc == sc)
5795                        return i;
5796
5797        return -1;
5798}
5799
5800static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5801{
5802        u64 reg_copy = reg, handled = 0;
5803        char buf[96];
5804        int i = 0;
5805
5806        if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5807                start_freeze_handling(dd->pport, 0);
5808        else if (is_ax(dd) &&
5809                 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5810                 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5811                start_freeze_handling(dd->pport, 0);
5812
5813        while (reg_copy) {
5814                int posn = fls64(reg_copy);
5815                /* fls64() returns a 1-based offset, we want it zero based */
5816                int shift = posn - 1;
5817                u64 mask = 1ULL << shift;
5818
5819                if (port_inactive_err(shift)) {
5820                        count_port_inactive(dd);
5821                        handled |= mask;
5822                } else if (disallowed_pkt_err(shift)) {
5823                        int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5824
5825                        handle_send_egress_err_info(dd, vl);
5826                        handled |= mask;
5827                }
5828                reg_copy &= ~mask;
5829        }
5830
5831        reg &= ~handled;
5832
5833        if (reg)
5834                dd_dev_info(dd, "Egress Error: %s\n",
5835                            egress_err_status_string(buf, sizeof(buf), reg));
5836
5837        for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5838                if (reg & (1ull << i))
5839                        incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5840        }
5841}
5842
5843static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5844{
5845        char buf[96];
5846        int i = 0;
5847
5848        dd_dev_info(dd, "Send Error: %s\n",
5849                    send_err_status_string(buf, sizeof(buf), reg));
5850
5851        for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5852                if (reg & (1ull << i))
5853                        incr_cntr64(&dd->send_err_status_cnt[i]);
5854        }
5855}
5856
5857/*
5858 * The maximum number of times the error clear down will loop before
5859 * blocking a repeating error.  This value is arbitrary.
5860 */
5861#define MAX_CLEAR_COUNT 20
5862
5863/*
5864 * Clear and handle an error register.  All error interrupts are funneled
5865 * through here to have a central location to correctly handle single-
5866 * or multi-shot errors.
5867 *
5868 * For non per-context registers, call this routine with a context value
5869 * of 0 so the per-context offset is zero.
5870 *
5871 * If the handler loops too many times, assume that something is wrong
5872 * and can't be fixed, so mask the error bits.
5873 */
5874static void interrupt_clear_down(struct hfi1_devdata *dd,
5875                                 u32 context,
5876                                 const struct err_reg_info *eri)
5877{
5878        u64 reg;
5879        u32 count;
5880
5881        /* read in a loop until no more errors are seen */
5882        count = 0;
5883        while (1) {
5884                reg = read_kctxt_csr(dd, context, eri->status);
5885                if (reg == 0)
5886                        break;
5887                write_kctxt_csr(dd, context, eri->clear, reg);
5888                if (likely(eri->handler))
5889                        eri->handler(dd, context, reg);
5890                count++;
5891                if (count > MAX_CLEAR_COUNT) {
5892                        u64 mask;
5893
5894                        dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5895                                   eri->desc, reg);
5896                        /*
5897                         * Read-modify-write so any other masked bits
5898                         * remain masked.
5899                         */
5900                        mask = read_kctxt_csr(dd, context, eri->mask);
5901                        mask &= ~reg;
5902                        write_kctxt_csr(dd, context, eri->mask, mask);
5903                        break;
5904                }
5905        }
5906}
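
/*
 * Illustrative note (a sketch of the pattern, not additional driver
 * code): each err_reg_info entry ties a status CSR to its clear and
 * mask CSRs, a handler, and a description.  A second-tier error source
 * is serviced by reading the status CSR, writing the value back to the
 * clear CSR, and invoking the handler; if the same bits keep
 * re-asserting past MAX_CLEAR_COUNT iterations, those bits are cleared
 * in the mask CSR so the interrupt stops repeating.
 */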
5907
5908/*
5909 * CCE block "misc" interrupt.  Source is < 16.
5910 */
5911static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5912{
5913        const struct err_reg_info *eri = &misc_errs[source];
5914
5915        if (eri->handler) {
5916                interrupt_clear_down(dd, 0, eri);
5917        } else {
5918                dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5919                           source);
5920        }
5921}
5922
5923static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5924{
5925        return flag_string(buf, buf_len, flags,
5926                           sc_err_status_flags,
5927                           ARRAY_SIZE(sc_err_status_flags));
5928}
5929
5930/*
5931 * Send context error interrupt.  Source (hw_context) is < 160.
5932 *
5933 * All send context errors cause the send context to halt.  The normal
5934 * clear-down mechanism cannot be used because we cannot clear the
5935 * error bits until several other long-running items are done first.
5936 * This is OK because with the context halted, nothing else is going
5937 * to happen on it anyway.
5938 */
5939static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5940                                unsigned int hw_context)
5941{
5942        struct send_context_info *sci;
5943        struct send_context *sc;
5944        char flags[96];
5945        u64 status;
5946        u32 sw_index;
5947        int i = 0;
5948
5949        sw_index = dd->hw_to_sw[hw_context];
5950        if (sw_index >= dd->num_send_contexts) {
5951                dd_dev_err(dd,
5952                           "out of range sw index %u for send context %u\n",
5953                           sw_index, hw_context);
5954                return;
5955        }
5956        sci = &dd->send_contexts[sw_index];
5957        sc = sci->sc;
5958        if (!sc) {
5959                dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5960                           sw_index, hw_context);
5961                return;
5962        }
5963
5964        /* tell the software that a halt has begun */
5965        sc_stop(sc, SCF_HALTED);
5966
5967        status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5968
5969        dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5970                    send_context_err_status_string(flags, sizeof(flags),
5971                                                   status));
5972
5973        if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5974                handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5975
5976        /*
5977         * Automatically restart halted kernel contexts out of interrupt
5978         * context.  User contexts must ask the driver to restart the context.
5979         */
5980        if (sc->type != SC_USER)
5981                queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5982
5983        /*
5984         * Update the counters for the corresponding status bits.
5985         * Note that these particular counters are aggregated over all
5986         * 160 contexts.
5987         */
5988        for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5989                if (status & (1ull << i))
5990                        incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5991        }
5992}
5993
5994static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5995                                unsigned int source, u64 status)
5996{
5997        struct sdma_engine *sde;
5998        int i = 0;
5999
6000        sde = &dd->per_sdma[source];
6001#ifdef CONFIG_SDMA_VERBOSITY
6002        dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6003                   slashstrip(__FILE__), __LINE__, __func__);
6004        dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
6005                   sde->this_idx, source, (unsigned long long)status);
6006#endif
6007        sde->err_cnt++;
6008        sdma_engine_error(sde, status);
6009
6010        /*
6011         * Update the counters for the corresponding status bits.
6012         * Note that these particular counters are aggregated over
6013         * all 16 DMA engines.
6014         */
6015        for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
6016                if (status & (1ull << i))
6017                        incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6018        }
6019}
6020
6021/*
6022 * CCE block SDMA error interrupt.  Source is < 16.
6023 */
6024static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
6025{
6026#ifdef CONFIG_SDMA_VERBOSITY
6027        struct sdma_engine *sde = &dd->per_sdma[source];
6028
6029        dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6030                   slashstrip(__FILE__), __LINE__, __func__);
6031        dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
6032                   source);
6033        sdma_dumpstate(sde);
6034#endif
6035        interrupt_clear_down(dd, source, &sdma_eng_err);
6036}
6037
6038/*
6039 * CCE block "various" interrupt.  Source is < 8.
6040 */
6041static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6042{
6043        const struct err_reg_info *eri = &various_err[source];
6044
6045        /*
6046         * TCritInt cannot go through interrupt_clear_down()
6047         * because it is not a second tier interrupt. The handler
6048         * should be called directly.
6049         */
6050        if (source == TCRIT_INT_SOURCE)
6051                handle_temp_err(dd);
6052        else if (eri->handler)
6053                interrupt_clear_down(dd, 0, eri);
6054        else
6055                dd_dev_info(dd,
6056                            "%s: Unimplemented/reserved interrupt %d\n",
6057                            __func__, source);
6058}
6059
6060static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6061{
6062        /* src_ctx is always zero */
6063        struct hfi1_pportdata *ppd = dd->pport;
6064        unsigned long flags;
6065        u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6066
6067        if (reg & QSFP_HFI0_MODPRST_N) {
6068                if (!qsfp_mod_present(ppd)) {
6069                        dd_dev_info(dd, "%s: QSFP module removed\n",
6070                                    __func__);
6071
6072                        ppd->driver_link_ready = 0;
6073                        /*
6074                         * Cable removed, reset all our information about the
6075                         * cache and cable capabilities
6076                         */
6077
6078                        spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6079                        /*
6080                         * We don't set cache_refresh_required here as we expect
6081                         * an interrupt when a cable is inserted
6082                         */
6083                        ppd->qsfp_info.cache_valid = 0;
6084                        ppd->qsfp_info.reset_needed = 0;
6085                        ppd->qsfp_info.limiting_active = 0;
6086                        spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6087                                               flags);
6088                        /* Invert the ModPresent pin now to detect plug-in */
6089                        write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6090                                  ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6091
6092                        if ((ppd->offline_disabled_reason >
6093                          HFI1_ODR_MASK(
6094                          OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6095                          (ppd->offline_disabled_reason ==
6096                          HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6097                                ppd->offline_disabled_reason =
6098                                HFI1_ODR_MASK(
6099                                OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6100
6101                        if (ppd->host_link_state == HLS_DN_POLL) {
6102                                /*
6103                                 * The link is still in POLL. This means
6104                                 * that the normal link down processing
6105                                 * will not happen. We have to do it here
6106                                 * before turning the DC off.
6107                                 */
6108                                queue_work(ppd->link_wq, &ppd->link_down_work);
6109                        }
6110                } else {
6111                        dd_dev_info(dd, "%s: QSFP module inserted\n",
6112                                    __func__);
6113
6114                        spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6115                        ppd->qsfp_info.cache_valid = 0;
6116                        ppd->qsfp_info.cache_refresh_required = 1;
6117                        spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6118                                               flags);
6119
6120                        /*
6121                         * Stop inversion of ModPresent pin to detect
6122                         * removal of the cable
6123                         */
6124                        qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6125                        write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6126                                  ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6127
6128                        ppd->offline_disabled_reason =
6129                                HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6130                }
6131        }
6132
6133        if (reg & QSFP_HFI0_INT_N) {
6134                dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6135                            __func__);
6136                spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6137                ppd->qsfp_info.check_interrupt_flags = 1;
6138                spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6139        }
6140
6141        /* Schedule the QSFP work only if there is a cable attached. */
6142        if (qsfp_mod_present(ppd))
6143                queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
6144}
6145
6146static int request_host_lcb_access(struct hfi1_devdata *dd)
6147{
6148        int ret;
6149
6150        ret = do_8051_command(dd, HCMD_MISC,
6151                              (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6152                              LOAD_DATA_FIELD_ID_SHIFT, NULL);
6153        if (ret != HCMD_SUCCESS) {
6154                dd_dev_err(dd, "%s: command failed with error %d\n",
6155                           __func__, ret);
6156        }
6157        return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6158}
6159
6160static int request_8051_lcb_access(struct hfi1_devdata *dd)
6161{
6162        int ret;
6163
6164        ret = do_8051_command(dd, HCMD_MISC,
6165                              (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6166                              LOAD_DATA_FIELD_ID_SHIFT, NULL);
6167        if (ret != HCMD_SUCCESS) {
6168                dd_dev_err(dd, "%s: command failed with error %d\n",
6169                           __func__, ret);
6170        }
6171        return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6172}
6173
6174/*
6175 * Set the LCB selector - allow host access.  The DCC selector always
6176 * points to the host.
6177 */
6178static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6179{
6180        write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6181                  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6182                  DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6183}
6184
6185/*
6186 * Clear the LCB selector - allow 8051 access.  The DCC selector always
6187 * points to the host.
6188 */
6189static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6190{
6191        write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6192                  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6193}
6194
6195/*
6196 * Acquire LCB access from the 8051.  If the host already has access,
6197 * just increment a counter.  Otherwise, inform the 8051 that the
6198 * host is taking access.
6199 *
6200 * Returns:
6201 *      0 on success
6202 *      -EBUSY if the 8051 has control and cannot be disturbed
6203 *      -errno if unable to acquire access from the 8051
6204 */
6205int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6206{
6207        struct hfi1_pportdata *ppd = dd->pport;
6208        int ret = 0;
6209
6210        /*
6211         * Use the host link state lock so the operation of this routine
6212         * { link state check, selector change, count increment } can occur
6213         * as a unit against a link state change.  Otherwise there is a
6214         * race between the state change and the count increment.
6215         */
6216        if (sleep_ok) {
6217                mutex_lock(&ppd->hls_lock);
6218        } else {
6219                while (!mutex_trylock(&ppd->hls_lock))
6220                        udelay(1);
6221        }
6222
6223        /* this access is valid only when the link is up */
6224        if (ppd->host_link_state & HLS_DOWN) {
6225                dd_dev_info(dd, "%s: link state %s not up\n",
6226                            __func__, link_state_name(ppd->host_link_state));
6227                ret = -EBUSY;
6228                goto done;
6229        }
6230
6231        if (dd->lcb_access_count == 0) {
6232                ret = request_host_lcb_access(dd);
6233                if (ret) {
6234                        dd_dev_err(dd,
6235                                   "%s: unable to acquire LCB access, err %d\n",
6236                                   __func__, ret);
6237                        goto done;
6238                }
6239                set_host_lcb_access(dd);
6240        }
6241        dd->lcb_access_count++;
6242done:
6243        mutex_unlock(&ppd->hls_lock);
6244        return ret;
6245}
6246
6247/*
6248 * Release LCB access by decrementing the use count.  If the count is moving
6249 * from 1 to 0, inform the 8051 that it has control back.
6250 *
6251 * Returns:
6252 *      0 on success
6253 *      -errno if unable to release access to the 8051
6254 */
6255int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6256{
6257        int ret = 0;
6258
6259        /*
6260         * Use the host link state lock because the acquire needed it.
6261         * Here, we only need to keep { selector change, count decrement }
6262         * as a unit.
6263         */
6264        if (sleep_ok) {
6265                mutex_lock(&dd->pport->hls_lock);
6266        } else {
6267                while (!mutex_trylock(&dd->pport->hls_lock))
6268                        udelay(1);
6269        }
6270
6271        if (dd->lcb_access_count == 0) {
6272                dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
6273                           __func__);
6274                goto done;
6275        }
6276
6277        if (dd->lcb_access_count == 1) {
6278                set_8051_lcb_access(dd);
6279                ret = request_8051_lcb_access(dd);
6280                if (ret) {
6281                        dd_dev_err(dd,
6282                                   "%s: unable to release LCB access, err %d\n",
6283                                   __func__, ret);
6284                        /* restore host access if the grant didn't work */
6285                        set_host_lcb_access(dd);
6286                        goto done;
6287                }
6288        }
6289        dd->lcb_access_count--;
6290done:
6291        mutex_unlock(&dd->pport->hls_lock);
6292        return ret;
6293}
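
/*
 * Illustrative usage sketch (not from this file): callers that need to
 * touch LCB CSRs directly while the link is up bracket the access with
 * the pair above, for example
 *
 *	if (acquire_lcb_access(dd, 1) == 0) {
 *		reg = read_csr(dd, SOME_DC_LCB_CSR);
 *		release_lcb_access(dd, 1);
 *	}
 *
 * where SOME_DC_LCB_CSR stands in for an actual DC_LCB_* register
 * define.  The use count lets nested acquirers share a single host
 * grant.
 */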
6294
6295/*
6296 * Initialize LCB access variables and state.  Called during driver load,
6297 * after most of the initialization is finished.
6298 *
6299 * The DC default is LCB access on for the host.  The driver defaults to
6300 * leaving access to the 8051.  Assign access now - this constrains the call
6301 * to this routine to be after all LCB set-up is done.  In particular, after
6302 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6303 */
6304static void init_lcb_access(struct hfi1_devdata *dd)
6305{
6306        dd->lcb_access_count = 0;
6307}
6308
6309/*
6310 * Write a response back to an 8051 request.
6311 */
6312static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6313{
6314        write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6315                  DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6316                  (u64)return_code <<
6317                  DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6318                  (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6319}
6320
6321/*
6322 * Handle host requests from the 8051.
6323 */
6324static void handle_8051_request(struct hfi1_pportdata *ppd)
6325{
6326        struct hfi1_devdata *dd = ppd->dd;
6327        u64 reg;
6328        u16 data = 0;
6329        u8 type;
6330
6331        reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6332        if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6333                return; /* no request */
6334
6335        /* zero out COMPLETED so the response is seen */
6336        write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6337
6338        /* extract request details */
6339        type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6340                        & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6341        data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6342                        & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6343
6344        switch (type) {
6345        case HREQ_LOAD_CONFIG:
6346        case HREQ_SAVE_CONFIG:
6347        case HREQ_READ_CONFIG:
6348        case HREQ_SET_TX_EQ_ABS:
6349        case HREQ_SET_TX_EQ_REL:
6350        case HREQ_ENABLE:
6351                dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6352                            type);
6353                hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6354                break;
6355        case HREQ_CONFIG_DONE:
6356                hreq_response(dd, HREQ_SUCCESS, 0);
6357                break;
6358
6359        case HREQ_INTERFACE_TEST:
6360                hreq_response(dd, HREQ_SUCCESS, data);
6361                break;
6362        default:
6363                dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6364                hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6365                break;
6366        }
6367}
6368
6369/*
6370 * Set up the allocation unit value.
6371 */
6372void set_up_vau(struct hfi1_devdata *dd, u8 vau)
6373{
6374        u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6375
6376        /* do not modify other values in the register */
6377        reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6378        reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6379        write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6380}
6381
6382/*
6383 * Set up initial VL15 credits of the remote.  Assumes the rest of
6384 * the CM credit registers are zero from a previous global or credit reset.
6385 * Shared limit for VL15 will always be 0.
6386 */
6387void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
6388{
6389        u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6390
6391        /* set initial values for total and shared credit limit */
6392        reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6393                 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6394
6395        /*
6396         * Set total limit to be equal to VL15 credits.
6397         * Leave shared limit at 0.
6398         */
6399        reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6400        write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6401
6402        write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6403                  << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6404}
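
/*
 * Typical ordering, as used later in this file: handle_verify_cap()
 * applies the peer vAU with set_up_vau(), zeroes the VL15 credits with
 * set_up_vl15(dd, 0) and caches the peer value in dd->vl15buf_cached;
 * handle_link_up() then calls set_up_vl15(dd, dd->vl15buf_cached) once
 * the link is up (except for quick linkup or the simulator).
 */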
6405
6406/*
6407 * Zero all credit details from the previous connection and
6408 * reset the CM manager's internal counters.
6409 */
6410void reset_link_credits(struct hfi1_devdata *dd)
6411{
6412        int i;
6413
6414        /* remove all previous VL credit limits */
6415        for (i = 0; i < TXE_NUM_DATA_VL; i++)
6416                write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6417        write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6418        write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6419        /* reset the CM block */
6420        pio_send_control(dd, PSC_CM_RESET);
6421        /* reset cached value */
6422        dd->vl15buf_cached = 0;
6423}
6424
6425/* convert a vCU to a CU */
6426static u32 vcu_to_cu(u8 vcu)
6427{
6428        return 1 << vcu;
6429}
6430
6431/* convert a CU to a vCU */
6432static u8 cu_to_vcu(u32 cu)
6433{
6434        return ilog2(cu);
6435}
6436
6437/* convert a vAU to an AU */
6438static u32 vau_to_au(u8 vau)
6439{
6440        return 8 * (1 << vau);
6441}
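
/*
 * Worked examples of the encodings above (illustrative only):
 *      vcu_to_cu(2) = 1 << 2       = 4
 *      cu_to_vcu(4) = ilog2(4)     = 2
 *      vau_to_au(3) = 8 * (1 << 3) = 64 bytes
 */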
6442
6443static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6444{
6445        ppd->sm_trap_qp = 0x0;
6446        ppd->sa_qp = 0x1;
6447}
6448
6449/*
6450 * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
6451 */
6452static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6453{
6454        u64 reg;
6455
6456        /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6457        write_csr(dd, DC_LCB_CFG_RUN, 0);
6458        /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6459        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6460                  1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6461        /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6462        dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6463        reg = read_csr(dd, DCC_CFG_RESET);
6464        write_csr(dd, DCC_CFG_RESET, reg |
6465                  (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6466                  (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6467        (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6468        if (!abort) {
6469                udelay(1);    /* must hold for the longer of 16cclks or 20ns */
6470                write_csr(dd, DCC_CFG_RESET, reg);
6471                write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6472        }
6473}
6474
6475/*
6476 * This routine should be called after the link has been transitioned to
6477 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6478 * reset).
6479 *
6480 * The expectation is that the caller of this routine would have taken
6481 * care of properly transitioning the link into the correct state.
6482 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6483 *       before calling this function.
6484 */
6485static void _dc_shutdown(struct hfi1_devdata *dd)
6486{
6487        lockdep_assert_held(&dd->dc8051_lock);
6488
6489        if (dd->dc_shutdown)
6490                return;
6491
6492        dd->dc_shutdown = 1;
6493        /* Shutdown the LCB */
6494        lcb_shutdown(dd, 1);
6495        /*
6496         * Going to OFFLINE would have caused the 8051 to put the
6497         * SerDes into reset already.  Just need to shut down the 8051
6498         * itself.
6499         */
6500        write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6501}
6502
6503static void dc_shutdown(struct hfi1_devdata *dd)
6504{
6505        mutex_lock(&dd->dc8051_lock);
6506        _dc_shutdown(dd);
6507        mutex_unlock(&dd->dc8051_lock);
6508}
6509
6510/*
6511 * Calling this after the DC has been brought out of reset should not
6512 * do any damage.
6513 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6514 *       before calling this function.
6515 */
6516static void _dc_start(struct hfi1_devdata *dd)
6517{
6518        lockdep_assert_held(&dd->dc8051_lock);
6519
6520        if (!dd->dc_shutdown)
6521                return;
6522
6523        /* Take the 8051 out of reset */
6524        write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6525        /* Wait until 8051 is ready */
6526        if (wait_fm_ready(dd, TIMEOUT_8051_START))
6527                dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6528                           __func__);
6529
6530        /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6531        write_csr(dd, DCC_CFG_RESET, 0x10);
6532        /* lcb_shutdown() with abort=1 does not restore these */
6533        write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6534        dd->dc_shutdown = 0;
6535}
6536
6537static void dc_start(struct hfi1_devdata *dd)
6538{
6539        mutex_lock(&dd->dc8051_lock);
6540        _dc_start(dd);
6541        mutex_unlock(&dd->dc8051_lock);
6542}
6543
6544/*
6545 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6546 */
6547static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6548{
6549        u64 rx_radr, tx_radr;
6550        u32 version;
6551
6552        if (dd->icode != ICODE_FPGA_EMULATION)
6553                return;
6554
6555        /*
6556         * These LCB defaults on emulator _s are good, nothing to do here:
6557         *      LCB_CFG_TX_FIFOS_RADR
6558         *      LCB_CFG_RX_FIFOS_RADR
6559         *      LCB_CFG_LN_DCLK
6560         *      LCB_CFG_IGNORE_LOST_RCLK
6561         */
6562        if (is_emulator_s(dd))
6563                return;
6564        /* else this is _p */
6565
6566        version = emulator_rev(dd);
6567        if (!is_ax(dd))
6568                version = 0x2d; /* all B0 use 0x2d or higher settings */
6569
6570        if (version <= 0x12) {
6571                /* release 0x12 and below */
6572
6573                /*
6574                 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6575                 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6576                 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6577                 */
6578                rx_radr =
6579                      0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6580                    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6581                    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6582                /*
6583                 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6584                 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6585                 */
6586                tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6587        } else if (version <= 0x18) {
6588                /* release 0x13 up to 0x18 */
6589                /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6590                rx_radr =
6591                      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6592                    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6593                    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6594                tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6595        } else if (version == 0x19) {
6596                /* release 0x19 */
6597                /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6598                rx_radr =
6599                      0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6600                    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6601                    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6602                tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6603        } else if (version == 0x1a) {
6604                /* release 0x1a */
6605                /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6606                rx_radr =
6607                      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6608                    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6609                    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6610                tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6611                write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6612        } else {
6613                /* release 0x1b and higher */
6614                /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6615                rx_radr =
6616                      0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6617                    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6618                    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6619                tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6620        }
6621
6622        write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6623        /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6624        write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6625                  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6626        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6627}
6628
6629/*
6630 * Handle a SMA idle message
6631 *
6632 * This is a work-queue function outside of the interrupt.
6633 */
6634void handle_sma_message(struct work_struct *work)
6635{
6636        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6637                                                        sma_message_work);
6638        struct hfi1_devdata *dd = ppd->dd;
6639        u64 msg;
6640        int ret;
6641
6642        /*
6643         * msg is bytes 1-4 of the 40-bit idle message - the command code
6644         * is stripped off
6645         */
6646        ret = read_idle_sma(dd, &msg);
6647        if (ret)
6648                return;
6649        dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6650        /*
6651         * React to the SMA message.  Byte[1] (0 for us) is the command.
6652         */
6653        switch (msg & 0xff) {
6654        case SMA_IDLE_ARM:
6655                /*
6656                 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6657                 * State Transitions
6658                 *
6659                 * Only expected in INIT or ARMED, discard otherwise.
6660                 */
6661                if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6662                        ppd->neighbor_normal = 1;
6663                break;
6664        case SMA_IDLE_ACTIVE:
6665                /*
6666                 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6667                 * State Transitions
6668                 *
6669                 * Can activate the node.  Discard otherwise.
6670                 */
6671                if (ppd->host_link_state == HLS_UP_ARMED &&
6672                    ppd->is_active_optimize_enabled) {
6673                        ppd->neighbor_normal = 1;
6674                        ret = set_link_state(ppd, HLS_UP_ACTIVE);
6675                        if (ret)
6676                                dd_dev_err(
6677                                        dd,
6678                                        "%s: received Active SMA idle message, couldn't set link to Active\n",
6679                                        __func__);
6680                }
6681                break;
6682        default:
6683                dd_dev_err(dd,
6684                           "%s: received unexpected SMA idle message 0x%llx\n",
6685                           __func__, msg);
6686                break;
6687        }
6688}
6689
6690static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6691{
6692        u64 rcvctrl;
6693        unsigned long flags;
6694
6695        spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6696        rcvctrl = read_csr(dd, RCV_CTRL);
6697        rcvctrl |= add;
6698        rcvctrl &= ~clear;
6699        write_csr(dd, RCV_CTRL, rcvctrl);
6700        spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6701}
6702
6703static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6704{
6705        adjust_rcvctrl(dd, add, 0);
6706}
6707
6708static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6709{
6710        adjust_rcvctrl(dd, 0, clear);
6711}
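
/*
 * Example usage from this file: rxe_freeze() and handle_link_down() call
 * clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK) to disable the port,
 * and rxe_kernel_unfreeze() re-enables it with add_rcvctrl() and the
 * same mask.
 */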
6712
6713/*
6714 * Called from all interrupt handlers to start handling an SPC freeze.
6715 */
6716void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6717{
6718        struct hfi1_devdata *dd = ppd->dd;
6719        struct send_context *sc;
6720        int i;
6721
6722        if (flags & FREEZE_SELF)
6723                write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6724
6725        /* enter frozen mode */
6726        dd->flags |= HFI1_FROZEN;
6727
6728        /* notify all SDMA engines that they are going into a freeze */
6729        sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6730
6731        /* do halt pre-handling on all enabled send contexts */
6732        for (i = 0; i < dd->num_send_contexts; i++) {
6733                sc = dd->send_contexts[i].sc;
6734                if (sc && (sc->flags & SCF_ENABLED))
6735                        sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6736        }
6737
6738        /* Send contexts are frozen.  Notify user space */
6739        hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6740
6741        if (flags & FREEZE_ABORT) {
6742                dd_dev_err(dd,
6743                           "Aborted freeze recovery. Please REBOOT system\n");
6744                return;
6745        }
6746        /* queue non-interrupt handler */
6747        queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6748}
6749
6750/*
6751 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6752 * depending on the "freeze" parameter.
6753 *
6754 * No need to return an error if it times out, our only option
6755 * is to proceed anyway.
6756 */
6757static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6758{
6759        unsigned long timeout;
6760        u64 reg;
6761
6762        timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6763        while (1) {
6764                reg = read_csr(dd, CCE_STATUS);
6765                if (freeze) {
6766                        /* waiting until all indicators are set */
6767                        if ((reg & ALL_FROZE) == ALL_FROZE)
6768                                return; /* all done */
6769                } else {
6770                        /* waiting until all indicators are clear */
6771                        if ((reg & ALL_FROZE) == 0)
6772                                return; /* all done */
6773                }
6774
6775                if (time_after(jiffies, timeout)) {
6776                        dd_dev_err(dd,
6777                                   "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6778                                   freeze ? "" : "un", reg & ALL_FROZE,
6779                                   freeze ? ALL_FROZE : 0ull);
6780                        return;
6781                }
6782                usleep_range(80, 120);
6783        }
6784}
6785
6786/*
6787 * Do all freeze handling for the RXE block.
6788 */
6789static void rxe_freeze(struct hfi1_devdata *dd)
6790{
6791        int i;
6792        struct hfi1_ctxtdata *rcd;
6793
6794        /* disable port */
6795        clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6796
6797        /* disable all receive contexts */
6798        for (i = 0; i < dd->num_rcv_contexts; i++) {
6799                rcd = hfi1_rcd_get_by_index(dd, i);
6800                hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
6801                hfi1_rcd_put(rcd);
6802        }
6803}
6804
6805/*
6806 * Unfreeze handling for the RXE block - kernel contexts only.
6807 * This will also enable the port.  User contexts will do unfreeze
6808 * handling on a per-context basis as they call into the driver.
6809 *
6810 */
6811static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6812{
6813        u32 rcvmask;
6814        u16 i;
6815        struct hfi1_ctxtdata *rcd;
6816
6817        /* enable all kernel contexts */
6818        for (i = 0; i < dd->num_rcv_contexts; i++) {
6819                rcd = hfi1_rcd_get_by_index(dd, i);
6820
6821                /* Ensure all non-user contexts (including vnic) are enabled */
6822                if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER)) {
6823                        hfi1_rcd_put(rcd);
6824                        continue;
6825                }
6826                rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6827                /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6828                rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
6829                        HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6830                hfi1_rcvctrl(dd, rcvmask, rcd);
6831                hfi1_rcd_put(rcd);
6832        }
6833
6834        /* enable port */
6835        add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6836}
6837
6838/*
6839 * Non-interrupt SPC freeze handling.
6840 *
6841 * This is a work-queue function outside of the triggering interrupt.
6842 */
6843void handle_freeze(struct work_struct *work)
6844{
6845        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6846                                                                freeze_work);
6847        struct hfi1_devdata *dd = ppd->dd;
6848
6849        /* wait for freeze indicators on all affected blocks */
6850        wait_for_freeze_status(dd, 1);
6851
6852        /* SPC is now frozen */
6853
6854        /* do send PIO freeze steps */
6855        pio_freeze(dd);
6856
6857        /* do send DMA freeze steps */
6858        sdma_freeze(dd);
6859
6860        /* do send egress freeze steps - nothing to do */
6861
6862        /* do receive freeze steps */
6863        rxe_freeze(dd);
6864
6865        /*
6866         * Unfreeze the hardware - clear the freeze, wait for each
6867         * block's frozen bit to clear, then clear the frozen flag.
6868         */
6869        write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6870        wait_for_freeze_status(dd, 0);
6871
6872        if (is_ax(dd)) {
6873                write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6874                wait_for_freeze_status(dd, 1);
6875                write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6876                wait_for_freeze_status(dd, 0);
6877        }
6878
6879        /* do send PIO unfreeze steps for kernel contexts */
6880        pio_kernel_unfreeze(dd);
6881
6882        /* do send DMA unfreeze steps */
6883        sdma_unfreeze(dd);
6884
6885        /* do send egress unfreeze steps - nothing to do */
6886
6887        /* do receive unfreeze steps for kernel contexts */
6888        rxe_kernel_unfreeze(dd);
6889
6890        /*
6891         * The unfreeze procedure touches global device registers when
6892         * it disables and re-enables RXE. Mark the device unfrozen
6893         * after all that is done so other parts of the driver waiting
6894         * for the device to unfreeze don't do things out of order.
6895         *
6896         * The above implies that the meaning of HFI1_FROZEN flag is
6897         * "Device has gone into freeze mode and freeze mode handling
6898         * is still in progress."
6899         *
6900         * The flag will be removed when freeze mode processing has
6901         * completed.
6902         */
6903        dd->flags &= ~HFI1_FROZEN;
6904        wake_up(&dd->event_queue);
6905
6906        /* no longer frozen */
6907}
6908
6909/*
6910 * Handle a link up interrupt from the 8051.
6911 *
6912 * This is a work-queue function outside of the interrupt.
6913 */
6914void handle_link_up(struct work_struct *work)
6915{
6916        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6917                                                  link_up_work);
6918        struct hfi1_devdata *dd = ppd->dd;
6919
6920        set_link_state(ppd, HLS_UP_INIT);
6921
6922        /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6923        read_ltp_rtt(dd);
6924        /*
6925         * OPA specifies that certain counters are cleared on a transition
6926         * to link up, so do that.
6927         */
6928        clear_linkup_counters(dd);
6929        /*
6930         * And (re)set link up default values.
6931         */
6932        set_linkup_defaults(ppd);
6933
6934        /*
6935         * Set VL15 credits. Use cached value from verify cap interrupt.
6936         * In case of quick linkup or simulator, vl15 value will be set by
6937         * handle_linkup_change. VerifyCap interrupt handler will not be
6938         * called in those scenarios.
6939         */
6940        if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
6941                set_up_vl15(dd, dd->vl15buf_cached);
6942
6943        /* enforce link speed enabled */
6944        if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6945                /* oops - current speed is not enabled, bounce */
6946                dd_dev_err(dd,
6947                           "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6948                           ppd->link_speed_active, ppd->link_speed_enabled);
6949                set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6950                                     OPA_LINKDOWN_REASON_SPEED_POLICY);
6951                set_link_state(ppd, HLS_DN_OFFLINE);
6952                start_link(ppd);
6953        }
6954}
6955
6956/*
6957 * Several pieces of LNI information were cached for SMA in ppd.
6958 * Reset these on link down
6959 */
6960static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6961{
6962        ppd->neighbor_guid = 0;
6963        ppd->neighbor_port_number = 0;
6964        ppd->neighbor_type = 0;
6965        ppd->neighbor_fm_security = 0;
6966}
6967
6968static const char * const link_down_reason_strs[] = {
6969        [OPA_LINKDOWN_REASON_NONE] = "None",
6970        [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6971        [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6972        [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6973        [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6974        [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6975        [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6976        [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6977        [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6978        [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6979        [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6980        [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6981        [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6982        [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6983        [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6984        [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6985        [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6986        [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6987        [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6988        [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6989        [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6990        [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6991        [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6992        [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6993        [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6994        [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6995        [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6996        [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6997        [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6998        [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6999        [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
7000        [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
7001        [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
7002                                        "Excessive buffer overrun",
7003        [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
7004        [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
7005        [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
7006        [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
7007        [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
7008        [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
7009        [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
7010        [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
7011                                        "Local media not installed",
7012        [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
7013        [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
7014        [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
7015                                        "End to end not installed",
7016        [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
7017        [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
7018        [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
7019        [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
7020        [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
7021        [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7022};
7023
7024/* return the neighbor link down reason string */
7025static const char *link_down_reason_str(u8 reason)
7026{
7027        const char *str = NULL;
7028
7029        if (reason < ARRAY_SIZE(link_down_reason_strs))
7030                str = link_down_reason_strs[reason];
7031        if (!str)
7032                str = "(invalid)";
7033
7034        return str;
7035}
7036
7037/*
7038 * Handle a link down interrupt from the 8051.
7039 *
7040 * This is a work-queue function outside of the interrupt.
7041 */
7042void handle_link_down(struct work_struct *work)
7043{
7044        u8 lcl_reason, neigh_reason = 0;
7045        u8 link_down_reason;
7046        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7047                                                  link_down_work);
7048        int was_up;
7049        static const char ldr_str[] = "Link down reason: ";
7050
7051        if ((ppd->host_link_state &
7052             (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
7053             ppd->port_type == PORT_TYPE_FIXED)
7054                ppd->offline_disabled_reason =
7055                        HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
7056
7057        /* Go offline first, then deal with reading/writing through 8051 */
7058        was_up = !!(ppd->host_link_state & HLS_UP);
7059        set_link_state(ppd, HLS_DN_OFFLINE);
7060        xchg(&ppd->is_link_down_queued, 0);
7061
7062        if (was_up) {
7063                lcl_reason = 0;
7064                /* link down reason is only valid if the link was up */
7065                read_link_down_reason(ppd->dd, &link_down_reason);
7066                switch (link_down_reason) {
7067                case LDR_LINK_TRANSFER_ACTIVE_LOW:
7068                        /* the link went down, no idle message reason */
7069                        dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7070                                    ldr_str);
7071                        break;
7072                case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7073                        /*
7074                         * The neighbor reason is only valid if an idle message
7075                         * was received for it.
7076                         */
7077                        read_planned_down_reason_code(ppd->dd, &neigh_reason);
7078                        dd_dev_info(ppd->dd,
7079                                    "%sNeighbor link down message %d, %s\n",
7080                                    ldr_str, neigh_reason,
7081                                    link_down_reason_str(neigh_reason));
7082                        break;
7083                case LDR_RECEIVED_HOST_OFFLINE_REQ:
7084                        dd_dev_info(ppd->dd,
7085                                    "%sHost requested link to go offline\n",
7086                                    ldr_str);
7087                        break;
7088                default:
7089                        dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7090                                    ldr_str, link_down_reason);
7091                        break;
7092                }
7093
7094                /*
7095                 * If no reason, assume peer-initiated but missed
7096                 * LinkGoingDown idle flits.
7097                 */
7098                if (neigh_reason == 0)
7099                        lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7100        } else {
7101                /* went down while polling or going up */
7102                lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7103        }
7104
7105        set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7106
7107        /* inform the SMA when the link transitions from up to down */
7108        if (was_up && ppd->local_link_down_reason.sma == 0 &&
7109            ppd->neigh_link_down_reason.sma == 0) {
7110                ppd->local_link_down_reason.sma =
7111                                        ppd->local_link_down_reason.latest;
7112                ppd->neigh_link_down_reason.sma =
7113                                        ppd->neigh_link_down_reason.latest;
7114        }
7115
7116        reset_neighbor_info(ppd);
7117
7118        /* disable the port */
7119        clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7120
7121        /*
7122         * If there is no cable attached, turn the DC off. Otherwise,
7123         * start the link bring up.
7124         */
7125        if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
7126                dc_shutdown(ppd->dd);
7127        else
7128                start_link(ppd);
7129}
7130
7131void handle_link_bounce(struct work_struct *work)
7132{
7133        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7134                                                        link_bounce_work);
7135
7136        /*
7137         * Only do something if the link is currently up.
7138         */
7139        if (ppd->host_link_state & HLS_UP) {
7140                set_link_state(ppd, HLS_DN_OFFLINE);
7141                start_link(ppd);
7142        } else {
7143                dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7144                            __func__, link_state_name(ppd->host_link_state));
7145        }
7146}
7147
7148/*
7149 * Mask conversion: Capability exchange to Port LTP.  The capability
7150 * exchange has an implicit 16b CRC that is mandatory.
7151 */
7152static int cap_to_port_ltp(int cap)
7153{
7154        int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7155
7156        if (cap & CAP_CRC_14B)
7157                port_ltp |= PORT_LTP_CRC_MODE_14;
7158        if (cap & CAP_CRC_48B)
7159                port_ltp |= PORT_LTP_CRC_MODE_48;
7160        if (cap & CAP_CRC_12B_16B_PER_LANE)
7161                port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7162
7163        return port_ltp;
7164}
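
/*
 * Example (illustrative): cap = CAP_CRC_14B | CAP_CRC_48B maps to
 * PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14 | PORT_LTP_CRC_MODE_48,
 * since the mandatory 16b CRC mode is always included.
 */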
7165
7166/*
7167 * Convert an OPA Port LTP mask to capability mask
7168 */
7169int port_ltp_to_cap(int port_ltp)
7170{
7171        int cap_mask = 0;
7172
7173        if (port_ltp & PORT_LTP_CRC_MODE_14)
7174                cap_mask |= CAP_CRC_14B;
7175        if (port_ltp & PORT_LTP_CRC_MODE_48)
7176                cap_mask |= CAP_CRC_48B;
7177        if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7178                cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7179
7180        return cap_mask;
7181}
7182
7183/*
7184 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7185 */
7186static int lcb_to_port_ltp(int lcb_crc)
7187{
7188        int port_ltp = 0;
7189
7190        if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7191                port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7192        else if (lcb_crc == LCB_CRC_48B)
7193                port_ltp = PORT_LTP_CRC_MODE_48;
7194        else if (lcb_crc == LCB_CRC_14B)
7195                port_ltp = PORT_LTP_CRC_MODE_14;
7196        else
7197                port_ltp = PORT_LTP_CRC_MODE_16;
7198
7199        return port_ltp;
7200}
7201
7202/*
7203 * Our neighbor has indicated that we are allowed to act as a fabric
7204 * manager, so place the full management partition key at pkey array
7205 * index 2 (see OPAv1, section 20.2.2.6.8).  Note that we should
7206 * already have the limited management partition key at index 1, and
7207 * also that the port is not yet up when
7208 * add_full_mgmt_pkey() is invoked.
7209 */
7210static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7211{
7212        struct hfi1_devdata *dd = ppd->dd;
7213
7214        /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7215        if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7216                dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7217                            __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
7218        ppd->pkeys[2] = FULL_MGMT_P_KEY;
7219        (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7220        hfi1_event_pkey_change(ppd->dd, ppd->port);
7221}
7222
7223static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7224{
7225        if (ppd->pkeys[2] != 0) {
7226                ppd->pkeys[2] = 0;
7227                (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7228                hfi1_event_pkey_change(ppd->dd, ppd->port);
7229        }
7230}
7231
7232/*
7233 * Convert the given link width to the OPA link width bitmask.
7234 */
7235static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7236{
7237        switch (width) {
7238        case 0:
7239                /*
7240                 * Simulator and quick linkup do not set the width.
7241                 * Just set it to 4x without complaint.
7242                 */
7243                if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7244                        return OPA_LINK_WIDTH_4X;
7245                return 0; /* no lanes up */
7246        case 1: return OPA_LINK_WIDTH_1X;
7247        case 2: return OPA_LINK_WIDTH_2X;
7248        case 3: return OPA_LINK_WIDTH_3X;
7249        default:
7250                dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7251                            __func__, width);
7252                /* fall through */
7253        case 4: return OPA_LINK_WIDTH_4X;
7254        }
7255}
7256
7257/*
7258 * Do a population count on the bottom nibble.
7259 */
7260static const u8 bit_counts[16] = {
7261        0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7262};
7263
7264static inline u8 nibble_to_count(u8 nibble)
7265{
7266        return bit_counts[nibble & 0xf];
7267}
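
/*
 * Example: an enable_lane value of 0xb (lanes 0, 1 and 3 active) gives
 * nibble_to_count(0xb) = bit_counts[0xb] = 3.
 */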
7268
7269/*
7270 * Read the active lane information from the 8051 registers and return
7271 * their widths.
7272 *
7273 * Active lane information is found in these 8051 registers:
7274 *      enable_lane_tx
7275 *      enable_lane_rx
7276 */
7277static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7278                            u16 *rx_width)
7279{
7280        u16 tx, rx;
7281        u8 enable_lane_rx;
7282        u8 enable_lane_tx;
7283        u8 tx_polarity_inversion;
7284        u8 rx_polarity_inversion;
7285        u8 max_rate;
7286
7287        /* read the active lanes */
7288        read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7289                         &rx_polarity_inversion, &max_rate);
7290        read_local_lni(dd, &enable_lane_rx);
7291
7292        /* convert to counts */
7293        tx = nibble_to_count(enable_lane_tx);
7294        rx = nibble_to_count(enable_lane_rx);
7295
7296        /*
7297         * Set link_speed_active here, overriding what was set in
7298         * handle_verify_cap().  The ASIC 8051 firmware does not correctly
7299         * set the max_rate field in handle_verify_cap until v0.19.
7300         */
7301        if ((dd->icode == ICODE_RTL_SILICON) &&
7302            (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
7303                /* max_rate: 0 = 12.5G, 1 = 25G */
7304                switch (max_rate) {
7305                case 0:
7306                        dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7307                        break;
7308                default:
7309                        dd_dev_err(dd,
7310                                   "%s: unexpected max rate %d, using 25Gb\n",
7311                                   __func__, (int)max_rate);
7312                        /* fall through */
7313                case 1:
7314                        dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7315                        break;
7316                }
7317        }
7318
7319        dd_dev_info(dd,
7320                    "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7321                    enable_lane_tx, tx, enable_lane_rx, rx);
7322        *tx_width = link_width_to_bits(dd, tx);
7323        *rx_width = link_width_to_bits(dd, rx);
7324}
7325
7326/*
7327 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7328 * Valid after the end of VerifyCap and during LinkUp.  Does not change
7329 * after link up.  I.e. look elsewhere for downgrade information.
7330 *
7331 * Bits are:
7332 *      + bits [7:4] contain the number of active transmitters
7333 *      + bits [3:0] contain the number of active receivers
7334 * These are numbers 1 through 4 and can be different values if the
7335 * link is asymmetric.
7336 *
7337 * verify_cap_local_fm_link_width[0] retains its original value.
7338 */
7339static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7340                              u16 *rx_width)
7341{
7342        u16 widths, tx, rx;
7343        u8 misc_bits, local_flags;
7344        u16 active_tx, active_rx;
7345
7346        read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7347        tx = widths >> 12;
7348        rx = (widths >> 8) & 0xf;
7349
7350        *tx_width = link_width_to_bits(dd, tx);
7351        *rx_width = link_width_to_bits(dd, rx);
7352
7353        /* print the active widths */
7354        get_link_widths(dd, &active_tx, &active_rx);
7355}
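
/*
 * Example decode (illustrative): widths = 0x4400 gives tx = 0x4 and
 * rx = 0x4, which link_width_to_bits() maps to OPA_LINK_WIDTH_4X in
 * both directions.
 */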
7356
7357/*
7358 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7359 * hardware information when the link first comes up.
7360 *
7361 * The link width is not available until after VerifyCap.AllFramesReceived
7362 * (the trigger for handle_verify_cap), so this is outside that routine
7363 * and should be called when the 8051 signals linkup.
7364 */
7365void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7366{
7367        u16 tx_width, rx_width;
7368
7369        /* get end-of-LNI link widths */
7370        get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7371
7372        /* use tx_width as the link is supposed to be symmetric on link up */
7373        ppd->link_width_active = tx_width;
7374        /* link width downgrade active (LWD.A) starts out matching LW.A */
7375        ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7376        ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7377        /* per OPA spec, on link up LWD.E resets to LWD.S */
7378        ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7379        /* cache the active egress rate (units of 10^6 bits/sec) */
7380        ppd->current_egress_rate = active_egress_rate(ppd);
7381}
7382
7383/*
7384 * Handle a verify capabilities interrupt from the 8051.
7385 *
7386 * This is a work-queue function outside of the interrupt.
7387 */
7388void handle_verify_cap(struct work_struct *work)
7389{
7390        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7391                                                                link_vc_work);
7392        struct hfi1_devdata *dd = ppd->dd;
7393        u64 reg;
7394        u8 power_management;
7395        u8 continuous;
7396        u8 vcu;
7397        u8 vau;
7398        u8 z;
7399        u16 vl15buf;
7400        u16 link_widths;
7401        u16 crc_mask;
7402        u16 crc_val;
7403        u16 device_id;
7404        u16 active_tx, active_rx;
7405        u8 partner_supported_crc;
7406        u8 remote_tx_rate;
7407        u8 device_rev;
7408
7409        set_link_state(ppd, HLS_VERIFY_CAP);
7410
7411        lcb_shutdown(dd, 0);
7412        adjust_lcb_for_fpga_serdes(dd);
7413
7414        read_vc_remote_phy(dd, &power_management, &continuous);
7415        read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7416                              &partner_supported_crc);
7417        read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7418        read_remote_device_id(dd, &device_id, &device_rev);
7419        /*
7420         * And the 'MgmtAllowed' information, which is exchanged during
7421         * LNI, is also available at this point.
7422         */
7423        read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7424        /* print the active widths */
7425        get_link_widths(dd, &active_tx, &active_rx);
7426        dd_dev_info(dd,
7427                    "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7428                    (int)power_management, (int)continuous);
7429        dd_dev_info(dd,
7430                    "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7431                    (int)vau, (int)z, (int)vcu, (int)vl15buf,
7432                    (int)partner_supported_crc);
7433        dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7434                    (u32)remote_tx_rate, (u32)link_widths);
7435        dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7436                    (u32)device_id, (u32)device_rev);
7437        /*
7438         * The peer vAU value just read is the peer receiver value.  HFI does
7439         * not support a transmit vAU of 0 (AU == 8).  We advertised that
7440         * with Z=1 in the fabric capabilities sent to the peer.  The peer
7441         * will see our Z=1, and, if it advertised a vAU of 0, will move its
7442         * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
7443         * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7444         * subject to the Z value exception.
7445         */
7446        if (vau == 0)
7447                vau = 1;
7448        set_up_vau(dd, vau);
7449
7450        /*
7451         * Set VL15 credits to 0 in the global credit register.  Cache the
7452         * remote VL15 credits value and wait for the link-up interrupt to set it.
7453         */
7454        set_up_vl15(dd, 0);
7455        dd->vl15buf_cached = vl15buf;
7456
7457        /* set up the LCB CRC mode */
7458        crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7459
7460        /* order is important: use the lowest bit in common */
7461        if (crc_mask & CAP_CRC_14B)
7462                crc_val = LCB_CRC_14B;
7463        else if (crc_mask & CAP_CRC_48B)
7464                crc_val = LCB_CRC_48B;
7465        else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7466                crc_val = LCB_CRC_12B_16B_PER_LANE;
7467        else
7468                crc_val = LCB_CRC_16B;
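        /*
         * Example (illustrative): if both sides enable 14B and 48B CRC,
         * crc_mask has CAP_CRC_14B set and LCB_CRC_14B is chosen; a mask
         * with only CAP_CRC_48B would select LCB_CRC_48B instead.
         */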
7469
7470        dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7471        write_csr(dd, DC_LCB_CFG_CRC_MODE,
7472                  (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7473
7474        /* set (14b only) or clear sideband credit */
7475        reg = read_csr(dd, SEND_CM_CTRL);
7476        if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7477                write_csr(dd, SEND_CM_CTRL,
7478                          reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7479        } else {
7480                write_csr(dd, SEND_CM_CTRL,
7481                          reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7482        }
7483
7484        ppd->link_speed_active = 0;     /* invalid value */
7485        if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
7486                /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7487                switch (remote_tx_rate) {
7488                case 0:
7489                        ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7490                        break;
7491                case 1:
7492                        ppd->link_speed_active = OPA_LINK_SPEED_25G;
7493                        break;
7494                }
7495        } else {
7496                /* actual rate is highest bit of the ANDed rates */
7497                u8 rate = remote_tx_rate & ppd->local_tx_rate;
7498
7499                if (rate & 2)
7500                        ppd->link_speed_active = OPA_LINK_SPEED_25G;
7501                else if (rate & 1)
7502                        ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7503        }
7504        if (ppd->link_speed_active == 0) {
7505                dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7506                           __func__, (int)remote_tx_rate);
7507                ppd->link_speed_active = OPA_LINK_SPEED_25G;
7508        }
7509
7510        /*
7511         * Cache the values of the supported, enabled, and active
7512         * LTP CRC modes to return in 'portinfo' queries. But the bit
7513         * flags that are returned in the portinfo query differ from
7514         * what's in the link_crc_mask, crc_sizes, and crc_val
7515         * variables. Convert these here.
7516         */
7517        ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7518                /* supported crc modes */
7519        ppd->port_ltp_crc_mode |=
7520                cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7521                /* enabled crc modes */
7522        ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7523                /* active crc mode */
7524
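
        /*
         * Resulting layout, as built above: supported modes shifted left
         * by 8, enabled modes shifted left by 4, and the active mode in
         * the low bits of ppd->port_ltp_crc_mode.
         */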
7525        /* set up the remote credit return table */
7526        assign_remote_cm_au_table(dd, vcu);
7527
7528        /*
7529         * The LCB is reset on entry to handle_verify_cap(), so this must
7530         * be applied on every link up.
7531         *
7532         * Adjust LCB error kill enable to kill the link if
7533         * these RBUF errors are seen:
7534         *      REPLAY_BUF_MBE_SMASK
7535         *      FLIT_INPUT_BUF_MBE_SMASK
7536         */
7537        if (is_ax(dd)) {                        /* fixed in B0 */
7538                reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7539                reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7540                        | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7541                write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7542        }
7543
7544        /* pull LCB fifos out of reset - all fifo clocks must be stable */
7545        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7546
7547        /* give 8051 access to the LCB CSRs */
7548        write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7549        set_8051_lcb_access(dd);
7550
7551        if (ppd->mgmt_allowed)
7552                add_full_mgmt_pkey(ppd);
7553
7554        /* tell the 8051 to go to LinkUp */
7555        set_link_state(ppd, HLS_GOING_UP);
7556}
7557
7558/*
7559 * Apply the link width downgrade enabled policy against the current active
7560 * link widths.
7561 *
7562 * Called when the enabled policy changes or the active link widths change.
7563 */
7564void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7565{
7566        int do_bounce = 0;
7567        int tries;
7568        u16 lwde;
7569        u16 tx, rx;
7570
7571        /* use the hls lock to avoid a race with actual link up */
7572        tries = 0;
7573retry:
7574        mutex_lock(&ppd->hls_lock);
7575        /* only apply if the link is up */
7576        if (ppd->host_link_state & HLS_DOWN) {
7577                /* still going up... wait and retry */
7578                if (ppd->host_link_state & HLS_GOING_UP) {
7579                        if (++tries < 1000) {
7580                                mutex_unlock(&ppd->hls_lock);
7581                                usleep_range(100, 120); /* arbitrary */
7582                                goto retry;
7583                        }
7584                        dd_dev_err(ppd->dd,
7585                                   "%s: giving up waiting for link state change\n",
7586                                   __func__);
7587                }
7588                goto done;
7589        }
7590
7591        lwde = ppd->link_width_downgrade_enabled;
7592
7593        if (refresh_widths) {
7594                get_link_widths(ppd->dd, &tx, &rx);
7595                ppd->link_width_downgrade_tx_active = tx;
7596                ppd->link_width_downgrade_rx_active = rx;
7597        }
7598
7599        if (ppd->link_width_downgrade_tx_active == 0 ||
7600            ppd->link_width_downgrade_rx_active == 0) {
7601                /* the 8051 reported a dead link as a downgrade */
7602                dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7603        } else if (lwde == 0) {
7604                /* downgrade is disabled */
7605
7606                /* bounce if not at starting active width */
7607                if ((ppd->link_width_active !=
7608                     ppd->link_width_downgrade_tx_active) ||
7609                    (ppd->link_width_active !=
7610                     ppd->link_width_downgrade_rx_active)) {
7611                        dd_dev_err(ppd->dd,
7612                                   "Link downgrade is disabled and link has downgraded, downing link\n");
7613                        dd_dev_err(ppd->dd,
7614                                   "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
7615                                   ppd->link_width_active,
7616                                   ppd->link_width_downgrade_tx_active,
7617                                   ppd->link_width_downgrade_rx_active);
7618                        do_bounce = 1;
7619                }
7620        } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7621                   (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7622                /* Tx or Rx is outside the enabled policy */
7623                dd_dev_err(ppd->dd,
7624                           "Link is outside of downgrade allowed, downing link\n");
7625                dd_dev_err(ppd->dd,
7626                           "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7627                           lwde, ppd->link_width_downgrade_tx_active,
7628                           ppd->link_width_downgrade_rx_active);
7629                do_bounce = 1;
7630        }
7631
7632done:
7633        mutex_unlock(&ppd->hls_lock);
7634
7635        if (do_bounce) {
7636                set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7637                                     OPA_LINKDOWN_REASON_WIDTH_POLICY);
7638                set_link_state(ppd, HLS_DN_OFFLINE);
7639                start_link(ppd);
7640        }
7641}
7642
7643/*
7644 * Handle a link downgrade interrupt from the 8051.
7645 *
7646 * This is a work-queue function outside of the interrupt.
7647 */
7648void handle_link_downgrade(struct work_struct *work)
7649{
7650        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7651                                                        link_downgrade_work);
7652
7653        dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7654        apply_link_downgrade_policy(ppd, 1);
7655}
7656
7657static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7658{
7659        return flag_string(buf, buf_len, flags, dcc_err_flags,
7660                ARRAY_SIZE(dcc_err_flags));
7661}
7662
7663static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7664{
7665        return flag_string(buf, buf_len, flags, lcb_err_flags,
7666                ARRAY_SIZE(lcb_err_flags));
7667}
7668
7669static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7670{
7671        return flag_string(buf, buf_len, flags, dc8051_err_flags,
7672                ARRAY_SIZE(dc8051_err_flags));
7673}
7674
7675static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7676{
7677        return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7678                ARRAY_SIZE(dc8051_info_err_flags));
7679}
7680
7681static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7682{
7683        return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7684                ARRAY_SIZE(dc8051_info_host_msg_flags));
7685}
7686
7687static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7688{
7689        struct hfi1_pportdata *ppd = dd->pport;
7690        u64 info, err, host_msg;
7691        int queue_link_down = 0;
7692        char buf[96];
7693
7694        /* look at the flags */
7695        if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7696                /* 8051 information set by firmware */
7697                /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7698                info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7699                err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7700                        & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7701                host_msg = (info >>
7702                        DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7703                        & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7704
7705                /*
7706                 * Handle error flags.
7707                 */
7708                if (err & FAILED_LNI) {
7709                        /*
7710                         * LNI error indications are cleared by the 8051
7711                         * only when starting polling.  Only pay attention
7712                         * to them when in the states that occur during
7713                         * LNI.
7714                         */
7715                        if (ppd->host_link_state
7716                            & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7717                                queue_link_down = 1;
7718                                dd_dev_info(dd, "Link error: %s\n",
7719                                            dc8051_info_err_string(buf,
7720                                                                   sizeof(buf),
7721                                                                   err &
7722                                                                   FAILED_LNI));
7723                        }
7724                        err &= ~(u64)FAILED_LNI;
7725                }
7726                /* unknown frames can happen during LNI, just count */
7727                if (err & UNKNOWN_FRAME) {
7728                        ppd->unknown_frame_count++;
7729                        err &= ~(u64)UNKNOWN_FRAME;
7730                }
7731                if (err) {
7732                        /* report remaining errors, but do not do anything */
7733                        dd_dev_err(dd, "8051 info error: %s\n",
7734                                   dc8051_info_err_string(buf, sizeof(buf),
7735                                                          err));
7736                }
7737
7738                /*
7739                 * Handle host message flags.
7740                 */
7741                if (host_msg & HOST_REQ_DONE) {
7742                        /*
7743                         * Presently, the driver does a busy wait for
7744                         * host requests to complete.  This is only an
7745                         * informational message.
7746                         * NOTE: The 8051 clears the host message
7747                         * information *on the next 8051 command*.
7748                         * Therefore, when linkup is achieved,
7749                         * this flag will still be set.
7750                         */
7751                        host_msg &= ~(u64)HOST_REQ_DONE;
7752                }
7753                if (host_msg & BC_SMA_MSG) {
7754                        queue_work(ppd->link_wq, &ppd->sma_message_work);
7755                        host_msg &= ~(u64)BC_SMA_MSG;
7756                }
7757                if (host_msg & LINKUP_ACHIEVED) {
7758                        dd_dev_info(dd, "8051: Link up\n");
7759                        queue_work(ppd->link_wq, &ppd->link_up_work);
7760                        host_msg &= ~(u64)LINKUP_ACHIEVED;
7761                }
7762                if (host_msg & EXT_DEVICE_CFG_REQ) {
7763                        handle_8051_request(ppd);
7764                        host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7765                }
7766                if (host_msg & VERIFY_CAP_FRAME) {
7767                        queue_work(ppd->link_wq, &ppd->link_vc_work);
7768                        host_msg &= ~(u64)VERIFY_CAP_FRAME;
7769                }
7770                if (host_msg & LINK_GOING_DOWN) {
7771                        const char *extra = "";
7772                        /* no downgrade action needed if going down */
7773                        if (host_msg & LINK_WIDTH_DOWNGRADED) {
7774                                host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7775                                extra = " (ignoring downgrade)";
7776                        }
7777                        dd_dev_info(dd, "8051: Link down%s\n", extra);
7778                        queue_link_down = 1;
7779                        host_msg &= ~(u64)LINK_GOING_DOWN;
7780                }
7781                if (host_msg & LINK_WIDTH_DOWNGRADED) {
7782                        queue_work(ppd->link_wq, &ppd->link_downgrade_work);
7783                        host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7784                }
7785                if (host_msg) {
7786                        /* report remaining messages, but do not do anything */
7787                        dd_dev_info(dd, "8051 info host message: %s\n",
7788                                    dc8051_info_host_msg_string(buf,
7789                                                                sizeof(buf),
7790                                                                host_msg));
7791                }
7792
7793                reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7794        }
7795        if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7796                /*
7797                 * Lost the 8051 heartbeat.  If this happens, we
7798                 * receive constant interrupts about it.  Disable
7799                 * the interrupt after the first.
7800                 */
7801                dd_dev_err(dd, "Lost 8051 heartbeat\n");
7802                write_csr(dd, DC_DC8051_ERR_EN,
7803                          read_csr(dd, DC_DC8051_ERR_EN) &
7804                          ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7805
7806                reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7807        }
7808        if (reg) {
7809                /* report the error, but do not do anything */
7810                dd_dev_err(dd, "8051 error: %s\n",
7811                           dc8051_err_string(buf, sizeof(buf), reg));
7812        }
7813
7814        if (queue_link_down) {
7815                /*
7816                 * If the link is already going down or disabled, do not
7817                 * queue another link down request.  Likewise, if a link
7818                 * down entry is already queued, do not queue a second one.
7819                 */
7820                if ((ppd->host_link_state &
7821                    (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7822                    ppd->link_enabled == 0) {
7823                        dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
7824                                    __func__, ppd->host_link_state,
7825                                    ppd->link_enabled);
7826                } else {
7827                        if (xchg(&ppd->is_link_down_queued, 1) == 1)
7828                                dd_dev_info(dd,
7829                                            "%s: link down request already queued\n",
7830                                            __func__);
7831                        else
7832                                queue_work(ppd->link_wq, &ppd->link_down_work);
7833                }
7834        }
7835}
7836
7837static const char * const fm_config_txt[] = {
7838[0] =
7839        "BadHeadDist: Distance violation between two head flits",
7840[1] =
7841        "BadTailDist: Distance violation between two tail flits",
7842[2] =
7843        "BadCtrlDist: Distance violation between two credit control flits",
7844[3] =
7845        "BadCrdAck: Credits return for unsupported VL",
7846[4] =
7847        "UnsupportedVLMarker: Received VL Marker",
7848[5] =
7849        "BadPreempt: Exceeded the preemption nesting level",
7850[6] =
7851        "BadControlFlit: Received unsupported control flit",
7852/* no 7 */
7853[8] =
7854        "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7855};
7856
7857static const char * const port_rcv_txt[] = {
7858[1] =
7859        "BadPktLen: Illegal PktLen",
7860[2] =
7861        "PktLenTooLong: Packet longer than PktLen",
7862[3] =
7863        "PktLenTooShort: Packet shorter than PktLen",
7864[4] =
7865        "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7866[5] =
7867        "BadDLID: Illegal DLID (0, doesn't match HFI)",
7868[6] =
7869        "BadL2: Illegal L2 opcode",
7870[7] =
7871        "BadSC: Unsupported SC",
7872[9] =
7873        "BadRC: Illegal RC",
7874[11] =
7875        "PreemptError: Preempting with same VL",
7876[12] =
7877        "PreemptVL15: Preempting a VL15 packet",
7878};
7879
7880#define OPA_LDR_FMCONFIG_OFFSET 16
7881#define OPA_LDR_PORTRCV_OFFSET 0
7882static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7883{
7884        u64 info, hdr0, hdr1;
7885        const char *extra;
7886        char buf[96];
7887        struct hfi1_pportdata *ppd = dd->pport;
7888        u8 lcl_reason = 0;
7889        int do_bounce = 0;
7890
7891        if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7892                if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7893                        info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7894                        dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7895                        /* set status bit */
7896                        dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7897                }
7898                reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7899        }
7900
7901        if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7902                struct hfi1_pportdata *ppd = dd->pport;
7903                /* this counter saturates at (2^32) - 1 */
7904                if (ppd->link_downed < (u32)UINT_MAX)
7905                        ppd->link_downed++;
7906                reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7907        }
7908
7909        if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7910                u8 reason_valid = 1;
7911
7912                info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7913                if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7914                        dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7915                        /* set status bit */
7916                        dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7917                }
7918                switch (info) {
7919                case 0:
7920                case 1:
7921                case 2:
7922                case 3:
7923                case 4:
7924                case 5:
7925                case 6:
7926                        extra = fm_config_txt[info];
7927                        break;
7928                case 8:
7929                        extra = fm_config_txt[info];
7930                        if (ppd->port_error_action &
7931                            OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7932                                do_bounce = 1;
7933                                /*
7934                                 * lcl_reason cannot be derived from info
7935                                 * for this error
7936                                 */
7937                                lcl_reason =
7938                                  OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7939                        }
7940                        break;
7941                default:
7942                        reason_valid = 0;
7943                        snprintf(buf, sizeof(buf), "reserved%lld", info);
7944                        extra = buf;
7945                        break;
7946                }
7947
7948                if (reason_valid && !do_bounce) {
7949                        do_bounce = ppd->port_error_action &
7950                                        (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7951                        lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7952                }
7953
7954                /* just report this */
7955                dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
7956                                        extra);
7957                reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7958        }
7959
7960        if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7961                u8 reason_valid = 1;
7962
7963                info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7964                hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7965                hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7966                if (!(dd->err_info_rcvport.status_and_code &
7967                      OPA_EI_STATUS_SMASK)) {
7968                        dd->err_info_rcvport.status_and_code =
7969                                info & OPA_EI_CODE_SMASK;
7970                        /* set status bit */
7971                        dd->err_info_rcvport.status_and_code |=
7972                                OPA_EI_STATUS_SMASK;
7973                        /*
7974                         * save first 2 flits in the packet that caused
7975                         * the error
7976                         */
7977                        dd->err_info_rcvport.packet_flit1 = hdr0;
7978                        dd->err_info_rcvport.packet_flit2 = hdr1;
7979                }
7980                switch (info) {
7981                case 1:
7982                case 2:
7983                case 3:
7984                case 4:
7985                case 5:
7986                case 6:
7987                case 7:
7988                case 9:
7989                case 11:
7990                case 12:
7991                        extra = port_rcv_txt[info];
7992                        break;
7993                default:
7994                        reason_valid = 0;
7995                        snprintf(buf, sizeof(buf), "reserved%lld", info);
7996                        extra = buf;
7997                        break;
7998                }
7999
8000                if (reason_valid && !do_bounce) {
8001                        do_bounce = ppd->port_error_action &
8002                                        (1 << (OPA_LDR_PORTRCV_OFFSET + info));
8003                        lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
8004                }
8005
8006                /* just report this */
8007                dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
8008                                        "               hdr0 0x%llx, hdr1 0x%llx\n",
8009                                        extra, hdr0, hdr1);
8010
8011                reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
8012        }
8013
8014        if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
8015                /* informative only */
8016                dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
8017                reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8018        }
8019        if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8020                /* informative only */
8021                dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
8022                reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8023        }
8024
8025        if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8026                reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
8027
8028        /* report any remaining errors */
8029        if (reg)
8030                dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
8031                                        dcc_err_string(buf, sizeof(buf), reg));
8032
8033        if (lcl_reason == 0)
8034                lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8035
8036        if (do_bounce) {
8037                dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8038                                        __func__);
8039                set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
8040                queue_work(ppd->link_wq, &ppd->link_bounce_work);
8041        }
8042}
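
/*
 * Illustrative note (sketch, not part of the handler above): the FM's
 * PortErrorAction mask packs the PortRcv reasons starting at bit
 * OPA_LDR_PORTRCV_OFFSET (0) and the FMConfig reasons starting at bit
 * OPA_LDR_FMCONFIG_OFFSET (16).  For example, fmconfig error code 3
 * (BadCrdAck) bounces the link only if bit 19 is set:
 *
 *	do_bounce = ppd->port_error_action &
 *			(1 << (OPA_LDR_FMCONFIG_OFFSET + 3));
 */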
8043
8044static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
8045{
8046        char buf[96];
8047
8048        dd_dev_info(dd, "LCB Error: %s\n",
8049                    lcb_err_string(buf, sizeof(buf), reg));
8050}
8051
8052/*
8053 * CCE block DC interrupt.  Source is < 8.
8054 */
8055static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8056{
8057        const struct err_reg_info *eri = &dc_errs[source];
8058
8059        if (eri->handler) {
8060                interrupt_clear_down(dd, 0, eri);
8061        } else if (source == 3 /* dc_lbm_int */) {
8062                /*
8063                 * This indicates that a parity error has occurred on the
8064                 * address/control lines presented to the LBM.  The error
8065                 * is a single pulse, there is no associated error flag,
8066                 * and it is non-maskable.  This is because if a parity
8067                 * error occurs on the request, the request is dropped.
8068                 * This should never occur, but it is nice to know if it
8069                 * ever does.
8070                 */
8071                dd_dev_err(dd, "Parity error in DC LBM block\n");
8072        } else {
8073                dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8074        }
8075}
8076
8077/*
8078 * TX block send credit interrupt.  Source is < 160.
8079 */
8080static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8081{
8082        sc_group_release_update(dd, source);
8083}
8084
8085/*
8086 * TX block SDMA interrupt.  Source is < 48.
8087 *
8088 * SDMA interrupts are grouped by type:
8089 *
8090 *       0 -  N-1 = SDma
8091 *       N - 2N-1 = SDmaProgress
8092 *      2N - 3N-1 = SDmaIdle
8093 */
8094static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8095{
8096        /* what interrupt */
8097        unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
8098        /* which engine */
8099        unsigned int which = source % TXE_NUM_SDMA_ENGINES;
8100
8101#ifdef CONFIG_SDMA_VERBOSITY
8102        dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8103                   slashstrip(__FILE__), __LINE__, __func__);
8104        sdma_dumpstate(&dd->per_sdma[which]);
8105#endif
8106
8107        if (likely(what < 3 && which < dd->num_sdma)) {
8108                sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8109        } else {
8110                /* should not happen */
8111                dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8112        }
8113}
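
/*
 * Worked example (illustrative only, assuming TXE_NUM_SDMA_ENGINES is 16):
 * with the grouped layout above, the interrupt type and engine fall out of
 * a divide and a modulo by the engine count, e.g. for source 20:
 *
 *	what  = 20 / 16;	1 -> SDmaProgress group
 *	which = 20 % 16;	engine 4
 *
 * so the interrupt is delivered to engine 4 as a progress interrupt.
 */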
8114
8115/*
8116 * RX block receive available interrupt.  Source is < 160.
8117 */
8118static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8119{
8120        struct hfi1_ctxtdata *rcd;
8121        char *err_detail;
8122
8123        if (likely(source < dd->num_rcv_contexts)) {
8124                rcd = hfi1_rcd_get_by_index(dd, source);
8125                if (rcd) {
8126                        /* Check for non-user contexts, including vnic */
8127                        if ((source < dd->first_dyn_alloc_ctxt) ||
8128                            (rcd->sc && (rcd->sc->type == SC_KERNEL)))
8129                                rcd->do_interrupt(rcd, 0);
8130                        else
8131                                handle_user_interrupt(rcd);
8132
8133                        hfi1_rcd_put(rcd);
8134                        return; /* OK */
8135                }
8136                /* received an interrupt, but no rcd */
8137                err_detail = "dataless";
8138        } else {
8139                /* received an interrupt, but are not using that context */
8140                err_detail = "out of range";
8141        }
8142        dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8143                   err_detail, source);
8144}
8145
8146/*
8147 * RX block receive urgent interrupt.  Source is < 160.
8148 */
8149static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8150{
8151        struct hfi1_ctxtdata *rcd;
8152        char *err_detail;
8153
8154        if (likely(source < dd->num_rcv_contexts)) {
8155                rcd = hfi1_rcd_get_by_index(dd, source);
8156                if (rcd) {
8157                        /* only pay attention to user urgent interrupts */
8158                        if ((source >= dd->first_dyn_alloc_ctxt) &&
8159                            (!rcd->sc || (rcd->sc->type == SC_USER)))
8160                                handle_user_interrupt(rcd);
8161
8162                        hfi1_rcd_put(rcd);
8163                        return; /* OK */
8164                }
8165                /* received an interrupt, but no rcd */
8166                err_detail = "dataless";
8167        } else {
8168                /* received an interrupt, but are not using that context */
8169                err_detail = "out of range";
8170        }
8171        dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8172                   err_detail, source);
8173}
8174
8175/*
8176 * Reserved range interrupt.  Should not be called in normal operation.
8177 */
8178static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8179{
8180        char name[64];
8181
8182        dd_dev_err(dd, "unexpected %s interrupt\n",
8183                   is_reserved_name(name, sizeof(name), source));
8184}
8185
8186static const struct is_table is_table[] = {
8187/*
8188 * start                 end
8189 *                              name func               interrupt func
8190 */
8191{ IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
8192                                is_misc_err_name,       is_misc_err_int },
8193{ IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
8194                                is_sdma_eng_err_name,   is_sdma_eng_err_int },
8195{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8196                                is_sendctxt_err_name,   is_sendctxt_err_int },
8197{ IS_SDMA_START,             IS_SDMA_END,
8198                                is_sdma_eng_name,       is_sdma_eng_int },
8199{ IS_VARIOUS_START,          IS_VARIOUS_END,
8200                                is_various_name,        is_various_int },
8201{ IS_DC_START,       IS_DC_END,
8202                                is_dc_name,             is_dc_int },
8203{ IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
8204                                is_rcv_avail_name,      is_rcv_avail_int },
8205{ IS_RCVURGENT_START,    IS_RCVURGENT_END,
8206                                is_rcv_urgent_name,     is_rcv_urgent_int },
8207{ IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
8208                                is_send_credit_name,    is_send_credit_int},
8209{ IS_RESERVED_START,     IS_RESERVED_END,
8210                                is_reserved_name,       is_reserved_int},
8211};
8212
8213/*
8214 * Interrupt source interrupt - called when the given source has an interrupt.
8215 * Source is a bit index into an array of 64-bit integers.
8216 */
8217static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8218{
8219        const struct is_table *entry;
8220
8221        /* avoids a double compare by walking the table in-order */
8222        for (entry = &is_table[0]; entry->is_name; entry++) {
8223                if (source < entry->end) {
8224                        trace_hfi1_interrupt(dd, entry, source);
8225                        entry->is_int(dd, source - entry->start);
8226                        return;
8227                }
8228        }
8229        /* fell off the end */
8230        dd_dev_err(dd, "invalid interrupt source %u\n", source);
8231}
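
/*
 * Worked example (illustrative only): the table above is ordered by range,
 * so the first entry whose end bound exceeds the source owns it.  A source
 * inside the SDMA block stops the walk at the IS_SDMA entry and invokes
 * its handler with a 0-based offset into that block:
 *
 *	entry->is_int(dd, source - IS_SDMA_START);
 *
 * Each handler therefore sees an index relative to its own block, not the
 * global bit position.
 */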
8232
8233/*
8234 * General interrupt handler.  This is able to correctly handle
8235 * all interrupts in case INTx is used.
8236 */
8237static irqreturn_t general_interrupt(int irq, void *data)
8238{
8239        struct hfi1_devdata *dd = data;
8240        u64 regs[CCE_NUM_INT_CSRS];
8241        u32 bit;
8242        int i;
8243        irqreturn_t handled = IRQ_NONE;
8244
8245        this_cpu_inc(*dd->int_counter);
8246
8247        /* phase 1: scan and clear all handled interrupts */
8248        for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8249                if (dd->gi_mask[i] == 0) {
8250                        regs[i] = 0;    /* used later */
8251                        continue;
8252                }
8253                regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8254                                dd->gi_mask[i];
8255                /* only clear if anything is set */
8256                if (regs[i])
8257                        write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8258        }
8259
8260        /* phase 2: call the appropriate handler */
8261        for_each_set_bit(bit, (unsigned long *)&regs[0],
8262                         CCE_NUM_INT_CSRS * 64) {
8263                is_interrupt(dd, bit);
8264                handled = IRQ_HANDLED;
8265        }
8266
8267        return handled;
8268}
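
/*
 * Note on the two-phase structure above (illustrative): phase 1 snapshots
 * and clears every enabled status bit so new interrupts may latch while we
 * work, and phase 2 walks the snapshot.  for_each_set_bit() treats regs[]
 * as one long bitmap, so a bit position maps back to its status CSR as:
 *
 *	csr = CCE_INT_STATUS + 8 * (bit / 64);
 *	bit_in_csr = bit % 64;
 */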
8269
8270static irqreturn_t sdma_interrupt(int irq, void *data)
8271{
8272        struct sdma_engine *sde = data;
8273        struct hfi1_devdata *dd = sde->dd;
8274        u64 status;
8275
8276#ifdef CONFIG_SDMA_VERBOSITY
8277        dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8278                   slashstrip(__FILE__), __LINE__, __func__);
8279        sdma_dumpstate(sde);
8280#endif
8281
8282        this_cpu_inc(*dd->int_counter);
8283
8284        /* This read_csr is really bad in the hot path */
8285        status = read_csr(dd,
8286                          CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8287                          & sde->imask;
8288        if (likely(status)) {
8289                /* clear the interrupt(s) */
8290                write_csr(dd,
8291                          CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8292                          status);
8293
8294                /* handle the interrupt(s) */
8295                sdma_engine_interrupt(sde, status);
8296        } else {
8297                dd_dev_err_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
8298                                       sde->this_idx);
8299        }
8300        return IRQ_HANDLED;
8301}
8302
8303/*
8304 * Clear the receive interrupt.  Use a read of the interrupt clear CSR
8305 * to ensure that the write completed.  This does NOT guarantee that
8306 * queued DMA writes to memory from the chip are pushed.
8307 */
8308static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8309{
8310        struct hfi1_devdata *dd = rcd->dd;
8311        u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8312
8313        mmiowb();       /* make sure everything before is written */
8314        write_csr(dd, addr, rcd->imask);
8315        /* force the above write on the chip and get a value back */
8316        (void)read_csr(dd, addr);
8317}
8318
8319/* force the receive interrupt */
8320void force_recv_intr(struct hfi1_ctxtdata *rcd)
8321{
8322        write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8323}
8324
8325/*
8326 * Return non-zero if a packet is present.
8327 *
8328 * This routine is called when rechecking for packets after the RcvAvail
8329 * interrupt has been cleared down.  First, do a quick check of memory for
8330 * a packet present.  If not found, use an expensive CSR read of the context
8331 * tail to determine the actual tail.  The CSR read is necessary because there
8332 * is no method to push pending DMAs to memory other than an interrupt and we
8333 * are trying to determine if we need to force an interrupt.
8334 */
8335static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8336{
8337        u32 tail;
8338        int present;
8339
8340        if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8341                present = (rcd->seq_cnt ==
8342                                rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8343        else /* is RDMA rtail */
8344                present = (rcd->head != get_rcvhdrtail(rcd));
8345
8346        if (present)
8347                return 1;
8348
8349        /* fall back to a CSR read, correct independent of DMA_RTAIL */
8350        tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8351        return rcd->head != tail;
8352}
8353
8354/*
8355 * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
8356 * This routine will try to handle packets immediately (latency), but if
8357 * it finds too many, it will invoke the thread handler (bandwidth).  The
8358 * chip receive interrupt is *not* cleared down until this or the thread (if
8359 * invoked) is finished.  The intent is to avoid extra interrupts while we
8360 * are processing packets anyway.
8361 */
8362static irqreturn_t receive_context_interrupt(int irq, void *data)
8363{
8364        struct hfi1_ctxtdata *rcd = data;
8365        struct hfi1_devdata *dd = rcd->dd;
8366        int disposition;
8367        int present;
8368
8369        trace_hfi1_receive_interrupt(dd, rcd);
8370        this_cpu_inc(*dd->int_counter);
8371        aspm_ctx_disable(rcd);
8372
8373        /* receive interrupt remains blocked while processing packets */
8374        disposition = rcd->do_interrupt(rcd, 0);
8375
8376        /*
8377         * Too many packets were seen while processing packets in this
8378         * IRQ handler.  Invoke the handler thread.  The receive interrupt
8379         * remains blocked.
8380         */
8381        if (disposition == RCV_PKT_LIMIT)
8382                return IRQ_WAKE_THREAD;
8383
8384        /*
8385         * The packet processor detected no more packets.  Clear the receive
8386         * interrupt and recheck for a packet that may have arrived
8387         * after the previous check and interrupt clear.  If a packet arrived,
8388         * force another interrupt.
8389         */
8390        clear_recv_intr(rcd);
8391        present = check_packet_present(rcd);
8392        if (present)
8393                force_recv_intr(rcd);
8394
8395        return IRQ_HANDLED;
8396}
8397
8398/*
8399 * Receive packet thread handler.  This expects to be invoked with the
8400 * receive interrupt still blocked.
8401 */
8402static irqreturn_t receive_context_thread(int irq, void *data)
8403{
8404        struct hfi1_ctxtdata *rcd = data;
8405        int present;
8406
8407        /* receive interrupt is still blocked from the IRQ handler */
8408        (void)rcd->do_interrupt(rcd, 1);
8409
8410        /*
8411         * The packet processor will only return if it detected no more
8412         * packets.  Hold IRQs here so we can safely clear the interrupt and
8413         * recheck for a packet that may have arrived after the previous
8414         * check and the interrupt clear.  If a packet arrived, force another
8415         * interrupt.
8416         */
8417        local_irq_disable();
8418        clear_recv_intr(rcd);
8419        present = check_packet_present(rcd);
8420        if (present)
8421                force_recv_intr(rcd);
8422        local_irq_enable();
8423
8424        return IRQ_HANDLED;
8425}
8426
8427/* ========================================================================= */
8428
8429u32 read_physical_state(struct hfi1_devdata *dd)
8430{
8431        u64 reg;
8432
8433        reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8434        return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8435                                & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8436}
8437
8438u32 read_logical_state(struct hfi1_devdata *dd)
8439{
8440        u64 reg;
8441
8442        reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8443        return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8444                                & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8445}
8446
8447static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8448{
8449        u64 reg;
8450
8451        reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8452        /* clear current state, set new state */
8453        reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8454        reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8455        write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8456}
8457
8458/*
8459 * Use the 8051 to read a LCB CSR.
8460 */
8461static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8462{
8463        u32 regno;
8464        int ret;
8465
8466        if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8467                if (acquire_lcb_access(dd, 0) == 0) {
8468                        *data = read_csr(dd, addr);
8469                        release_lcb_access(dd, 0);
8470                        return 0;
8471                }
8472                return -EBUSY;
8473        }
8474
8475        /* register is an index of LCB registers: (offset - base) / 8 */
8476        regno = (addr - DC_LCB_CFG_RUN) >> 3;
8477        ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8478        if (ret != HCMD_SUCCESS)
8479                return -EBUSY;
8480        return 0;
8481}
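
/*
 * Illustrative example: LCB CSRs are spaced 8 bytes apart, so the index
 * handed to the 8051 is just the offset from the block base divided by 8.
 * For a hypothetical register at DC_LCB_CFG_RUN + 0x18:
 *
 *	regno = (addr - DC_LCB_CFG_RUN) >> 3;
 *
 * which yields regno == 3.
 */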
8482
8483/*
8484 * Provide a cache for some of the LCB registers in case the LCB is
8485 * unavailable.
8486 * (The LCB is unavailable in certain link states, for example.)
8487 */
8488struct lcb_datum {
8489        u32 off;
8490        u64 val;
8491};
8492
8493static struct lcb_datum lcb_cache[] = {
8494        { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8495        { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8496        { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8497};
8498
8499static void update_lcb_cache(struct hfi1_devdata *dd)
8500{
8501        int i;
8502        int ret;
8503        u64 val;
8504
8505        for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8506                ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8507
8508                /* Update if we get good data */
8509                if (likely(ret != -EBUSY))
8510                        lcb_cache[i].val = val;
8511        }
8512}
8513
8514static int read_lcb_cache(u32 off, u64 *val)
8515{
8516        int i;
8517
8518        for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8519                if (lcb_cache[i].off == off) {
8520                        *val = lcb_cache[i].val;
8521                        return 0;
8522                }
8523        }
8524
8525        pr_warn("%s bad offset 0x%x\n", __func__, off);
8526        return -1;
8527}
8528
8529/*
8530 * Read an LCB CSR.  Access may not be in host control, so check.
8531 * Return 0 on success, -EBUSY on failure.
8532 */
8533int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8534{
8535        struct hfi1_pportdata *ppd = dd->pport;
8536
8537        /* if up, go through the 8051 for the value */
8538        if (ppd->host_link_state & HLS_UP)
8539                return read_lcb_via_8051(dd, addr, data);
8540        /* if going up or down, check the cache, otherwise, no access */
8541        if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8542                if (read_lcb_cache(addr, data))
8543                        return -EBUSY;
8544                return 0;
8545        }
8546
8547        /* otherwise, host has access */
8548        *data = read_csr(dd, addr);
8549        return 0;
8550}
8551
8552/*
8553 * Use the 8051 to write a LCB CSR.
8554 */
8555static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8556{
8557        u32 regno;
8558        int ret;
8559
8560        if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8561            (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
8562                if (acquire_lcb_access(dd, 0) == 0) {
8563                        write_csr(dd, addr, data);
8564                        release_lcb_access(dd, 0);
8565                        return 0;
8566                }
8567                return -EBUSY;
8568        }
8569
8570        /* register is an index of LCB registers: (offset - base) / 8 */
8571        regno = (addr - DC_LCB_CFG_RUN) >> 3;
8572        ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8573        if (ret != HCMD_SUCCESS)
8574                return -EBUSY;
8575        return 0;
8576}
8577
8578/*
8579 * Write an LCB CSR.  Access may not be in host control, so check.
8580 * Return 0 on success, -EBUSY on failure.
8581 */
8582int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8583{
8584        struct hfi1_pportdata *ppd = dd->pport;
8585
8586        /* if up, go through the 8051 for the value */
8587        if (ppd->host_link_state & HLS_UP)
8588                return write_lcb_via_8051(dd, addr, data);
8589        /* if going up or down, no access */
8590        if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8591                return -EBUSY;
8592        /* otherwise, host has access */
8593        write_csr(dd, addr, data);
8594        return 0;
8595}
8596
8597/*
8598 * Returns:
8599 *      < 0 = Linux error, not able to get access
8600 *      > 0 = 8051 command RETURN_CODE
8601 */
8602static int do_8051_command(
8603        struct hfi1_devdata *dd,
8604        u32 type,
8605        u64 in_data,
8606        u64 *out_data)
8607{
8608        u64 reg, completed;
8609        int return_code;
8610        unsigned long timeout;
8611
8612        hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8613
8614        mutex_lock(&dd->dc8051_lock);
8615
8616        /* We can't send any commands to the 8051 if it's in reset */
8617        if (dd->dc_shutdown) {
8618                return_code = -ENODEV;
8619                goto fail;
8620        }
8621
8622        /*
8623         * If an 8051 host command timed out previously, then the 8051 is
8624         * stuck.
8625         *
8626         * On first timeout, attempt to reset and restart the entire DC
8627         * block (including 8051). (Is this too big of a hammer?)
8628         *
8629         * If the 8051 times out a second time, the reset did not bring it
8630         * back to healthy life. In that case, fail any subsequent commands.
8631         */
8632        if (dd->dc8051_timed_out) {
8633                if (dd->dc8051_timed_out > 1) {
8634                        dd_dev_err(dd,
8635                                   "Previous 8051 host command timed out, skipping command %u\n",
8636                                   type);
8637                        return_code = -ENXIO;
8638                        goto fail;
8639                }
8640                _dc_shutdown(dd);
8641                _dc_start(dd);
8642        }
8643
8644        /*
8645         * If there is no timeout, then the 8051 command interface is
8646         * waiting for a command.
8647         */
8648
8649        /*
8650         * When writing an LCB CSR, out_data contains the full value to
8651         * be written, while in_data contains the relative LCB
8652         * address in 7:0.  Do the work here, rather than the caller,
8653         * of distributing the write data to where it needs to go:
8654         *
8655         * Write data
8656         *   39:00 -> in_data[47:8]
8657         *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8658         *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8659         */
8660        if (type == HCMD_WRITE_LCB_CSR) {
8661                in_data |= ((*out_data) & 0xffffffffffull) << 8;
8662                /* must preserve COMPLETED - it is tied to hardware */
8663                reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8664                reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8665                reg |= ((((*out_data) >> 40) & 0xff) <<
8666                                DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8667                      | ((((*out_data) >> 48) & 0xffff) <<
8668                                DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8669                write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8670        }
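
        /*
         * Illustrative example (sketch, not part of the command flow): for
         * *out_data == 0xDDEEAA1122334455 the distribution above yields,
         * per the layout in the preceding comment:
         *
         *	in_data |= 0x1122334455 << 8;	data bits 39:0 -> in_data[47:8]
         *	RETURN_CODE field <- 0xAA;	data bits 47:40
         *	RSP_DATA field    <- 0xDDEE;	data bits 63:48
         */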
8671
8672        /*
8673         * Do two writes: the first to stabilize the type and req_data, the
8674         * second to activate.
8675         */
8676        reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8677                        << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8678                | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8679                        << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8680        write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8681        reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8682        write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8683
8684        /* wait for completion, alternate: interrupt */
8685        timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8686        while (1) {
8687                reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8688                completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8689                if (completed)
8690                        break;
8691                if (time_after(jiffies, timeout)) {
8692                        dd->dc8051_timed_out++;
8693                        dd_dev_err(dd, "8051 host command %u timeout\n", type);
8694                        if (out_data)
8695                                *out_data = 0;
8696                        return_code = -ETIMEDOUT;
8697                        goto fail;
8698                }
8699                udelay(2);
8700        }
8701
8702        if (out_data) {
8703                *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8704                                & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8705                if (type == HCMD_READ_LCB_CSR) {
8706                        /* top 16 bits are in a different register */
8707                        *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8708                                & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8709                                << (48
8710                                    - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8711                }
8712        }
8713        return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8714                                & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8715        dd->dc8051_timed_out = 0;
8716        /*
8717         * Clear command for next user.
8718         */
8719        write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8720
8721fail:
8722        mutex_unlock(&dd->dc8051_lock);
8723        return return_code;
8724}
8725
8726static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8727{
8728        return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8729}
8730
8731int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8732                     u8 lane_id, u32 config_data)
8733{
8734        u64 data;
8735        int ret;
8736
8737        data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8738                | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8739                | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8740        ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8741        if (ret != HCMD_SUCCESS) {
8742                dd_dev_err(dd,
8743                           "load 8051 config: field id %d, lane %d, err %d\n",
8744                           (int)field_id, (int)lane_id, ret);
8745        }
8746        return ret;
8747}
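
/*
 * Illustrative example: the LOAD_CONFIG_DATA payload packs the field id,
 * lane id, and 32-bit value into a single 64-bit word.  For instance,
 * write_vc_local_phy() below ends up issuing:
 *
 *	data = (u64)VERIFY_CAP_LOCAL_PHY << LOAD_DATA_FIELD_ID_SHIFT
 *	     | (u64)GENERAL_CONFIG << LOAD_DATA_LANE_ID_SHIFT
 *	     | (u64)frame << LOAD_DATA_DATA_SHIFT;
 */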
8748
8749/*
8750 * Read the 8051 firmware "registers".  Use the RAM directly.  Always
8751 * set the result, even on error.
8752 * Return 0 on success, -errno on failure
8753 */
8754int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8755                     u32 *result)
8756{
8757        u64 big_data;
8758        u32 addr;
8759        int ret;
8760
8761        /* address start depends on the lane_id */
8762        if (lane_id < 4)
8763                addr = (4 * NUM_GENERAL_FIELDS)
8764                        + (lane_id * 4 * NUM_LANE_FIELDS);
8765        else
8766                addr = 0;
8767        addr += field_id * 4;
8768
8769        /* read is in 8-byte chunks, hardware will truncate the address down */
8770        ret = read_8051_data(dd, addr, 8, &big_data);
8771
8772        if (ret == 0) {
8773                /* extract the 4 bytes we want */
8774                if (addr & 0x4)
8775                        *result = (u32)(big_data >> 32);
8776                else
8777                        *result = (u32)big_data;
8778        } else {
8779                *result = 0;
8780                dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8781                           __func__, lane_id, field_id);
8782        }
8783
8784        return ret;
8785}
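
/*
 * Worked example of the addressing above (illustrative): general fields
 * (lane_id >= 4, i.e. GENERAL_CONFIG) start at offset 0 and the per-lane
 * fields follow, so for lane_id < 4:
 *
 *	addr = 4 * NUM_GENERAL_FIELDS
 *	     + lane_id * 4 * NUM_LANE_FIELDS
 *	     + field_id * 4;
 *
 * The 8-byte RAM read returns two adjacent 4-byte fields; addr & 0x4
 * selects the upper or lower half of big_data.
 */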
8786
8787static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8788                              u8 continuous)
8789{
8790        u32 frame;
8791
8792        frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8793                | power_management << POWER_MANAGEMENT_SHIFT;
8794        return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8795                                GENERAL_CONFIG, frame);
8796}
8797
8798static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8799                                 u16 vl15buf, u8 crc_sizes)
8800{
8801        u32 frame;
8802
8803        frame = (u32)vau << VAU_SHIFT
8804                | (u32)z << Z_SHIFT
8805                | (u32)vcu << VCU_SHIFT
8806                | (u32)vl15buf << VL15BUF_SHIFT
8807                | (u32)crc_sizes << CRC_SIZES_SHIFT;
8808        return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8809                                GENERAL_CONFIG, frame);
8810}
8811
8812static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8813                                     u8 *flag_bits, u16 *link_widths)
8814{
8815        u32 frame;
8816
8817        read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8818                         &frame);
8819        *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8820        *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8821        *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8822}
8823
8824static int write_vc_local_link_width(struct hfi1_devdata *dd,
8825                                     u8 misc_bits,
8826                                     u8 flag_bits,
8827                                     u16 link_widths)
8828{
8829        u32 frame;
8830
8831        frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8832                | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8833                | (u32)link_widths << LINK_WIDTH_SHIFT;
8834        return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8835                     frame);
8836}
8837
8838static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8839                                 u8 device_rev)
8840{
8841        u32 frame;
8842
8843        frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8844                | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8845        return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8846}
8847
8848static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8849                                  u8 *device_rev)
8850{
8851        u32 frame;
8852
8853        read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8854        *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8855        *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8856                        & REMOTE_DEVICE_REV_MASK;
8857}
8858
8859int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8860{
8861        u32 frame;
8862        u32 mask;
8863
8864        mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8865        read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8866        /* Clear, then set field */
8867        frame &= ~mask;
8868        frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8869        return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8870                                frame);
8871}
8872
8873void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8874                      u8 *ver_patch)
8875{
8876        u32 frame;
8877
8878        read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8879        *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8880                STS_FM_VERSION_MAJOR_MASK;
8881        *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8882                STS_FM_VERSION_MINOR_MASK;
8883
8884        read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8885        *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8886                STS_FM_VERSION_PATCH_MASK;
8887}
8888
8889static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8890                               u8 *continuous)
8891{
8892        u32 frame;
8893
8894        read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8895        *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8896                                        & POWER_MANAGEMENT_MASK;
8897        *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8898                                        & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8899}
8900
8901static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8902                                  u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8903{
8904        u32 frame;
8905
8906        read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8907        *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8908        *z = (frame >> Z_SHIFT) & Z_MASK;
8909        *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8910        *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8911        *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8912}
8913
8914static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8915                                      u8 *remote_tx_rate,
8916                                      u16 *link_widths)
8917{
8918        u32 frame;
8919
8920        read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8921                         &frame);
8922        *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8923                                & REMOTE_TX_RATE_MASK;
8924        *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8925}
8926
8927static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8928{
8929        u32 frame;
8930
8931        read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8932        *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8933}
8934
8935static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8936{
8937        u32 frame;
8938
8939        read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8940        *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8941}
8942
8943static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8944{
8945        read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8946}
8947
8948static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8949{
8950        read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8951}
8952
8953void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8954{
8955        u32 frame;
8956        int ret;
8957
8958        *link_quality = 0;
8959        if (dd->pport->host_link_state & HLS_UP) {
8960                ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8961                                       &frame);
8962                if (ret == 0)
8963                        *link_quality = (frame >> LINK_QUALITY_SHIFT)
8964                                                & LINK_QUALITY_MASK;
8965        }
8966}
8967
8968static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8969{
8970        u32 frame;
8971
8972        read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8973        *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8974}
8975
8976static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8977{
8978        u32 frame;
8979
8980        read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8981        *ldr = (frame & 0xff);
8982}
8983
8984static int read_tx_settings(struct hfi1_devdata *dd,
8985                            u8 *enable_lane_tx,
8986                            u8 *tx_polarity_inversion,
8987                            u8 *rx_polarity_inversion,
8988                            u8 *max_rate)
8989{
8990        u32 frame;
8991        int ret;
8992
8993        ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8994        *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8995                                & ENABLE_LANE_TX_MASK;
8996        *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8997                                & TX_POLARITY_INVERSION_MASK;
8998        *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8999                                & RX_POLARITY_INVERSION_MASK;
9000        *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
9001        return ret;
9002}
9003
9004static int write_tx_settings(struct hfi1_devdata *dd,
9005                             u8 enable_lane_tx,
9006                             u8 tx_polarity_inversion,
9007                             u8 rx_polarity_inversion,
9008                             u8 max_rate)
9009{
9010        u32 frame;
9011
9012        /* no need to mask, all variable sizes match field widths */
9013        frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9014                | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9015                | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9016                | max_rate << MAX_RATE_SHIFT;
9017        return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9018}
9019
9020/*
9021 * Read an idle LCB message.
9022 *
9023 * Returns 0 on success, -EINVAL on error
9024 */
9025static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9026{
9027        int ret;
9028
9029        ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
9030        if (ret != HCMD_SUCCESS) {
9031                dd_dev_err(dd, "read idle message: type %d, err %d\n",
9032                           (u32)type, ret);
9033                return -EINVAL;
9034        }
9035        dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9036        /* return only the payload as we already know the type */
9037        *data_out >>= IDLE_PAYLOAD_SHIFT;
9038        return 0;
9039}
9040
9041/*
9042 * Read an idle SMA message.  To be done in response to a notification from
9043 * the 8051.
9044 *
9045 * Returns 0 on success, -EINVAL on error
9046 */
9047static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9048{
9049        return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9050                                 data);
9051}
9052
9053/*
9054 * Send an idle LCB message.
9055 *
9056 * Returns 0 on success, -EINVAL on error
9057 */
9058static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9059{
9060        int ret;
9061
9062        dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9063        ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9064        if (ret != HCMD_SUCCESS) {
9065                dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9066                           data, ret);
9067                return -EINVAL;
9068        }
9069        return 0;
9070}
9071
9072/*
9073 * Send an idle SMA message.
9074 *
9075 * Returns 0 on success, -EINVAL on error
9076 */
9077int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9078{
9079        u64 data;
9080
9081        data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9082                ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
9083        return send_idle_message(dd, data);
9084}
9085
9086/*
9087 * Initialize the LCB then do a quick link up.  This may or may not be
9088 * in loopback.
9089 *
9090 * return 0 on success, -errno on error
9091 */
9092static int do_quick_linkup(struct hfi1_devdata *dd)
9093{
9094        int ret;
9095
9096        lcb_shutdown(dd, 0);
9097
9098        if (loopback) {
9099                /* LCB_CFG_LOOPBACK.VAL = 2 */
9100                /* LCB_CFG_LANE_WIDTH.VAL = 0 */
9101                write_csr(dd, DC_LCB_CFG_LOOPBACK,
9102                          IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
9103                write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9104        }
9105
9106        /* start the LCBs */
9107        /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9108        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9109
9110        /* simulator only loopback steps */
9111        if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9112                /* LCB_CFG_RUN.EN = 1 */
9113                write_csr(dd, DC_LCB_CFG_RUN,
9114                          1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9115
9116                ret = wait_link_transfer_active(dd, 10);
9117                if (ret)
9118                        return ret;
9119
9120                write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
9121                          1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9122        }
9123
9124        if (!loopback) {
9125                /*
9126                 * When doing quick linkup and not in loopback, both
9127                 * sides must be done with LCB set-up before either
9128                 * starts the quick linkup.  Put a delay here so that
9129                 * both sides can be started and have a chance to be
9130                 * done with LCB set up before resuming.
9131                 */
9132                dd_dev_err(dd,
9133                           "Pausing for peer to be finished with LCB set up\n");
9134                msleep(5000);
9135                dd_dev_err(dd, "Continuing with quick linkup\n");
9136        }
9137
9138        write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9139        set_8051_lcb_access(dd);
9140
9141        /*
9142         * State "quick" LinkUp request sets the physical link state to
9143         * LinkUp without a verify capability sequence.
9144         * This state is in simulator v37 and later.
9145         */
9146        ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9147        if (ret != HCMD_SUCCESS) {
9148                dd_dev_err(dd,
9149                           "%s: set physical link state to quick LinkUp failed with return %d\n",
9150                           __func__, ret);
9151
9152                set_host_lcb_access(dd);
9153                write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9154
9155                if (ret >= 0)
9156                        ret = -EINVAL;
9157                return ret;
9158        }
9159
9160        return 0; /* success */
9161}
9162
9163/*
9164 * Set the SerDes to internal loopback mode.
9165 * Returns 0 on success, -errno on error.
9166 */
9167static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
9168{
9169        int ret;
9170
9171        ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
9172        if (ret == HCMD_SUCCESS)
9173                return 0;
9174        dd_dev_err(dd,
9175                   "Set physical link state to SerDes Loopback failed with return %d\n",
9176                   ret);
9177        if (ret >= 0)
9178                ret = -EINVAL;
9179        return ret;
9180}
9181
9182/*
9183 * Do all special steps to set up loopback.
9184 */
9185static int init_loopback(struct hfi1_devdata *dd)
9186{
9187        dd_dev_info(dd, "Entering loopback mode\n");
9188
9189        /* all loopbacks should disable self GUID check */
9190        write_csr(dd, DC_DC8051_CFG_MODE,
9191                  (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9192
9193        /*
9194         * The simulator has only one loopback option - LCB.  Switch
9195         * to that option, which includes quick link up.
9196         *
9197         * Accept all valid loopback values.
9198         */
9199        if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9200            (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9201             loopback == LOOPBACK_CABLE)) {
9202                loopback = LOOPBACK_LCB;
9203                quick_linkup = 1;
9204                return 0;
9205        }
9206
9207        /* handle serdes loopback */
9208        if (loopback == LOOPBACK_SERDES) {
9209                /* internal serdes loopback needs quick linkup on RTL */
9210                if (dd->icode == ICODE_RTL_SILICON)
9211                        quick_linkup = 1;
9212                return set_serdes_loopback_mode(dd);
9213        }
9214
9215        /* LCB loopback - handled at poll time */
9216        if (loopback == LOOPBACK_LCB) {
9217                quick_linkup = 1; /* LCB is always quick linkup */
9218
9219                /* not supported in emulation due to emulation RTL changes */
9220                if (dd->icode == ICODE_FPGA_EMULATION) {
9221                        dd_dev_err(dd,
9222                                   "LCB loopback not supported in emulation\n");
9223                        return -EINVAL;
9224                }
9225                return 0;
9226        }
9227
9228        /* external cable loopback requires no extra steps */
9229        if (loopback == LOOPBACK_CABLE)
9230                return 0;
9231
9232        dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9233        return -EINVAL;
9234}
9235
9236/*
9237 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9238 * used in the Verify Capability link width attribute.
9239 */
9240static u16 opa_to_vc_link_widths(u16 opa_widths)
9241{
9242        int i;
9243        u16 result = 0;
9244
9245        static const struct link_bits {
9246                u16 from;
9247                u16 to;
9248        } opa_link_xlate[] = {
9249                { OPA_LINK_WIDTH_1X, 1 << (1 - 1)  },
9250                { OPA_LINK_WIDTH_2X, 1 << (2 - 1)  },
9251                { OPA_LINK_WIDTH_3X, 1 << (3 - 1)  },
9252                { OPA_LINK_WIDTH_4X, 1 << (4 - 1)  },
9253        };
9254
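            /*
             * Example (from the table above): an OPA width mask of
             * 1X | 4X translates to VC width bits 0 and 3, i.e. 0x9.
             */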
9255        for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9256                if (opa_widths & opa_link_xlate[i].from)
9257                        result |= opa_link_xlate[i].to;
9258        }
9259        return result;
9260}
9261
9262/*
9263 * Set link attributes before moving to polling.
9264 */
9265static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9266{
9267        struct hfi1_devdata *dd = ppd->dd;
9268        u8 enable_lane_tx;
9269        u8 tx_polarity_inversion;
9270        u8 rx_polarity_inversion;
9271        int ret;
9272
9273        /* reset our fabric serdes to clear any lingering problems */
9274        fabric_serdes_reset(dd);
9275
9276        /* set the local tx rate - need to read-modify-write */
9277        ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9278                               &rx_polarity_inversion, &ppd->local_tx_rate);
9279        if (ret)
9280                goto set_local_link_attributes_fail;
9281
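            /*
             * Note: 8051 firmware older than 0.20 takes a single tx rate
             * select (1 = 25 Gb/s, 0 = 12.5 Gb/s), while newer firmware
             * takes a bitmask of enabled rates (bit 1 = 25 Gb/s,
             * bit 0 = 12.5 Gb/s), hence the two encodings below.
             */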
9282        if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
9283                /* set the tx rate to the fastest enabled */
9284                if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9285                        ppd->local_tx_rate = 1;
9286                else
9287                        ppd->local_tx_rate = 0;
9288        } else {
9289                /* set the tx rate to all enabled */
9290                ppd->local_tx_rate = 0;
9291                if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9292                        ppd->local_tx_rate |= 2;
9293                if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9294                        ppd->local_tx_rate |= 1;
9295        }
9296
9297        enable_lane_tx = 0xF; /* enable all four lanes */
9298        ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9299                                rx_polarity_inversion, ppd->local_tx_rate);
9300        if (ret != HCMD_SUCCESS)
9301                goto set_local_link_attributes_fail;
9302
9303        /*
9304         * DC supports continuous updates.
9305         */
9306        ret = write_vc_local_phy(dd,
9307                                 0 /* no power management */,
9308                                 1 /* continuous updates */);
9309        if (ret != HCMD_SUCCESS)
9310                goto set_local_link_attributes_fail;
9311
9312        /* z=1 in the next call: AU of 0 is not supported by the hardware */
9313        ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9314                                    ppd->port_crc_mode_enabled);
9315        if (ret != HCMD_SUCCESS)
9316                goto set_local_link_attributes_fail;
9317
9318        ret = write_vc_local_link_width(dd, 0, 0,
9319                                        opa_to_vc_link_widths(
9320                                                ppd->link_width_enabled));
9321        if (ret != HCMD_SUCCESS)
9322                goto set_local_link_attributes_fail;
9323
9324        /* let peer know who we are */
9325        ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9326        if (ret == HCMD_SUCCESS)
9327                return 0;
9328
9329set_local_link_attributes_fail:
9330        dd_dev_err(dd,
9331                   "Failed to set local link attributes, return 0x%x\n",
9332                   ret);
9333        return ret;
9334}
9335
9336/*
9337 * Call this to start the link.
9338 * Do not do anything if the link is disabled.
9339 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9340 */
9341int start_link(struct hfi1_pportdata *ppd)
9342{
9343        /*
9344         * Tune the SerDes to a ballpark setting for optimal signal and bit
9345         * error rate.  Needs to be done before starting the link.
9346         */
9347        tune_serdes(ppd);
9348
9349        if (!ppd->driver_link_ready) {
9350                dd_dev_info(ppd->dd,
9351                            "%s: stopping link start because driver is not ready\n",
9352                            __func__);
9353                return 0;
9354        }
9355
9356        /*
9357         * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9358         * pkey table can be configured properly if the HFI unit is connected
9359         * to a switch port with MgmtAllowed=NO
9360         */
9361        clear_full_mgmt_pkey(ppd);
9362
9363        return set_link_state(ppd, HLS_DN_POLL);
9364}
9365
9366static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9367{
9368        struct hfi1_devdata *dd = ppd->dd;
9369        u64 mask;
9370        unsigned long timeout;
9371
9372        /*
9373         * Some QSFP cables have a quirk that asserts the IntN line as a side
9374         * effect of power up on plug-in. We ignore this false positive
9375         * interrupt until the module has finished powering up by waiting for
9376         * a minimum timeout of the module inrush initialization time of
9377         * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9378         * module have stabilized.
9379         */
9380        msleep(500);
9381
9382        /*
9383         * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9384         */
9385        timeout = jiffies + msecs_to_jiffies(2000);
9386        while (1) {
9387                mask = read_csr(dd, dd->hfi1_id ?
9388                                ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9389                if (!(mask & QSFP_HFI0_INT_N))
9390                        break;
9391                if (time_after(jiffies, timeout)) {
9392                        dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9393                                    __func__);
9394                        break;
9395                }
9396                udelay(2);
9397        }
9398}
9399
9400static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9401{
9402        struct hfi1_devdata *dd = ppd->dd;
9403        u64 mask;
9404
9405        mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9406        if (enable) {
9407                /*
9408                 * Clear the status register to avoid an immediate interrupt
9409                 * when we re-enable the IntN pin
9410                 */
9411                write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9412                          QSFP_HFI0_INT_N);
9413                mask |= (u64)QSFP_HFI0_INT_N;
9414        } else {
9415                mask &= ~(u64)QSFP_HFI0_INT_N;
9416        }
9417        write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9418}
9419
9420int reset_qsfp(struct hfi1_pportdata *ppd)
9421{
9422        struct hfi1_devdata *dd = ppd->dd;
9423        u64 mask, qsfp_mask;
9424
9425        /* Disable INT_N from triggering QSFP interrupts */
9426        set_qsfp_int_n(ppd, 0);
9427
9428        /* Reset the QSFP */
9429        mask = (u64)QSFP_HFI0_RESET_N;
9430
9431        qsfp_mask = read_csr(dd,
9432                             dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9433        qsfp_mask &= ~mask;
9434        write_csr(dd,
9435                  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9436
9437        udelay(10);
9438
9439        qsfp_mask |= mask;
9440        write_csr(dd,
9441                  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9442
9443        wait_for_qsfp_init(ppd);
9444
9445        /*
9446         * Allow INT_N to trigger the QSFP interrupt to watch
9447         * for alarms and warnings
9448         */
9449        set_qsfp_int_n(ppd, 1);
9450
9451        /*
9452         * After the reset, AOC transmitters are enabled by default. They need
9453         * to be turned off to complete the QSFP setup before they can be
9454         * enabled again.
9455         */
9456        return set_qsfp_tx(ppd, 0);
9457}
9458
9459static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9460                                        u8 *qsfp_interrupt_status)
9461{
9462        struct hfi1_devdata *dd = ppd->dd;
9463
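            /*
             * The array passed in is the block of module flag bytes that
             * qsfp_event() reads starting at QSFP byte 6, so index i here
             * corresponds to QSFP memory map byte 6 + i (temperature, Vcc,
             * RX power, TX bias and TX power alarm/warning flags).
             */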
9464        if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9465            (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9466                dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9467                           __func__);
9468
9469        if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9470            (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9471                dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9472                           __func__);
9473
9474        /*
9475         * The remaining alarms/warnings don't matter if the link is down.
9476         */
9477        if (ppd->host_link_state & HLS_DOWN)
9478                return 0;
9479
9480        if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9481            (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9482                dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9483                           __func__);
9484
9485        if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9486            (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9487                dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9488                           __func__);
9489
9490        /* Byte 2 is vendor specific */
9491
9492        if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9493            (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9494                dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9495                           __func__);
9496
9497        if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9498            (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9499                dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9500                           __func__);
9501
9502        if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9503            (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9504                dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9505                           __func__);
9506
9507        if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9508            (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9509                dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9510                           __func__);
9511
9512        if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9513            (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9514                dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9515                           __func__);
9516
9517        if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9518            (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9519                dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9520                           __func__);
9521
9522        if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9523            (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9524                dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9525                           __func__);
9526
9527        if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9528            (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9529                dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9530                           __func__);
9531
9532        if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9533            (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9534                dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9535                           __func__);
9536
9537        if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9538            (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9539                dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9540                           __func__);
9541
9542        if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9543            (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9544                dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9545                           __func__);
9546
9547        if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9548            (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9549                dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9550                           __func__);
9551
9552        /* Bytes 9-10 and 11-12 are reserved */
9553        /* Bytes 13-15 are vendor specific */
9554
9555        return 0;
9556}
9557
9558/* This routine will only be scheduled if the QSFP module-present signal is asserted */
9559void qsfp_event(struct work_struct *work)
9560{
9561        struct qsfp_data *qd;
9562        struct hfi1_pportdata *ppd;
9563        struct hfi1_devdata *dd;
9564
9565        qd = container_of(work, struct qsfp_data, qsfp_work);
9566        ppd = qd->ppd;
9567        dd = ppd->dd;
9568
9569        /* Sanity check */
9570        if (!qsfp_mod_present(ppd))
9571                return;
9572
9573        if (ppd->host_link_state == HLS_DN_DISABLE) {
9574                dd_dev_info(ppd->dd,
9575                            "%s: stopping link start because link is disabled\n",
9576                            __func__);
9577                return;
9578        }
9579
9580        /*
9581         * Turn DC back on after cable has been re-inserted. Up until
9582         * now, the DC has been in reset to save power.
9583         */
9584        dc_start(dd);
9585
9586        if (qd->cache_refresh_required) {
9587                set_qsfp_int_n(ppd, 0);
9588
9589                wait_for_qsfp_init(ppd);
9590
9591                /*
9592                 * Allow INT_N to trigger the QSFP interrupt to watch
9593                 * for alarms and warnings
9594                 */
9595                set_qsfp_int_n(ppd, 1);
9596
9597                start_link(ppd);
9598        }
9599
9600        if (qd->check_interrupt_flags) {
9601                u8 qsfp_interrupt_status[16] = {0,};
9602
9603                if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9604                                  &qsfp_interrupt_status[0], 16) != 16) {
9605                        dd_dev_info(dd,
9606                                    "%s: Failed to read status of QSFP module\n",
9607                                    __func__);
9608                } else {
9609                        unsigned long flags;
9610
9611                        handle_qsfp_error_conditions(
9612                                        ppd, qsfp_interrupt_status);
9613                        spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9614                        ppd->qsfp_info.check_interrupt_flags = 0;
9615                        spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9616                                               flags);
9617                }
9618        }
9619}
9620
9621static void init_qsfp_int(struct hfi1_devdata *dd)
9622{
9623        struct hfi1_pportdata *ppd = dd->pport;
9624        u64 qsfp_mask, cce_int_mask;
9625        const int qsfp1_int_smask = QSFP1_INT % 64;
9626        const int qsfp2_int_smask = QSFP2_INT % 64;
9627
9628        /*
9629         * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9630         * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9631         * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9632         * the index of the appropriate CSR in the CCEIntMask CSR array
9633         */
9634        cce_int_mask = read_csr(dd, CCE_INT_MASK +
9635                                (8 * (QSFP1_INT / 64)));
9636        if (dd->hfi1_id) {
9637                cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9638                write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9639                          cce_int_mask);
9640        } else {
9641                cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9642                write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9643                          cce_int_mask);
9644        }
9645
9646        qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9647        /* Clear current status to avoid spurious interrupts */
9648        write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9649                  qsfp_mask);
9650        write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9651                  qsfp_mask);
9652
9653        set_qsfp_int_n(ppd, 0);
9654
9655        /* Handle active low nature of INT_N and MODPRST_N pins */
9656        if (qsfp_mod_present(ppd))
9657                qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9658        write_csr(dd,
9659                  dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9660                  qsfp_mask);
9661}
9662
9663/*
9664 * Do a one-time initialize of the LCB block.
9665 */
9666static void init_lcb(struct hfi1_devdata *dd)
9667{
9668        /* simulator does not correctly handle LCB cclk loopback, skip */
9669        if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9670                return;
9671
9672        /* the DC has been reset earlier in the driver load */
9673
9674        /* set LCB for cclk loopback on the port */
9675        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9676        write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9677        write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9678        write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9679        write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9680        write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9681        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9682}
9683
9684/*
9685 * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
9686 * on error.
9687 */
9688static int test_qsfp_read(struct hfi1_pportdata *ppd)
9689{
9690        int ret;
9691        u8 status;
9692
9693        /*
9694         * Report success if this is not a QSFP port, or if it is a QSFP
9695         * port but no cable is present
9696         */
9697        if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
9698                return 0;
9699
9700        /* read byte 2, the status byte */
9701        ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9702        if (ret < 0)
9703                return ret;
9704        if (ret != 1)
9705                return -EIO;
9706
9707        return 0; /* success */
9708}
9709
9710/*
9711 * Values for QSFP retry.
9712 *
9713 * Give up after 10s (20 x 500ms).  The overall timeout was empirically
9714 * arrived at from experience on a large cluster.
9715 */
9716#define MAX_QSFP_RETRIES 20
9717#define QSFP_RETRY_WAIT 500 /* msec */
9718
9719/*
9720 * Try a QSFP read.  If it fails, schedule a retry for later.
9721 * Called on first link activation after driver load.
9722 */
9723static void try_start_link(struct hfi1_pportdata *ppd)
9724{
9725        if (test_qsfp_read(ppd)) {
9726                /* read failed */
9727                if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9728                        dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9729                        return;
9730                }
9731                dd_dev_info(ppd->dd,
9732                            "QSFP not responding, waiting and retrying %d\n",
9733                            (int)ppd->qsfp_retry_count);
9734                ppd->qsfp_retry_count++;
9735                queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
9736                                   msecs_to_jiffies(QSFP_RETRY_WAIT));
9737                return;
9738        }
9739        ppd->qsfp_retry_count = 0;
9740
9741        start_link(ppd);
9742}
9743
9744/*
9745 * Workqueue function to start the link after a delay.
9746 */
9747void handle_start_link(struct work_struct *work)
9748{
9749        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9750                                                  start_link_work.work);
9751        try_start_link(ppd);
9752}
9753
9754int bringup_serdes(struct hfi1_pportdata *ppd)
9755{
9756        struct hfi1_devdata *dd = ppd->dd;
9757        u64 guid;
9758        int ret;
9759
9760        if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9761                add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9762
9763        guid = ppd->guids[HFI1_PORT_GUID_INDEX];
9764        if (!guid) {
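                    /*
                     * No provisioned port GUID: derive one from the device
                     * base GUID (port 1 maps to the base GUID itself).
                     */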
9765                if (dd->base_guid)
9766                        guid = dd->base_guid + ppd->port - 1;
9767                ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
9768        }
9769
9770        /* Set linkinit_reason on power up per OPA spec */
9771        ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9772
9773        /* one-time init of the LCB */
9774        init_lcb(dd);
9775
9776        if (loopback) {
9777                ret = init_loopback(dd);
9778                if (ret < 0)
9779                        return ret;
9780        }
9781
9782        get_port_type(ppd);
9783        if (ppd->port_type == PORT_TYPE_QSFP) {
9784                set_qsfp_int_n(ppd, 0);
9785                wait_for_qsfp_init(ppd);
9786                set_qsfp_int_n(ppd, 1);
9787        }
9788
9789        try_start_link(ppd);
9790        return 0;
9791}
9792
9793void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9794{
9795        struct hfi1_devdata *dd = ppd->dd;
9796
9797        /*
9798         * Shut down the link and keep it down.  First, clear the flag that
9799         * says the driver wants to allow the link to be up (driver_link_ready).
9800         * Then make sure the link is not automatically restarted
9801         * (link_enabled).  Cancel any pending restart.  And finally
9802         * go offline.
9803         */
9804        ppd->driver_link_ready = 0;
9805        ppd->link_enabled = 0;
9806
9807        ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9808        flush_delayed_work(&ppd->start_link_work);
9809        cancel_delayed_work_sync(&ppd->start_link_work);
9810
9811        ppd->offline_disabled_reason =
9812                        HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
9813        set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9814                             OPA_LINKDOWN_REASON_SMA_DISABLED);
9815        set_link_state(ppd, HLS_DN_OFFLINE);
9816
9817        /* disable the port */
9818        clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9819}
9820
9821static inline int init_cpu_counters(struct hfi1_devdata *dd)
9822{
9823        struct hfi1_pportdata *ppd;
9824        int i;
9825
9826        ppd = (struct hfi1_pportdata *)(dd + 1);
9827        for (i = 0; i < dd->num_pports; i++, ppd++) {
9828                ppd->ibport_data.rvp.rc_acks = NULL;
9829                ppd->ibport_data.rvp.rc_qacks = NULL;
9830                ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9831                ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9832                ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9833                if (!ppd->ibport_data.rvp.rc_acks ||
9834                    !ppd->ibport_data.rvp.rc_delayed_comp ||
9835                    !ppd->ibport_data.rvp.rc_qacks)
9836                        return -ENOMEM;
9837        }
9838
9839        return 0;
9840}
9841
9842/*
9843 * index is the index into the receive array
9844 */
9845void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9846                  u32 type, unsigned long pa, u16 order)
9847{
9848        u64 reg;
9849
9850        if (!(dd->flags & HFI1_PRESENT))
9851                goto done;
9852
9853        if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
9854                pa = 0;
9855                order = 0;
9856        } else if (type > PT_INVALID) {
9857                dd_dev_err(dd,
9858                           "unexpected receive array type %u for index %u, not handled\n",
9859                           type, index);
9860                goto done;
9861        }
9862        trace_hfi1_put_tid(dd, index, type, pa, order);
9863
9864#define RT_ADDR_SHIFT 12        /* 4KB kernel address boundary */
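            /*
             * Pack the write-enable bit, the buffer size code (order) and
             * the 4KB-aligned physical address (pa >> RT_ADDR_SHIFT) into
             * a single 64-bit RcvArray entry.
             */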
9865        reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9866                | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9867                | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9868                                        << RCV_ARRAY_RT_ADDR_SHIFT;
9869        trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
9870        writeq(reg, dd->rcvarray_wc + (index * 8));
9871
9872        if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
9873                /*
9874                 * Eager entries are written and flushed
9875                 *
9876                 * Expected entries are flushed every 4 writes
9877                 */
9878                flush_wc();
9879done:
9880        return;
9881}
9882
9883void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9884{
9885        struct hfi1_devdata *dd = rcd->dd;
9886        u32 i;
9887
9888        /* this could be optimized */
9889        for (i = rcd->eager_base; i < rcd->eager_base +
9890                     rcd->egrbufs.alloced; i++)
9891                hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9892
9893        for (i = rcd->expected_base;
9894                        i < rcd->expected_base + rcd->expected_count; i++)
9895                hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9896}
9897
9898static const char * const ib_cfg_name_strings[] = {
9899        "HFI1_IB_CFG_LIDLMC",
9900        "HFI1_IB_CFG_LWID_DG_ENB",
9901        "HFI1_IB_CFG_LWID_ENB",
9902        "HFI1_IB_CFG_LWID",
9903        "HFI1_IB_CFG_SPD_ENB",
9904        "HFI1_IB_CFG_SPD",
9905        "HFI1_IB_CFG_RXPOL_ENB",
9906        "HFI1_IB_CFG_LREV_ENB",
9907        "HFI1_IB_CFG_LINKLATENCY",
9908        "HFI1_IB_CFG_HRTBT",
9909        "HFI1_IB_CFG_OP_VLS",
9910        "HFI1_IB_CFG_VL_HIGH_CAP",
9911        "HFI1_IB_CFG_VL_LOW_CAP",
9912        "HFI1_IB_CFG_OVERRUN_THRESH",
9913        "HFI1_IB_CFG_PHYERR_THRESH",
9914        "HFI1_IB_CFG_LINKDEFAULT",
9915        "HFI1_IB_CFG_PKEYS",
9916        "HFI1_IB_CFG_MTU",
9917        "HFI1_IB_CFG_LSTATE",
9918        "HFI1_IB_CFG_VL_HIGH_LIMIT",
9919        "HFI1_IB_CFG_PMA_TICKS",
9920        "HFI1_IB_CFG_PORT"
9921};
9922
9923static const char *ib_cfg_name(int which)
9924{
9925        if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9926                return "invalid";
9927        return ib_cfg_name_strings[which];
9928}
9929
9930int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9931{
9932        struct hfi1_devdata *dd = ppd->dd;
9933        int val = 0;
9934
9935        switch (which) {
9936        case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9937                val = ppd->link_width_enabled;
9938                break;
9939        case HFI1_IB_CFG_LWID: /* currently active Link-width */
9940                val = ppd->link_width_active;
9941                break;
9942        case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9943                val = ppd->link_speed_enabled;
9944                break;
9945        case HFI1_IB_CFG_SPD: /* current Link speed */
9946                val = ppd->link_speed_active;
9947                break;
9948
9949        case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9950        case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9951        case HFI1_IB_CFG_LINKLATENCY:
9952                goto unimplemented;
9953
9954        case HFI1_IB_CFG_OP_VLS:
9955                val = ppd->vls_operational;
9956                break;
9957        case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9958                val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9959                break;
9960        case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9961                val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9962                break;
9963        case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9964                val = ppd->overrun_threshold;
9965                break;
9966        case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9967                val = ppd->phy_error_threshold;
9968                break;
9969        case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9970                val = dd->link_default;
9971                break;
9972
9973        case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9974        case HFI1_IB_CFG_PMA_TICKS:
9975        default:
9976unimplemented:
9977                if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9978                        dd_dev_info(
9979                                dd,
9980                                "%s: which %s: not implemented\n",
9981                                __func__,
9982                                ib_cfg_name(which));
9983                break;
9984        }
9985
9986        return val;
9987}
9988
9989/*
9990 * The largest MAD packet size.
9991 */
9992#define MAX_MAD_PACKET 2048
9993
9994/*
9995 * Return the maximum header bytes that can go on the _wire_
9996 * for this device. This count includes the ICRC which is
9997 * not part of the packet held in memory but it is appended
9998 * by the HW.
9999 * This is dependent on the device's receive header entry size.
10000 * HFI allows this to be set per-receive context, but the
10001 * driver presently enforces a global value.
10002 */
10003u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
10004{
10005        /*
10006         * The maximum non-payload (MTU) bytes in LRH.PktLen are
10007         * the Receive Header Entry Size minus the PBC (or RHF) size
10008         * plus one DW for the ICRC appended by HW.
10009         *
10010         * dd->rcd[0].rcvhdrqentsize is in DW.
10011         * We use rcd[0] as all contexts will have the same value. Also,
10012         * the first kernel context would have been allocated by now so
10013         * we are guaranteed a valid value.
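              *
              * For example, a receive header entry size of 32 DWs allows
              * (32 - 2 + 1) * 4 = 124 bytes of header plus ICRC on the wire.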
10014         */
10015        return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
10016}
10017
10018/*
10019 * Set Send Length
10020 * @ppd - per port data
10021 *
10022 * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
10023 * registers compare against LRH.PktLen, so use the max bytes included
10024 * in the LRH.
10025 *
10026 * This routine changes all VL values except VL15, which it maintains at
10027 * the same value.
10028 */
10029static void set_send_length(struct hfi1_pportdata *ppd)
10030{
10031        struct hfi1_devdata *dd = ppd->dd;
10032        u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
10033        u32 maxvlmtu = dd->vld[15].mtu;
10034        u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
10035                              & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
10036                SEND_LEN_CHECK1_LEN_VL15_SHIFT;
10037        int i, j;
10038        u32 thres;
10039
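             /*
              * Each SendLenCheck field holds (VL MTU + max header bytes) in
              * DWs: VLs 0-3 are packed into SEND_LEN_CHECK0 and VLs 4-7 into
              * SEND_LEN_CHECK1, alongside the VL15 field set up above.
              */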
10040        for (i = 0; i < ppd->vls_supported; i++) {
10041                if (dd->vld[i].mtu > maxvlmtu)
10042                        maxvlmtu = dd->vld[i].mtu;
10043                if (i <= 3)
10044                        len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
10045                                 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
10046                                ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
10047                else
10048                        len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
10049                                 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
10050                                ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
10051        }
10052        write_csr(dd, SEND_LEN_CHECK0, len1);
10053        write_csr(dd, SEND_LEN_CHECK1, len2);
10054        /* adjust kernel credit return thresholds based on new MTUs */
10055        /* all kernel receive contexts have the same hdrqentsize */
10056        for (i = 0; i < ppd->vls_supported; i++) {
10057                thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
10058                            sc_mtu_to_threshold(dd->vld[i].sc,
10059                                                dd->vld[i].mtu,
10060                                                dd->rcd[0]->rcvhdrqentsize));
10061                for (j = 0; j < INIT_SC_PER_VL; j++)
10062                        sc_set_cr_threshold(
10063                                        pio_select_send_context_vl(dd, j, i),
10064                                            thres);
10065        }
10066        thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
10067                    sc_mtu_to_threshold(dd->vld[15].sc,
10068                                        dd->vld[15].mtu,
10069                                        dd->rcd[0]->rcvhdrqentsize));
10070        sc_set_cr_threshold(dd->vld[15].sc, thres);
10071
10072        /* Adjust maximum MTU for the port in DC */
10073        dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
10074                (ilog2(maxvlmtu >> 8) + 1);
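             /* e.g. an 8192 byte MTU encodes as ilog2(8192 >> 8) + 1 = 6 */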
10075        len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10076        len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
10077        len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
10078                DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
10079        write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
10080}
10081
10082static void set_lidlmc(struct hfi1_pportdata *ppd)
10083{
10084        int i;
10085        u64 sreg = 0;
10086        struct hfi1_devdata *dd = ppd->dd;
10087        u32 mask = ~((1U << ppd->lmc) - 1);
10088        u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10089        u32 lid;
10090
10091        /*
10092         * Program 0 into the CSR if the port LID is extended. This prevents
10093         * 9B packets from being sent out for large LIDs.
10094         */
10095        lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
10096        c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10097                | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10098        c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
10099                        << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
10100              ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10101                        << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10102        write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10103
10104        /*
10105         * Iterate over all the send contexts and set their SLID check
10106         */
10107        sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10108                        SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10109               (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10110                        SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10111
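             /*
              * Example: with lmc = 2 the mask is 0xfffffffc, so a send
              * context accepts source LIDs that match the port LID in all
              * but the low two bits.
              */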
10112        for (i = 0; i < dd->chip_send_contexts; i++) {
10113                hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10114                          i, (u32)sreg);
10115                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10116        }
10117
10118        /* Now we have to do the same thing for the sdma engines */
10119        sdma_update_lmc(dd, mask, lid);
10120}
10121
10122static const char *state_completed_string(u32 completed)
10123{
10124        static const char * const state_completed[] = {
10125                "EstablishComm",
10126                "OptimizeEQ",
10127                "VerifyCap"
10128        };
10129
10130        if (completed < ARRAY_SIZE(state_completed))
10131                return state_completed[completed];
10132
10133        return "unknown";
10134}
10135
10136static const char all_lanes_dead_timeout_expired[] =
10137        "All lanes were inactive – was the interconnect media removed?";
10138static const char tx_out_of_policy[] =
10139        "Passing lanes on local port do not meet the local link width policy";
10140static const char no_state_complete[] =
10141        "State timeout occurred before link partner completed the state";
10142static const char * const state_complete_reasons[] = {
10143        [0x00] = "Reason unknown",
10144        [0x01] = "Link was halted by driver, refer to LinkDownReason",
10145        [0x02] = "Link partner reported failure",
10146        [0x10] = "Unable to achieve frame sync on any lane",
10147        [0x11] =
10148          "Unable to find a common bit rate with the link partner",
10149        [0x12] =
10150          "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10151        [0x13] =
10152          "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10153        [0x14] = no_state_complete,
10154        [0x15] =
10155          "State timeout occurred before link partner identified equalization presets",
10156        [0x16] =
10157          "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10158        [0x17] = tx_out_of_policy,
10159        [0x20] = all_lanes_dead_timeout_expired,
10160        [0x21] =
10161          "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10162        [0x22] = no_state_complete,
10163        [0x23] =
10164          "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10165        [0x24] = tx_out_of_policy,
10166        [0x30] = all_lanes_dead_timeout_expired,
10167        [0x31] =
10168          "State timeout occurred waiting for host to process received frames",
10169        [0x32] = no_state_complete,
10170        [0x33] =
10171          "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10172        [0x34] = tx_out_of_policy,
10173};
10174
10175static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10176                                                     u32 code)
10177{
10178        const char *str = NULL;
10179
10180        if (code < ARRAY_SIZE(state_complete_reasons))
10181                str = state_complete_reasons[code];
10182
10183        if (str)
10184                return str;
10185        return "Reserved";
10186}
10187
10188/* describe the given last state complete frame */
10189static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10190                                  const char *prefix)
10191{
10192        struct hfi1_devdata *dd = ppd->dd;
10193        u32 success;
10194        u32 state;
10195        u32 reason;
10196        u32 lanes;
10197
10198        /*
10199         * Decode frame:
10200         *  [ 0: 0] - success
10201         *  [ 3: 1] - state
10202         *  [ 7: 4] - next state timeout
10203         *  [15: 8] - reason code
10204         *  [31:16] - lanes
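              *
              * For example, a frame of 0x000f2200 would decode as
              * success = 0, state = 0 (EstablishComm), reason = 0x22,
              * lanes = 0x000f.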
10205         */
10206        success = frame & 0x1;
10207        state = (frame >> 1) & 0x7;
10208        reason = (frame >> 8) & 0xff;
10209        lanes = (frame >> 16) & 0xffff;
10210
10211        dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10212                   prefix, frame);
10213        dd_dev_err(dd, "    last reported state state: %s (0x%x)\n",
10214                   state_completed_string(state), state);
10215        dd_dev_err(dd, "    state successfully completed: %s\n",
10216                   success ? "yes" : "no");
10217        dd_dev_err(dd, "    fail reason 0x%x: %s\n",
10218                   reason, state_complete_reason_code_string(ppd, reason));
10219        dd_dev_err(dd, "    passing lane mask: 0x%x", lanes);
10220}
10221
10222/*
10223 * Read the last state complete frames and explain them.  This routine
10224 * expects to be called if the link went down during link negotiation
10225 * and initialization (LNI).  That is, anywhere between polling and link up.
10226 */
10227static void check_lni_states(struct hfi1_pportdata *ppd)
10228{
10229        u32 last_local_state;
10230        u32 last_remote_state;
10231
10232        read_last_local_state(ppd->dd, &last_local_state);
10233        read_last_remote_state(ppd->dd, &last_remote_state);
10234
10235        /*
10236         * Don't report anything if there is nothing to report.  A value of
10237         * 0 means the link was taken down while polling and there was no
10238         * training in-process.
10239         */
10240        if (last_local_state == 0 && last_remote_state == 0)
10241                return;
10242
10243        decode_state_complete(ppd, last_local_state, "transmitted");
10244        decode_state_complete(ppd, last_remote_state, "received");
10245}
10246
10247/* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10248static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10249{
10250        u64 reg;
10251        unsigned long timeout;
10252
10253        /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10254        timeout = jiffies + msecs_to_jiffies(wait_ms);
10255        while (1) {
10256                reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10257                if (reg)
10258                        break;
10259                if (time_after(jiffies, timeout)) {
10260                        dd_dev_err(dd,
10261                                   "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10262                        return -ETIMEDOUT;
10263                }
10264                udelay(2);
10265        }
10266        return 0;
10267}
10268
10269/* called when the logical link state is not down as it should be */
10270static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10271{
10272        struct hfi1_devdata *dd = ppd->dd;
10273
10274        /*
10275         * Bring link up in LCB loopback
10276         */
10277        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10278        write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10279                  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10280
10281        write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10282        write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10283        write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10284        write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10285
10286        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10287        (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10288        udelay(3);
10289        write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10290        write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10291
10292        wait_link_transfer_active(dd, 100);
10293
10294        /*
10295         * Bring the link down again.
10296         */
10297        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10298        write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10299        write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10300
10301        /* adjust ppd->statusp, if needed */
10302        update_statusp(ppd, IB_PORT_DOWN);
10303
10304        dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
10305}
10306
10307/*
10308 * Helper for set_link_state().  Do not call except from that routine.
10309 * Expects ppd->hls_mutex to be held.
10310 *
10311 * @rem_reason value to be sent to the neighbor
10312 *
10313 * LinkDownReasons only set if transition succeeds.
10314 */
10315static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10316{
10317        struct hfi1_devdata *dd = ppd->dd;
10318        u32 previous_state;
10319        int offline_state_ret;
10320        int ret;
10321
10322        update_lcb_cache(dd);
10323
10324        previous_state = ppd->host_link_state;
10325        ppd->host_link_state = HLS_GOING_OFFLINE;
10326
10327        /* start offline transition */
10328        ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
10329
10330        if (ret != HCMD_SUCCESS) {
10331                dd_dev_err(dd,
10332                           "Failed to transition to Offline link state, return %d\n",
10333                           ret);
10334                return -EINVAL;
10335        }
10336        if (ppd->offline_disabled_reason ==
10337                        HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10338                ppd->offline_disabled_reason =
10339                HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10340
10341        offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
10342        if (offline_state_ret < 0)
10343                return offline_state_ret;
10344
10345        /* Disabling AOC transmitters */
10346        if (ppd->port_type == PORT_TYPE_QSFP &&
10347            ppd->qsfp_info.limiting_active &&
10348            qsfp_mod_present(ppd)) {
10349                int ret;
10350
10351                ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10352                if (ret == 0) {
10353                        set_qsfp_tx(ppd, 0);
10354                        release_chip_resource(dd, qsfp_resource(dd));
10355                } else {
10356                        /* not fatal, but should warn */
10357                        dd_dev_err(dd,
10358                                   "Unable to acquire lock to turn off QSFP TX\n");
10359                }
10360        }
10361
10362        /*
10363         * Wait for the offline.Quiet transition if it hasn't happened yet. It
10364         * can take a while for the link to go down.
10365         */
10366        if (offline_state_ret != PLS_OFFLINE_QUIET) {
10367                ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
10368                if (ret < 0)
10369                        return ret;
10370        }
10371
10372        /*
10373         * Now in charge of LCB - must be after the physical state is
10374         * offline.quiet and before host_link_state is changed.
10375         */
10376        set_host_lcb_access(dd);
10377        write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10378
10379        /* make sure the logical state is also down */
10380        ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10381        if (ret)
10382                force_logical_link_state_down(ppd);
10383
10384        ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10385
10386        /*
10387         * The LNI has a mandatory wait time after the physical state
10388         * moves to Offline.Quiet.  The wait time may be different
10389         * depending on how the link went down.  The 8051 firmware
10390         * will observe the needed wait time and only move to ready
10391         * when that is completed.  The largest of the quiet timeouts
10392         * is 6s, so wait that long and then at least 0.5s more for
10393         * other transitions, and another 0.5s for a buffer.
10394         */
10395        ret = wait_fm_ready(dd, 7000);
10396        if (ret) {
10397                dd_dev_err(dd,
10398                           "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10399                /* state is really offline, so make it so */
10400                ppd->host_link_state = HLS_DN_OFFLINE;
10401                return ret;
10402        }
10403
10404        /*
10405         * The state is now offline and the 8051 is ready to accept host
10406         * requests.
10407         *      - change our state
10408         *      - notify others if we were previously in a linkup state
10409         */
10410        ppd->host_link_state = HLS_DN_OFFLINE;
10411        if (previous_state & HLS_UP) {
10412                /* went down while link was up */
10413                handle_linkup_change(dd, 0);
10414        } else if (previous_state
10415                        & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10416                /* went down while attempting link up */
10417                check_lni_states(ppd);
10418
10419                /* The QSFP doesn't need to be reset on LNI failure */
10420                ppd->qsfp_info.reset_needed = 0;
10421        }
10422
10423        /* the active link width (downgrade) is 0 on link down */
10424        ppd->link_width_active = 0;
10425        ppd->link_width_downgrade_tx_active = 0;
10426        ppd->link_width_downgrade_rx_active = 0;
10427        ppd->current_egress_rate = 0;
10428        return 0;
10429}
10430
10431/* return the link state name */
10432static const char *link_state_name(u32 state)
10433{
10434        const char *name;
10435        int n = ilog2(state);
10436        static const char * const names[] = {
10437                [__HLS_UP_INIT_BP]       = "INIT",
10438                [__HLS_UP_ARMED_BP]      = "ARMED",
10439                [__HLS_UP_ACTIVE_BP]     = "ACTIVE",
10440                [__HLS_DN_DOWNDEF_BP]    = "DOWNDEF",
10441                [__HLS_DN_POLL_BP]       = "POLL",
10442                [__HLS_DN_DISABLE_BP]    = "DISABLE",
10443                [__HLS_DN_OFFLINE_BP]    = "OFFLINE",
10444                [__HLS_VERIFY_CAP_BP]    = "VERIFY_CAP",
10445                [__HLS_GOING_UP_BP]      = "GOING_UP",
10446                [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10447                [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10448        };
10449
10450        name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10451        return name ? name : "unknown";
10452}
10453
10454/* return the link state reason name */
10455static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10456{
10457        if (state == HLS_UP_INIT) {
10458                switch (ppd->linkinit_reason) {
10459                case OPA_LINKINIT_REASON_LINKUP:
10460                        return "(LINKUP)";
10461                case OPA_LINKINIT_REASON_FLAPPING:
10462                        return "(FLAPPING)";
10463                case OPA_LINKINIT_OUTSIDE_POLICY:
10464                        return "(OUTSIDE_POLICY)";
10465                case OPA_LINKINIT_QUARANTINED:
10466                        return "(QUARANTINED)";
10467                case OPA_LINKINIT_INSUFIC_CAPABILITY:
10468                        return "(INSUFIC_CAPABILITY)";
10469                default:
10470                        break;
10471                }
10472        }
10473        return "";
10474}
10475
10476/*
10477 * driver_pstate - convert the driver's notion of a port's
10478 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10479 * Return -1 (converted to a u32) to indicate error.
10480 */
10481u32 driver_pstate(struct hfi1_pportdata *ppd)
10482{
10483        switch (ppd->host_link_state) {
10484        case HLS_UP_INIT:
10485        case HLS_UP_ARMED:
10486        case HLS_UP_ACTIVE:
10487                return IB_PORTPHYSSTATE_LINKUP;
10488        case HLS_DN_POLL:
10489                return IB_PORTPHYSSTATE_POLLING;
10490        case HLS_DN_DISABLE:
10491                return IB_PORTPHYSSTATE_DISABLED;
10492        case HLS_DN_OFFLINE:
10493                return OPA_PORTPHYSSTATE_OFFLINE;
10494        case HLS_VERIFY_CAP:
10495                return IB_PORTPHYSSTATE_POLLING;
10496        case HLS_GOING_UP:
10497                return IB_PORTPHYSSTATE_POLLING;
10498        case HLS_GOING_OFFLINE:
10499                return OPA_PORTPHYSSTATE_OFFLINE;
10500        case HLS_LINK_COOLDOWN:
10501                return OPA_PORTPHYSSTATE_OFFLINE;
10502        case HLS_DN_DOWNDEF:
10503        default:
10504                dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10505                           ppd->host_link_state);
10506                return  -1;
10507        }
10508}
10509
10510/*
10511 * driver_lstate - convert the driver's notion of a port's
10512 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10513 * (converted to a u32) to indicate error.
10514 */
10515u32 driver_lstate(struct hfi1_pportdata *ppd)
10516{
10517        if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10518                return IB_PORT_DOWN;
10519
10520        switch (ppd->host_link_state & HLS_UP) {
10521        case HLS_UP_INIT:
10522                return IB_PORT_INIT;
10523        case HLS_UP_ARMED:
10524                return IB_PORT_ARMED;
10525        case HLS_UP_ACTIVE:
10526                return IB_PORT_ACTIVE;
10527        default:
10528                dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10529                           ppd->host_link_state);
10530                return -1;
10531        }
10532}
10533
10534void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10535                          u8 neigh_reason, u8 rem_reason)
10536{
10537        if (ppd->local_link_down_reason.latest == 0 &&
10538            ppd->neigh_link_down_reason.latest == 0) {
10539                ppd->local_link_down_reason.latest = lcl_reason;
10540                ppd->neigh_link_down_reason.latest = neigh_reason;
10541                ppd->remote_link_down_reason = rem_reason;
10542        }
10543}
10544
10545/*
10546 * Verify if BCT for data VLs is non-zero.
10547 */
10548static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10549{
10550        return !!ppd->actual_vls_operational;
10551}
10552
10553/*
10554 * Change the physical and/or logical link state.
10555 *
10556 * Do not call this routine while inside an interrupt.  It contains
10557 * calls to routines that can take multiple seconds to finish.
10558 *
10559 * Returns 0 on success, -errno on failure.
10560 */
10561int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10562{
10563        struct hfi1_devdata *dd = ppd->dd;
10564        struct ib_event event = {.device = NULL};
10565        int ret1, ret = 0;
10566        int orig_new_state, poll_bounce;
10567
10568        mutex_lock(&ppd->hls_lock);
10569
10570        orig_new_state = state;
10571        if (state == HLS_DN_DOWNDEF)
10572                state = dd->link_default;
10573
10574        /* interpret poll -> poll as a link bounce */
10575        poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10576                      state == HLS_DN_POLL;
10577
10578        dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10579                    link_state_name(ppd->host_link_state),
10580                    link_state_name(orig_new_state),
10581                    poll_bounce ? "(bounce) " : "",
10582                    link_state_reason_name(ppd, state));
10583
10584        /*
10585         * If we're going to a (HLS_*) link state that implies the logical
10586         * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10587         * reset is_sm_config_started to 0.
10588         */
10589        if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10590                ppd->is_sm_config_started = 0;
10591
10592        /*
10593         * Do nothing if the states match.  Let a poll to poll link bounce
10594         * go through.
10595         */
10596        if (ppd->host_link_state == state && !poll_bounce)
10597                goto done;
10598
10599        switch (state) {
10600        case HLS_UP_INIT:
10601                if (ppd->host_link_state == HLS_DN_POLL &&
10602                    (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10603                        /*
10604                         * Quick link up jumps from polling to here.
10605                         *
10606                         * Whether in normal or loopback mode, the
10607                         * simulator jumps from polling to link up.
10608                         * Accept that here.
10609                         */
10610                        /* OK */
10611                } else if (ppd->host_link_state != HLS_GOING_UP) {
10612                        goto unexpected;
10613                }
10614
10615                /*
10616                 * Wait for Link_Up physical state.
10617                 * Physical and Logical states should already be
10618                 * transitioned to LinkUp and LinkInit respectively.
10619                 */
10620                ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10621                if (ret) {
10622                        dd_dev_err(dd,
10623                                   "%s: physical state did not change to LINK-UP\n",
10624                                   __func__);
10625                        break;
10626                }
10627
10628                ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10629                if (ret) {
10630                        dd_dev_err(dd,
10631                                   "%s: logical state did not change to INIT\n",
10632                                   __func__);
10633                        break;
10634                }
10635
10636                /* clear old transient LINKINIT_REASON code */
10637                if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10638                        ppd->linkinit_reason =
10639                                OPA_LINKINIT_REASON_LINKUP;
10640
10641                /* enable the port */
10642                add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10643
10644                handle_linkup_change(dd, 1);
10645                ppd->host_link_state = HLS_UP_INIT;
10646                break;
10647        case HLS_UP_ARMED:
10648                if (ppd->host_link_state != HLS_UP_INIT)
10649                        goto unexpected;
10650
10651                if (!data_vls_operational(ppd)) {
10652                        dd_dev_err(dd,
10653                                   "%s: data VLs not operational\n", __func__);
10654                        ret = -EINVAL;
10655                        break;
10656                }
10657
10658                set_logical_state(dd, LSTATE_ARMED);
10659                ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10660                if (ret) {
10661                        dd_dev_err(dd,
10662                                   "%s: logical state did not change to ARMED\n",
10663                                   __func__);
10664                        break;
10665                }
10666                ppd->host_link_state = HLS_UP_ARMED;
10667                /*
10668                 * The simulator does not currently implement SMA messages,
10669                 * so neighbor_normal is not set.  Set it here when we first
10670                 * move to Armed.
10671                 */
10672                if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10673                        ppd->neighbor_normal = 1;
10674                break;
10675        case HLS_UP_ACTIVE:
10676                if (ppd->host_link_state != HLS_UP_ARMED)
10677                        goto unexpected;
10678
10679                set_logical_state(dd, LSTATE_ACTIVE);
10680                ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10681                if (ret) {
10682                        dd_dev_err(dd,
10683                                   "%s: logical state did not change to ACTIVE\n",
10684                                   __func__);
10685                } else {
10686                        /* tell all engines to go running */
10687                        sdma_all_running(dd);
10688                        ppd->host_link_state = HLS_UP_ACTIVE;
10689
10690                        /* Signal the IB layer that the port has gone active */
10691                        event.device = &dd->verbs_dev.rdi.ibdev;
10692                        event.element.port_num = ppd->port;
10693                        event.event = IB_EVENT_PORT_ACTIVE;
10694                }
10695                break;
10696        case HLS_DN_POLL:
10697                if ((ppd->host_link_state == HLS_DN_DISABLE ||
10698                     ppd->host_link_state == HLS_DN_OFFLINE) &&
10699                    dd->dc_shutdown)
10700                        dc_start(dd);
10701                /* Hand LED control to the DC */
10702                write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10703
10704                if (ppd->host_link_state != HLS_DN_OFFLINE) {
10705                        u8 tmp = ppd->link_enabled;
10706
10707                        ret = goto_offline(ppd, ppd->remote_link_down_reason);
10708                        if (ret) {
10709                                ppd->link_enabled = tmp;
10710                                break;
10711                        }
10712                        ppd->remote_link_down_reason = 0;
10713
10714                        if (ppd->driver_link_ready)
10715                                ppd->link_enabled = 1;
10716                }
10717
10718                set_all_slowpath(ppd->dd);
10719                ret = set_local_link_attributes(ppd);
10720                if (ret)
10721                        break;
10722
10723                ppd->port_error_action = 0;
10724                ppd->host_link_state = HLS_DN_POLL;
10725
10726                if (quick_linkup) {
10727                        /* quick linkup does not go into polling */
10728                        ret = do_quick_linkup(dd);
10729                } else {
10730                        ret1 = set_physical_link_state(dd, PLS_POLLING);
10731                        if (ret1 != HCMD_SUCCESS) {
10732                                dd_dev_err(dd,
10733                                           "Failed to transition to Polling link state, return 0x%x\n",
10734                                           ret1);
10735                                ret = -EINVAL;
10736                        }
10737                }
10738                ppd->offline_disabled_reason =
10739                        HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10740                /*
10741                 * If an error occurred above, go back to offline.  The
10742                 * caller may reschedule another attempt.
10743                 */
10744                if (ret)
10745                        goto_offline(ppd, 0);
10746                else
10747                        log_physical_state(ppd, PLS_POLLING);
10748                break;
10749        case HLS_DN_DISABLE:
10750                /* link is disabled */
10751                ppd->link_enabled = 0;
10752
10753                /* allow any state to transition to disabled */
10754
10755                /* must transition to offline first */
10756                if (ppd->host_link_state != HLS_DN_OFFLINE) {
10757                        ret = goto_offline(ppd, ppd->remote_link_down_reason);
10758                        if (ret)
10759                                break;
10760                        ppd->remote_link_down_reason = 0;
10761                }
10762
10763                if (!dd->dc_shutdown) {
10764                        ret1 = set_physical_link_state(dd, PLS_DISABLED);
10765                        if (ret1 != HCMD_SUCCESS) {
10766                                dd_dev_err(dd,
10767                                           "Failed to transition to Disabled link state, return 0x%x\n",
10768                                           ret1);
10769                                ret = -EINVAL;
10770                                break;
10771                        }
10772                        ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10773                        if (ret) {
10774                                dd_dev_err(dd,
10775                                           "%s: physical state did not change to DISABLED\n",
10776                                           __func__);
10777                                break;
10778                        }
10779                        dc_shutdown(dd);
10780                }
10781                ppd->host_link_state = HLS_DN_DISABLE;
10782                break;
10783        case HLS_DN_OFFLINE:
10784                if (ppd->host_link_state == HLS_DN_DISABLE)
10785                        dc_start(dd);
10786
10787                /* allow any state to transition to offline */
10788                ret = goto_offline(ppd, ppd->remote_link_down_reason);
10789                if (!ret)
10790                        ppd->remote_link_down_reason = 0;
10791                break;
10792        case HLS_VERIFY_CAP:
10793                if (ppd->host_link_state != HLS_DN_POLL)
10794                        goto unexpected;
10795                ppd->host_link_state = HLS_VERIFY_CAP;
10796                log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
10797                break;
10798        case HLS_GOING_UP:
10799                if (ppd->host_link_state != HLS_VERIFY_CAP)
10800                        goto unexpected;
10801
10802                ret1 = set_physical_link_state(dd, PLS_LINKUP);
10803                if (ret1 != HCMD_SUCCESS) {
10804                        dd_dev_err(dd,
10805                                   "Failed to transition to link up state, return 0x%x\n",
10806                                   ret1);
10807                        ret = -EINVAL;
10808                        break;
10809                }
10810                ppd->host_link_state = HLS_GOING_UP;
10811                break;
10812
10813        case HLS_GOING_OFFLINE:         /* transient within goto_offline() */
10814        case HLS_LINK_COOLDOWN:         /* transient within goto_offline() */
10815        default:
10816                dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10817                            __func__, state);
10818                ret = -EINVAL;
10819                break;
10820        }
10821
10822        goto done;
10823
10824unexpected:
10825        dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10826                   __func__, link_state_name(ppd->host_link_state),
10827                   link_state_name(state));
10828        ret = -EINVAL;
10829
10830done:
10831        mutex_unlock(&ppd->hls_lock);
10832
10833        if (event.device)
10834                ib_dispatch_event(&event);
10835
10836        return ret;
10837}
10838
10839int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10840{
10841        u64 reg;
10842        int ret = 0;
10843
10844        switch (which) {
10845        case HFI1_IB_CFG_LIDLMC:
10846                set_lidlmc(ppd);
10847                break;
10848        case HFI1_IB_CFG_VL_HIGH_LIMIT:
10849                /*
10850                 * The VL Arbitrator high limit is sent in units of 4k
10851                 * bytes, while HFI stores it in units of 64 bytes.
10852                 */
10853                val *= 4096 / 64;
10854                reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10855                        << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10856                write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10857                break;
10858        case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10859                /* HFI only supports POLL as the default link down state */
10860                if (val != HLS_DN_POLL)
10861                        ret = -EINVAL;
10862                break;
10863        case HFI1_IB_CFG_OP_VLS:
10864                if (ppd->vls_operational != val) {
10865                        ppd->vls_operational = val;
10866                        if (!ppd->port)
10867                                ret = -EINVAL;
10868                }
10869                break;
10870        /*
10871         * For link width, link width downgrade, and speed enable, always AND
10872         * the setting with what is actually supported.  This has two benefits.
10873         * First, enabled can't have unsupported values, no matter what the
10874         * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
10875         * "fill in with your supported value" have all the bits in the
10876         * field set, so simply ANDing with supported has the desired result.
10877         */
10878        case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10879                ppd->link_width_enabled = val & ppd->link_width_supported;
10880                break;
10881        case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10882                ppd->link_width_downgrade_enabled =
10883                                val & ppd->link_width_downgrade_supported;
10884                break;
10885        case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10886                ppd->link_speed_enabled = val & ppd->link_speed_supported;
10887                break;
10888        case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10889                /*
10890                 * HFI does not follow IB specs; save this value
10891                 * so we can report it, if asked.
10892                 */
10893                ppd->overrun_threshold = val;
10894                break;
10895        case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10896                /*
10897                 * HFI does not follow IB specs; save this value
10898                 * so we can report it, if asked.
10899                 */
10900                ppd->phy_error_threshold = val;
10901                break;
10902
10903        case HFI1_IB_CFG_MTU:
10904                set_send_length(ppd);
10905                break;
10906
10907        case HFI1_IB_CFG_PKEYS:
10908                if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10909                        set_partition_keys(ppd);
10910                break;
10911
10912        default:
10913                if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10914                        dd_dev_info(ppd->dd,
10915                                    "%s: which %s, val 0x%x: not implemented\n",
10916                                    __func__, ib_cfg_name(which), val);
10917                break;
10918        }
10919        return ret;
10920}
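
/*
 * Worked example (editorial, based only on the unit comment above): for
 * HFI1_IB_CFG_VL_HIGH_LIMIT the FM supplies the VL Arbitrator high limit in
 * 4 KB units while the CSR takes 64-byte units, so the scale factor is
 * 4096 / 64 = 64.  An incoming value of 2 (8 KB) is therefore written as
 * 2 * 64 = 128 before being masked and shifted into SEND_HIGH_PRIORITY_LIMIT.
 */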
10921
10922/* begin functions related to vl arbitration table caching */
10923static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10924{
10925        int i;
10926
10927        BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10928                        VL_ARB_LOW_PRIO_TABLE_SIZE);
10929        BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10930                        VL_ARB_HIGH_PRIO_TABLE_SIZE);
10931
10932        /*
10933         * Note that we always return values directly from the
10934         * 'vl_arb_cache' (and do no CSR reads) in response to a
10935         * 'Get(VLArbTable)'. This is obviously correct after a
10936         * 'Set(VLArbTable)', since the cache will then be up to
10937         * date. But it's also correct prior to any 'Set(VLArbTable)'
10938         * since then both the cache, and the relevant h/w registers
10939         * will be zeroed.
10940         */
10941
10942        for (i = 0; i < MAX_PRIO_TABLE; i++)
10943                spin_lock_init(&ppd->vl_arb_cache[i].lock);
10944}
10945
10946/*
10947 * vl_arb_lock_cache
10948 *
10949 * All other vl_arb_* functions should be called only after locking
10950 * the cache.
10951 */
10952static inline struct vl_arb_cache *
10953vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10954{
10955        if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10956                return NULL;
10957        spin_lock(&ppd->vl_arb_cache[idx].lock);
10958        return &ppd->vl_arb_cache[idx];
10959}
10960
10961static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10962{
10963        spin_unlock(&ppd->vl_arb_cache[idx].lock);
10964}
10965
10966static void vl_arb_get_cache(struct vl_arb_cache *cache,
10967                             struct ib_vl_weight_elem *vl)
10968{
10969        memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10970}
10971
10972static void vl_arb_set_cache(struct vl_arb_cache *cache,
10973                             struct ib_vl_weight_elem *vl)
10974{
10975        memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10976}
10977
10978static int vl_arb_match_cache(struct vl_arb_cache *cache,
10979                              struct ib_vl_weight_elem *vl)
10980{
10981        return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10982}
10983
10984/* end functions related to vl arbitration table caching */
10985
10986static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10987                          u32 size, struct ib_vl_weight_elem *vl)
10988{
10989        struct hfi1_devdata *dd = ppd->dd;
10990        u64 reg;
10991        unsigned int i, is_up = 0;
10992        int drain, ret = 0;
10993
10994        mutex_lock(&ppd->hls_lock);
10995
10996        if (ppd->host_link_state & HLS_UP)
10997                is_up = 1;
10998
10999        drain = !is_ax(dd) && is_up;
11000
11001        if (drain)
11002                /*
11003                 * Before adjusting VL arbitration weights, empty per-VL
11004                 * FIFOs, otherwise a packet whose VL weight is being
11005                 * set to 0 could get stuck in a FIFO with no chance to
11006                 * egress.
11007                 */
11008                ret = stop_drain_data_vls(dd);
11009
11010        if (ret) {
11011                dd_dev_err(
11012                        dd,
11013                        "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
11014                        __func__);
11015                goto err;
11016        }
11017
11018        for (i = 0; i < size; i++, vl++) {
11019                /*
11020                 * NOTE: The low priority shift and mask are used here, but
11021                 * they are the same for both the low and high registers.
11022                 */
11023                reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
11024                                << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
11025                      | (((u64)vl->weight
11026                                & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
11027                                << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
11028                write_csr(dd, target + (i * 8), reg);
11029        }
11030        pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
11031
11032        if (drain)
11033                open_fill_data_vls(dd); /* reopen all VLs */
11034
11035err:
11036        mutex_unlock(&ppd->hls_lock);
11037
11038        return ret;
11039}
11040
11041/*
11042 * Read one credit merge VL register.
11043 */
11044static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
11045                           struct vl_limit *vll)
11046{
11047        u64 reg = read_csr(dd, csr);
11048
11049        vll->dedicated = cpu_to_be16(
11050                (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
11051                & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
11052        vll->shared = cpu_to_be16(
11053                (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
11054                & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
11055}
11056
11057/*
11058 * Read the current credit merge limits.
11059 */
11060static int get_buffer_control(struct hfi1_devdata *dd,
11061                              struct buffer_control *bc, u16 *overall_limit)
11062{
11063        u64 reg;
11064        int i;
11065
11066        /* not all entries are filled in */
11067        memset(bc, 0, sizeof(*bc));
11068
11069        /* OPA and HFI have a 1-1 mapping */
11070        for (i = 0; i < TXE_NUM_DATA_VL; i++)
11071                read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11072
11073        /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11074        read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11075
11076        reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11077        bc->overall_shared_limit = cpu_to_be16(
11078                (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11079                & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11080        if (overall_limit)
11081                *overall_limit = (reg
11082                        >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11083                        & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11084        return sizeof(struct buffer_control);
11085}
11086
11087static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11088{
11089        u64 reg;
11090        int i;
11091
11092        /* each register contains 16 SC->VLnt mappings, 4 bits each */
11093        reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11094        for (i = 0; i < sizeof(u64); i++) {
11095                u8 byte = *(((u8 *)&reg) + i);
11096
11097                dp->vlnt[2 * i] = byte & 0xf;
11098                dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11099        }
11100
11101        reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11102        for (i = 0; i < sizeof(u64); i++) {
11103                u8 byte = *(((u8 *)&reg) + i);
11104
11105                dp->vlnt[16 + (2 * i)] = byte & 0xf;
11106                dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11107        }
11108        return sizeof(struct sc2vlnt);
11109}
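
/*
 * Worked example (editorial): each byte of the SC->VLnt CSRs packs two 4-bit
 * entries, low nibble first.  If byte i of DCC_CFG_SC_VL_TABLE_15_0 were
 * 0x53, get_sc2vlnt() above would yield vlnt[2 * i] = 0x3 and
 * vlnt[2 * i + 1] = 0x5; set_sc2vlnt() below packs entries back the same way.
 */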
11110
11111static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11112                              struct ib_vl_weight_elem *vl)
11113{
11114        unsigned int i;
11115
11116        for (i = 0; i < nelems; i++, vl++) {
11117                vl->vl = 0xf;
11118                vl->weight = 0;
11119        }
11120}
11121
11122static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11123{
11124        write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11125                  DC_SC_VL_VAL(15_0,
11126                               0, dp->vlnt[0] & 0xf,
11127                               1, dp->vlnt[1] & 0xf,
11128                               2, dp->vlnt[2] & 0xf,
11129                               3, dp->vlnt[3] & 0xf,
11130                               4, dp->vlnt[4] & 0xf,
11131                               5, dp->vlnt[5] & 0xf,
11132                               6, dp->vlnt[6] & 0xf,
11133                               7, dp->vlnt[7] & 0xf,
11134                               8, dp->vlnt[8] & 0xf,
11135                               9, dp->vlnt[9] & 0xf,
11136                               10, dp->vlnt[10] & 0xf,
11137                               11, dp->vlnt[11] & 0xf,
11138                               12, dp->vlnt[12] & 0xf,
11139                               13, dp->vlnt[13] & 0xf,
11140                               14, dp->vlnt[14] & 0xf,
11141                               15, dp->vlnt[15] & 0xf));
11142        write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11143                  DC_SC_VL_VAL(31_16,
11144                               16, dp->vlnt[16] & 0xf,
11145                               17, dp->vlnt[17] & 0xf,
11146                               18, dp->vlnt[18] & 0xf,
11147                               19, dp->vlnt[19] & 0xf,
11148                               20, dp->vlnt[20] & 0xf,
11149                               21, dp->vlnt[21] & 0xf,
11150                               22, dp->vlnt[22] & 0xf,
11151                               23, dp->vlnt[23] & 0xf,
11152                               24, dp->vlnt[24] & 0xf,
11153                               25, dp->vlnt[25] & 0xf,
11154                               26, dp->vlnt[26] & 0xf,
11155                               27, dp->vlnt[27] & 0xf,
11156                               28, dp->vlnt[28] & 0xf,
11157                               29, dp->vlnt[29] & 0xf,
11158                               30, dp->vlnt[30] & 0xf,
11159                               31, dp->vlnt[31] & 0xf));
11160}
11161
11162static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11163                        u16 limit)
11164{
11165        if (limit != 0)
11166                dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11167                            what, (int)limit, idx);
11168}
11169
11170/* change only the shared limit portion of SendCmGlobalCredit */
11171static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11172{
11173        u64 reg;
11174
11175        reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11176        reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11177        reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11178        write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11179}
11180
11181/* change only the total credit limit portion of SendCmGlobalCredit */
11182static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11183{
11184        u64 reg;
11185
11186        reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11187        reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11188        reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11189        write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11190}
11191
11192/* set the given per-VL shared limit */
11193static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11194{
11195        u64 reg;
11196        u32 addr;
11197
11198        if (vl < TXE_NUM_DATA_VL)
11199                addr = SEND_CM_CREDIT_VL + (8 * vl);
11200        else
11201                addr = SEND_CM_CREDIT_VL15;
11202
11203        reg = read_csr(dd, addr);
11204        reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11205        reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11206        write_csr(dd, addr, reg);
11207}
11208
11209/* set the given per-VL dedicated limit */
11210static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11211{
11212        u64 reg;
11213        u32 addr;
11214
11215        if (vl < TXE_NUM_DATA_VL)
11216                addr = SEND_CM_CREDIT_VL + (8 * vl);
11217        else
11218                addr = SEND_CM_CREDIT_VL15;
11219
11220        reg = read_csr(dd, addr);
11221        reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11222        reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11223        write_csr(dd, addr, reg);
11224}
11225
11226/* spin until the given per-VL status mask bits clear */
11227static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11228                                     const char *which)
11229{
11230        unsigned long timeout;
11231        u64 reg;
11232
11233        timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11234        while (1) {
11235                reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11236
11237                if (reg == 0)
11238                        return; /* success */
11239                if (time_after(jiffies, timeout))
11240                        break;          /* timed out */
11241                udelay(1);
11242        }
11243
11244        dd_dev_err(dd,
11245                   "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11246                   which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11247        /*
11248         * If this occurs, it is likely there was a credit loss on the link.
11249         * The only recovery from that is a link bounce.
11250         */
11251        dd_dev_err(dd,
11252                   "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
11253}
11254
11255/*
11256 * The number of credits on the VLs may be changed while everything
11257 * is "live", but the following algorithm must be followed due to
11258 * how the hardware is actually implemented.  In particular,
11259 * Return_Credit_Status[] is the only correct status check.
11260 *
11261 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11262 *     set Global_Shared_Credit_Limit = 0
11263 *     use_all_vl = 1
11264 * mask0 = all VLs that are changing either dedicated or shared limits
11265 * set Shared_Limit[mask0] = 0
11266 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11267 * if (changing any dedicated limit)
11268 *     mask1 = all VLs that are lowering dedicated limits
11269 *     lower Dedicated_Limit[mask1]
11270 *     spin until Return_Credit_Status[mask1] == 0
11271 *     raise Dedicated_Limits
11272 * raise Shared_Limits
11273 * raise Global_Shared_Credit_Limit
11274 *
11275 * lower = if the new limit is lower, set the limit to the new value
11276 * raise = if the new limit is higher than the current value (may be changed
11277 *      earlier in the algorithm), set the new limit to the new value
11278 */
11279int set_buffer_control(struct hfi1_pportdata *ppd,
11280                       struct buffer_control *new_bc)
11281{
11282        struct hfi1_devdata *dd = ppd->dd;
11283        u64 changing_mask, ld_mask, stat_mask;
11284        int change_count;
11285        int i, use_all_mask;
11286        int this_shared_changing;
11287        int vl_count = 0, ret;
11288        /*
11289         * A0: add the variable any_shared_limit_changing below and in the
11290         * algorithm above.  If removing A0 support, it can be removed.
11291         */
11292        int any_shared_limit_changing;
11293        struct buffer_control cur_bc;
11294        u8 changing[OPA_MAX_VLS];
11295        u8 lowering_dedicated[OPA_MAX_VLS];
11296        u16 cur_total;
11297        u32 new_total = 0;
11298        const u64 all_mask =
11299        SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11300         | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11301         | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11302         | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11303         | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11304         | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11305         | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11306         | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11307         | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11308
11309#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11310#define NUM_USABLE_VLS 16       /* look at VL15 and less */
11311
11312        /* find the new total credits, do sanity check on unused VLs */
11313        for (i = 0; i < OPA_MAX_VLS; i++) {
11314                if (valid_vl(i)) {
11315                        new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11316                        continue;
11317                }
11318                nonzero_msg(dd, i, "dedicated",
11319                            be16_to_cpu(new_bc->vl[i].dedicated));
11320                nonzero_msg(dd, i, "shared",
11321                            be16_to_cpu(new_bc->vl[i].shared));
11322                new_bc->vl[i].dedicated = 0;
11323                new_bc->vl[i].shared = 0;
11324        }
11325        new_total += be16_to_cpu(new_bc->overall_shared_limit);
11326
11327        /* fetch the current values */
11328        get_buffer_control(dd, &cur_bc, &cur_total);
11329
11330        /*
11331         * Create the masks we will use.
11332         */
11333        memset(changing, 0, sizeof(changing));
11334        memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11335        /*
11336         * NOTE: Assumes that the individual VL bits are adjacent and in
11337         * increasing order
11338         */
11339        stat_mask =
11340                SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11341        changing_mask = 0;
11342        ld_mask = 0;
11343        change_count = 0;
11344        any_shared_limit_changing = 0;
11345        for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11346                if (!valid_vl(i))
11347                        continue;
11348                this_shared_changing = new_bc->vl[i].shared
11349                                                != cur_bc.vl[i].shared;
11350                if (this_shared_changing)
11351                        any_shared_limit_changing = 1;
11352                if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11353                    this_shared_changing) {
11354                        changing[i] = 1;
11355                        changing_mask |= stat_mask;
11356                        change_count++;
11357                }
11358                if (be16_to_cpu(new_bc->vl[i].dedicated) <
11359                                        be16_to_cpu(cur_bc.vl[i].dedicated)) {
11360                        lowering_dedicated[i] = 1;
11361                        ld_mask |= stat_mask;
11362                }
11363        }
11364
11365        /* bracket the credit change with a total adjustment */
11366        if (new_total > cur_total)
11367                set_global_limit(dd, new_total);
11368
11369        /*
11370         * Start the credit change algorithm.
11371         */
11372        use_all_mask = 0;
11373        if ((be16_to_cpu(new_bc->overall_shared_limit) <
11374             be16_to_cpu(cur_bc.overall_shared_limit)) ||
11375            (is_ax(dd) && any_shared_limit_changing)) {
11376                set_global_shared(dd, 0);
11377                cur_bc.overall_shared_limit = 0;
11378                use_all_mask = 1;
11379        }
11380
11381        for (i = 0; i < NUM_USABLE_VLS; i++) {
11382                if (!valid_vl(i))
11383                        continue;
11384
11385                if (changing[i]) {
11386                        set_vl_shared(dd, i, 0);
11387                        cur_bc.vl[i].shared = 0;
11388                }
11389        }
11390
11391        wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11392                                 "shared");
11393
11394        if (change_count > 0) {
11395                for (i = 0; i < NUM_USABLE_VLS; i++) {
11396                        if (!valid_vl(i))
11397                                continue;
11398
11399                        if (lowering_dedicated[i]) {
11400                                set_vl_dedicated(dd, i,
11401                                                 be16_to_cpu(new_bc->
11402                                                             vl[i].dedicated));
11403                                cur_bc.vl[i].dedicated =
11404                                                new_bc->vl[i].dedicated;
11405                        }
11406                }
11407
11408                wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11409
11410                /* now raise all dedicated that are going up */
11411                for (i = 0; i < NUM_USABLE_VLS; i++) {
11412                        if (!valid_vl(i))
11413                                continue;
11414
11415                        if (be16_to_cpu(new_bc->vl[i].dedicated) >
11416                                        be16_to_cpu(cur_bc.vl[i].dedicated))
11417                                set_vl_dedicated(dd, i,
11418                                                 be16_to_cpu(new_bc->
11419                                                             vl[i].dedicated));
11420                }
11421        }
11422
11423        /* next raise all shared that are going up */
11424        for (i = 0; i < NUM_USABLE_VLS; i++) {
11425                if (!valid_vl(i))
11426                        continue;
11427
11428                if (be16_to_cpu(new_bc->vl[i].shared) >
11429                                be16_to_cpu(cur_bc.vl[i].shared))
11430                        set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11431        }
11432
11433        /* finally raise the global shared */
11434        if (be16_to_cpu(new_bc->overall_shared_limit) >
11435            be16_to_cpu(cur_bc.overall_shared_limit))
11436                set_global_shared(dd,
11437                                  be16_to_cpu(new_bc->overall_shared_limit));
11438
11439        /* bracket the credit change with a total adjustment */
11440        if (new_total < cur_total)
11441                set_global_limit(dd, new_total);
11442
11443        /*
11444         * Determine the actual number of operational VLS using the number of
11445         * dedicated and shared credits for each VL.
11446         */
11447        if (change_count > 0) {
11448                for (i = 0; i < TXE_NUM_DATA_VL; i++)
11449                        if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11450                            be16_to_cpu(new_bc->vl[i].shared) > 0)
11451                                vl_count++;
11452                ppd->actual_vls_operational = vl_count;
11453                ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11454                                    ppd->actual_vls_operational :
11455                                    ppd->vls_operational,
11456                                    NULL);
11457                if (ret == 0)
11458                        ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11459                                           ppd->actual_vls_operational :
11460                                           ppd->vls_operational, NULL);
11461                if (ret)
11462                        return ret;
11463        }
11464        return 0;
11465}
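
/*
 * Worked example of the lower/raise passes (editorial): if VL0's dedicated
 * limit drops from 100 to 50 credits it is "lowering", so set_buffer_control()
 * writes it in the first dedicated pass and then waits on Return_Credit_Status;
 * if it instead rises from 50 to 100, it is only written in the later "raise"
 * pass, after the lowered VLs have returned their credits.
 */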
11466
11467/*
11468 * Read the given fabric manager table. Return the size of the
11469 * table (in bytes) on success, and a negative error code on
11470 * failure.
11471 */
11472int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11473
11474{
11475        int size;
11476        struct vl_arb_cache *vlc;
11477
11478        switch (which) {
11479        case FM_TBL_VL_HIGH_ARB:
11480                size = 256;
11481                /*
11482                 * OPA specifies 128 elements (of 2 bytes each), though
11483                 * HFI supports only 16 elements in h/w.
11484                 */
11485                vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11486                vl_arb_get_cache(vlc, t);
11487                vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11488                break;
11489        case FM_TBL_VL_LOW_ARB:
11490                size = 256;
11491                /*
11492                 * OPA specifies 128 elements (of 2 bytes each), though
11493                 * HFI supports only 16 elements in h/w.
11494                 */
11495                vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11496                vl_arb_get_cache(vlc, t);
11497                vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11498                break;
11499        case FM_TBL_BUFFER_CONTROL:
11500                size = get_buffer_control(ppd->dd, t, NULL);
11501                break;
11502        case FM_TBL_SC2VLNT:
11503                size = get_sc2vlnt(ppd->dd, t);
11504                break;
11505        case FM_TBL_VL_PREEMPT_ELEMS:
11506                size = 256;
11507                /* OPA specifies 128 elements, of 2 bytes each */
11508                get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11509                break;
11510        case FM_TBL_VL_PREEMPT_MATRIX:
11511                size = 256;
11512                /*
11513                 * OPA specifies that this is the same size as the VL
11514                 * arbitration tables (i.e., 256 bytes).
11515                 */
11516                break;
11517        default:
11518                return -EINVAL;
11519        }
11520        return size;
11521}
11522
11523/*
11524 * Write the given fabric manager table.
11525 */
11526int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11527{
11528        int ret = 0;
11529        struct vl_arb_cache *vlc;
11530
11531        switch (which) {
11532        case FM_TBL_VL_HIGH_ARB:
11533                vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11534                if (vl_arb_match_cache(vlc, t)) {
11535                        vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11536                        break;
11537                }
11538                vl_arb_set_cache(vlc, t);
11539                vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11540                ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11541                                     VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11542                break;
11543        case FM_TBL_VL_LOW_ARB:
11544                vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11545                if (vl_arb_match_cache(vlc, t)) {
11546                        vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11547                        break;
11548                }
11549                vl_arb_set_cache(vlc, t);
11550                vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11551                ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11552                                     VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11553                break;
11554        case FM_TBL_BUFFER_CONTROL:
11555                ret = set_buffer_control(ppd, t);
11556                break;
11557        case FM_TBL_SC2VLNT:
11558                set_sc2vlnt(ppd->dd, t);
11559                break;
11560        default:
11561                ret = -EINVAL;
11562        }
11563        return ret;
11564}
11565
11566/*
11567 * Disable all data VLs.
11568 *
11569 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11570 */
11571static int disable_data_vls(struct hfi1_devdata *dd)
11572{
11573        if (is_ax(dd))
11574                return 1;
11575
11576        pio_send_control(dd, PSC_DATA_VL_DISABLE);
11577
11578        return 0;
11579}
11580
11581/*
11582 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11583 * Just re-enables all data VLs (the "fill" part happens
11584 * automatically - the name was chosen for symmetry with
11585 * stop_drain_data_vls()).
11586 *
11587 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11588 */
11589int open_fill_data_vls(struct hfi1_devdata *dd)
11590{
11591        if (is_ax(dd))
11592                return 1;
11593
11594        pio_send_control(dd, PSC_DATA_VL_ENABLE);
11595
11596        return 0;
11597}
11598
11599/*
11600 * drain_data_vls() - assumes that disable_data_vls() has been called,
11601 * and waits for the occupancy (of the per-VL FIFOs) of all contexts and
11602 * SDMA engines to drop to 0.
11603 */
11604static void drain_data_vls(struct hfi1_devdata *dd)
11605{
11606        sc_wait(dd);
11607        sdma_wait(dd);
11608        pause_for_credit_return(dd);
11609}
11610
11611/*
11612 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11613 *
11614 * Use open_fill_data_vls() to resume using data VLs.  This pair is
11615 * meant to be used like this:
11616 *
11617 * stop_drain_data_vls(dd);
11618 * // do things with per-VL resources
11619 * open_fill_data_vls(dd);
11620 */
11621int stop_drain_data_vls(struct hfi1_devdata *dd)
11622{
11623        int ret;
11624
11625        ret = disable_data_vls(dd);
11626        if (ret == 0)
11627                drain_data_vls(dd);
11628
11629        return ret;
11630}
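
/*
 * Editorial usage sketch (hypothetical caller, not part of this file):
 * bracket per-VL reconfiguration with the stop/drain + reopen pair and check
 * the stop result before touching per-VL resources, as set_vl_weights() does.
 */
#if 0	/* example only */
static int example_adjust_per_vl_resources(struct hfi1_devdata *dd)
{
	int ret = stop_drain_data_vls(dd);

	if (ret)
		return ret;	/* VLs could not be disabled and drained */
	/* ... adjust per-VL resources here ... */
	return open_fill_data_vls(dd);
}
#endif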
11631
11632/*
11633 * Convert a nanosecond time to a cclock count.  No matter how slow
11634 * the cclock, a non-zero ns will always have a non-zero result.
11635 */
11636u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11637{
11638        u32 cclocks;
11639
11640        if (dd->icode == ICODE_FPGA_EMULATION)
11641                cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11642        else  /* simulation pretends to be ASIC */
11643                cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11644        if (ns && !cclocks)     /* if ns nonzero, must be at least 1 */
11645                cclocks = 1;
11646        return cclocks;
11647}
11648
11649/*
11650 * Convert a cclock count to nanoseconds. No matter how slow
11651 * the cclock, a non-zero cclocks will always have a non-zero result.
11652 */
11653u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11654{
11655        u32 ns;
11656
11657        if (dd->icode == ICODE_FPGA_EMULATION)
11658                ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11659        else  /* simulation pretends to be ASIC */
11660                ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11661        if (cclocks && !ns)
11662                ns = 1;
11663        return ns;
11664}
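
/*
 * Worked example (editorial, using a hypothetical cclock period of 800 ps
 * rather than the actual FPGA_CCLOCK_PS/ASIC_CCLOCK_PS values): with 800 ps,
 * ns_to_cclock(dd, 1) = (1 * 1000) / 800 = 1 cclock, while
 * cclock_to_ns(dd, 1) = (1 * 800) / 1000 = 0, which the "non-zero in,
 * non-zero out" clamp then raises to 1 ns.
 */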
11665
11666/*
11667 * Dynamically adjust the receive interrupt timeout for a context based on
11668 * incoming packet rate.
11669 *
11670 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11671 */
11672static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11673{
11674        struct hfi1_devdata *dd = rcd->dd;
11675        u32 timeout = rcd->rcvavail_timeout;
11676
11677        /*
11678         * This algorithm doubles or halves the timeout depending on whether
11679         * the number of packets received in this interrupt was less than or
11680         * greater than or equal to the interrupt count.
11681         *
11682         * The calculations below do not allow a steady state to be achieved.
11683         * Only at the endpoints is it possible to have an unchanging
11684         * timeout.
11685         */
11686        if (npkts < rcv_intr_count) {
11687                /*
11688                 * Not enough packets arrived before the timeout, adjust
11689                 * timeout downward.
11690                 */
11691                if (timeout < 2) /* already at minimum? */
11692                        return;
11693                timeout >>= 1;
11694        } else {
11695                /*
11696                 * More than enough packets arrived before the timeout, adjust
11697                 * timeout upward.
11698                 */
11699                if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11700                        return;
11701                timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11702        }
11703
11704        rcd->rcvavail_timeout = timeout;
11705        /*
11706         * timeout cannot be larger than rcv_intr_timeout_csr which has already
11707         * been verified to be in range
11708         */
11709        write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11710                        (u64)timeout <<
11711                        RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11712}
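
/*
 * Worked example (editorial, with a hypothetical rcv_intr_count of 16 and a
 * current rcvavail_timeout of 8): an interrupt that handled 3 packets
 * (npkts < 16) halves the timeout to 4, while one that handled 40 packets
 * doubles it to 16, capped at dd->rcv_intr_timeout_csr.  Only at timeout < 2
 * or timeout >= that cap does the value stop changing.
 */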
11713
11714void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11715                    u32 intr_adjust, u32 npkts)
11716{
11717        struct hfi1_devdata *dd = rcd->dd;
11718        u64 reg;
11719        u32 ctxt = rcd->ctxt;
11720
11721        /*
11722         * Need to write timeout register before updating RcvHdrHead to ensure
11723         * that a new value is used when the HW decides to restart counting.
11724         */
11725        if (intr_adjust)
11726                adjust_rcv_timeout(rcd, npkts);
11727        if (updegr) {
11728                reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11729                        << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11730                write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11731        }
11732        mmiowb();
11733        reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11734                (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11735                        << RCV_HDR_HEAD_HEAD_SHIFT);
11736        write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11737        mmiowb();
11738}
11739
11740u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11741{
11742        u32 head, tail;
11743
11744        head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11745                & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11746
11747        if (rcd->rcvhdrtail_kvaddr)
11748                tail = get_rcvhdrtail(rcd);
11749        else
11750                tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11751
11752        return head == tail;
11753}
11754
11755/*
11756 * Context Control and Receive Array encoding for buffer size:
11757 *      0x0 invalid
11758 *      0x1   4 KB
11759 *      0x2   8 KB
11760 *      0x3  16 KB
11761 *      0x4  32 KB
11762 *      0x5  64 KB
11763 *      0x6 128 KB
11764 *      0x7 256 KB
11765 *      0x8 512 KB (Receive Array only)
11766 *      0x9   1 MB (Receive Array only)
11767 *      0xa   2 MB (Receive Array only)
11768 *
11769 *      0xB-0xF - reserved (Receive Array only)
11770 *
11771 *
11772 * This routine assumes that the value has already been sanity checked.
11773 */
11774static u32 encoded_size(u32 size)
11775{
11776        switch (size) {
11777        case   4 * 1024: return 0x1;
11778        case   8 * 1024: return 0x2;
11779        case  16 * 1024: return 0x3;
11780        case  32 * 1024: return 0x4;
11781        case  64 * 1024: return 0x5;
11782        case 128 * 1024: return 0x6;
11783        case 256 * 1024: return 0x7;
11784        case 512 * 1024: return 0x8;
11785        case   1 * 1024 * 1024: return 0x9;
11786        case   2 * 1024 * 1024: return 0xa;
11787        }
11788        return 0x1;     /* if invalid, go with the minimum size */
11789}
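
/*
 * Editorial example: encoded_size(64 * 1024) returns 0x5, matching the table
 * above, while any size not listed falls back to 0x1 (the 4 KB minimum).
 */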
11790
11791void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11792                  struct hfi1_ctxtdata *rcd)
11793{
11794        u64 rcvctrl, reg;
11795        int did_enable = 0;
11796        u16 ctxt;
11797
11798        if (!rcd)
11799                return;
11800
11801        ctxt = rcd->ctxt;
11802
11803        hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11804
11805        rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11806        /* if the context is already enabled, don't do the extra steps */
11807        if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11808            !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11809                /* reset the tail and hdr addresses, and sequence count */
11810                write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11811                                rcd->rcvhdrq_dma);
11812                if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11813                        write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11814                                        rcd->rcvhdrqtailaddr_dma);
11815                rcd->seq_cnt = 1;
11816
11817                /* reset the cached receive header queue head value */
11818                rcd->head = 0;
11819
11820                /*
11821                 * Zero the receive header queue so we don't get false
11822                 * positives when checking the sequence number.  The
11823                 * sequence numbers could land exactly on the same spot.
11824                 * E.g. a rcd restart before the receive header wrapped.
11825                 */
11826                memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11827
11828                /* starting timeout */
11829                rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11830
11831                /* enable the context */
11832                rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11833
11834                /* clean the egr buffer size first */
11835                rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11836                rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11837                                & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11838                                        << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11839
11840                /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11841                write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11842                did_enable = 1;
11843
11844                /* zero RcvEgrIndexHead */
11845                write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11846
11847                /* set eager count and base index */
11848                reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11849                        & RCV_EGR_CTRL_EGR_CNT_MASK)
11850                       << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11851                        (((rcd->eager_base >> RCV_SHIFT)
11852                          & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11853                         << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11854                write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11855
11856                /*
11857                 * Set TID (expected) count and base index.
11858                 * rcd->expected_count is set to individual RcvArray entries,
11859                 * not pairs, and the CSR takes a pair-count in groups of
11860                 * four, so divide by 8.
11861                 */
11862                reg = (((rcd->expected_count >> RCV_SHIFT)
11863                                        & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11864                                << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11865                      (((rcd->expected_base >> RCV_SHIFT)
11866                                        & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11867                                << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11868                write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11869                if (ctxt == HFI1_CTRL_CTXT)
11870                        write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11871        }
11872        if (op & HFI1_RCVCTRL_CTXT_DIS) {
11873                write_csr(dd, RCV_VL15, 0);
11874                /*
11875                 * When a receive context is being disabled, turn on tail
11876                 * update with a dummy tail address and then disable the
11877                 * receive context.
11878                 */
11879                if (dd->rcvhdrtail_dummy_dma) {
11880                        write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11881                                        dd->rcvhdrtail_dummy_dma);
11882                        /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11883                        rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11884                }
11885
11886                rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11887        }
11888        if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11889                rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11890        if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11891                rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11892        if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
11893                rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11894        if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11895                /* See comment on RcvCtxtCtrl.TailUpd above */
11896                if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11897                        rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11898        }
11899        if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11900                rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11901        if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11902                rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11903        if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11904                /*
11905                 * In one-packet-per-eager mode, the size comes from
11906                 * the RcvArray entry.
11907                 */
11908                rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11909                rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11910        }
11911        if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11912                rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11913        if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11914                rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11915        if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11916                rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11917        if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11918                rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11919        if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11920                rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11921        rcd->rcvctrl = rcvctrl;
11922        hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11923        write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11924
11925        /* work around sticky RcvCtxtStatus.BlockedRHQFull */
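             /*
              * The head toggle below (a non-zero write, then zero, bracketed
              * by reads that flush the writes) appears to nudge the receive
              * engine so the latched blocked status clears; the status is
              * read back afterwards to report whether it did.
              */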
11926        if (did_enable &&
11927            (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11928                reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11929                if (reg != 0) {
11930                        dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11931                                    ctxt, reg);
11932                        read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11933                        write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11934                        write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11935                        read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11936                        reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11937                        dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11938                                    ctxt, reg, reg == 0 ? "not" : "still");
11939                }
11940        }
11941
11942        if (did_enable) {
11943                /*
11944                 * The interrupt timeout and count must be set after
11945                 * the context is enabled to take effect.
11946                 */
11947                /* set interrupt timeout */
11948                write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11949                                (u64)rcd->rcvavail_timeout <<
11950                                RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11951
11952                /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11953                reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11954                write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11955        }
11956
11957        if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11958                /*
11959                 * If the context has been disabled and the Tail Update has
11960                 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy
11961                 * address so it doesn't contain an invalid address.
11962                 */
11963                write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11964                                dd->rcvhdrtail_dummy_dma);
11965}
11966
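     /*
      * Counterpart of hfi1_read_portcntrs() below, but for device counters.
      * With @namep set, return the newline-separated counter name list;
      * otherwise fill *cntrp with the counter value block.  Returns the
      * size in bytes of whichever buffer is handed back.
      */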
11967u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
11968{
11969        int ret;
11970        u64 val = 0;
11971
11972        if (namep) {
11973                ret = dd->cntrnameslen;
11974                *namep = dd->cntrnames;
11975        } else {
11976                const struct cntr_entry *entry;
11977                int i, j;
11978
11979                ret = (dd->ndevcntrs) * sizeof(u64);
11980
11981                /* Get the start of the block of counters */
11982                *cntrp = dd->cntrs;
11983
11984                /*
11985                 * Now go and fill in each counter in the block.
11986                 */
11987                for (i = 0; i < DEV_CNTR_LAST; i++) {
11988                        entry = &dev_cntrs[i];
11989                        hfi1_cdbg(CNTR, "reading %s", entry->name);
11990                        if (entry->flags & CNTR_DISABLED) {
11991                                /* Nothing */
11992                                hfi1_cdbg(CNTR, "\tDisabled\n");
11993                        } else {
11994                                if (entry->flags & CNTR_VL) {
11995                                        hfi1_cdbg(CNTR, "\tPer VL\n");
11996                                        for (j = 0; j < C_VL_COUNT; j++) {
11997                                                val = entry->rw_cntr(entry,
11998                                                                  dd, j,
11999                                                                  CNTR_MODE_R,
12000                                                                  0);
12001                                                hfi1_cdbg(
12002                                                   CNTR,
12003                                                   "\t\tRead 0x%llx for %d\n",
12004                                                   val, j);
12005                                                dd->cntrs[entry->offset + j] =
12006                                                                            val;
12007                                        }
12008                                } else if (entry->flags & CNTR_SDMA) {
12009                                        hfi1_cdbg(CNTR,
12010                                                  "\t Per SDMA Engine\n");
12011                                        for (j = 0; j < dd->chip_sdma_engines;
12012                                             j++) {
12013                                                val =
12014                                                entry->rw_cntr(entry, dd, j,
12015                                                               CNTR_MODE_R, 0);
12016                                                hfi1_cdbg(CNTR,
12017                                                          "\t\tRead 0x%llx for %d\n",
12018                                                          val, j);
12019                                                dd->cntrs[entry->offset + j] =
12020                                                                        val;
12021                                        }
12022                                } else {
12023                                        val = entry->rw_cntr(entry, dd,
12024                                                        CNTR_INVALID_VL,
12025                                                        CNTR_MODE_R, 0);
12026                                        dd->cntrs[entry->offset] = val;
12027                                        hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12028                                }
12029                        }
12030                }
12031        }
12032        return ret;
12033}
12034
12035/*
12036 * Used by sysfs to create files for hfi stats to read
12037 */
12038u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
12039{
12040        int ret;
12041        u64 val = 0;
12042
12043        if (namep) {
12044                ret = ppd->dd->portcntrnameslen;
12045                *namep = ppd->dd->portcntrnames;
12046        } else {
12047                const struct cntr_entry *entry;
12048                int i, j;
12049
12050                ret = ppd->dd->nportcntrs * sizeof(u64);
12051                *cntrp = ppd->cntrs;
12052
12053                for (i = 0; i < PORT_CNTR_LAST; i++) {
12054                        entry = &port_cntrs[i];
12055                        hfi1_cdbg(CNTR, "reading %s", entry->name);
12056                        if (entry->flags & CNTR_DISABLED) {
12057                                /* Nothing */
12058                                hfi1_cdbg(CNTR, "\tDisabled\n");
12059                                continue;
12060                        }
12061
12062                        if (entry->flags & CNTR_VL) {
12063                                hfi1_cdbg(CNTR, "\tPer VL");
12064                                for (j = 0; j < C_VL_COUNT; j++) {
12065                                        val = entry->rw_cntr(entry, ppd, j,
12066                                                               CNTR_MODE_R,
12067                                                               0);
12068                                        hfi1_cdbg(
12069                                           CNTR,
12070                                           "\t\tRead 0x%llx for %d",
12071                                           val, j);
12072                                        ppd->cntrs[entry->offset + j] = val;
12073                                }
12074                        } else {
12075                                val = entry->rw_cntr(entry, ppd,
12076                                                       CNTR_INVALID_VL,
12077                                                       CNTR_MODE_R,
12078                                                       0);
12079                                ppd->cntrs[entry->offset] = val;
12080                                hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12081                        }
12082                }
12083        }
12084        return ret;
12085}
12086
12087static void free_cntrs(struct hfi1_devdata *dd)
12088{
12089        struct hfi1_pportdata *ppd;
12090        int i;
12091
12092        if (dd->synth_stats_timer.data)
12093                del_timer_sync(&dd->synth_stats_timer);
12094        dd->synth_stats_timer.data = 0;
12095        ppd = (struct hfi1_pportdata *)(dd + 1);
12096        for (i = 0; i < dd->num_pports; i++, ppd++) {
12097                kfree(ppd->cntrs);
12098                kfree(ppd->scntrs);
12099                free_percpu(ppd->ibport_data.rvp.rc_acks);
12100                free_percpu(ppd->ibport_data.rvp.rc_qacks);
12101                free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
12102                ppd->cntrs = NULL;
12103                ppd->scntrs = NULL;
12104                ppd->ibport_data.rvp.rc_acks = NULL;
12105                ppd->ibport_data.rvp.rc_qacks = NULL;
12106                ppd->ibport_data.rvp.rc_delayed_comp = NULL;
12107        }
12108        kfree(dd->portcntrnames);
12109        dd->portcntrnames = NULL;
12110        kfree(dd->cntrs);
12111        dd->cntrs = NULL;
12112        kfree(dd->scntrs);
12113        dd->scntrs = NULL;
12114        kfree(dd->cntrnames);
12115        dd->cntrnames = NULL;
12116        if (dd->update_cntr_wq) {
12117                destroy_workqueue(dd->update_cntr_wq);
12118                dd->update_cntr_wq = NULL;
12119        }
12120}
12121
12122static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12123                              u64 *psval, void *context, int vl)
12124{
12125        u64 val;
12126        u64 sval = *psval;
12127
12128        if (entry->flags & CNTR_DISABLED) {
12129                dd_dev_err(dd, "Counter %s not enabled", entry->name);
12130                return 0;
12131        }
12132
12133        hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12134
12135        val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12136
12137        /* If it's a synthetic counter there is more work we need to do */
12138        if (entry->flags & CNTR_SYNTH) {
12139                if (sval == CNTR_MAX) {
12140                        /* No need to read already saturated */
12141                        return CNTR_MAX;
12142                }
12143
12144                if (entry->flags & CNTR_32BIT) {
12145                        /* 32bit counters can wrap multiple times */
12146                        u64 upper = sval >> 32;
12147                        u64 lower = (sval << 32) >> 32;
12148
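                             /*
                              * Worked example: a saved sval of 0x1FFFFFFF0
                              * gives upper = 1, lower = 0xFFFFFFF0; if the
                              * HW now reads 0x10 then lower > val, so the HW
                              * wrapped: upper becomes 2 and the final value
                              * is 0x200000010.
                              */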
12149                        if (lower > val) { /* hw wrapped */
12150                                if (upper == CNTR_32BIT_MAX)
12151                                        val = CNTR_MAX;
12152                                else
12153                                        upper++;
12154                        }
12155
12156                        if (val != CNTR_MAX)
12157                                val = (upper << 32) | val;
12158
12159                } else {
12160                        /* If we rolled we are saturated */
12161                        if ((val < sval) || (val > CNTR_MAX))
12162                                val = CNTR_MAX;
12163                }
12164        }
12165
12166        *psval = val;
12167
12168        hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12169
12170        return val;
12171}
12172
12173static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12174                               struct cntr_entry *entry,
12175                               u64 *psval, void *context, int vl, u64 data)
12176{
12177        u64 val;
12178
12179        if (entry->flags & CNTR_DISABLED) {
12180                dd_dev_err(dd, "Counter %s not enabled", entry->name);
12181                return 0;
12182        }
12183
12184        hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12185
12186        if (entry->flags & CNTR_SYNTH) {
12187                *psval = data;
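                     /*
                      * The shadow value keeps the full 64-bit count; for
                      * 32-bit counters only the low 32 bits are written to
                      * the hardware below.
                      */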
12188                if (entry->flags & CNTR_32BIT) {
12189                        val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12190                                             (data << 32) >> 32);
12191                        val = data; /* return the full 64bit value */
12192                } else {
12193                        val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12194                                             data);
12195                }
12196        } else {
12197                val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12198        }
12199
12200        *psval = val;
12201
12202        hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12203
12204        return val;
12205}
12206
12207u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12208{
12209        struct cntr_entry *entry;
12210        u64 *sval;
12211
12212        entry = &dev_cntrs[index];
12213        sval = dd->scntrs + entry->offset;
12214
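             /*
              * Per-VL counters occupy consecutive shadow slots starting at
              * entry->offset (see init_cntrs()), so the VL index offsets
              * directly into the shadow array.
              */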
12215        if (vl != CNTR_INVALID_VL)
12216                sval += vl;
12217
12218        return read_dev_port_cntr(dd, entry, sval, dd, vl);
12219}
12220
12221u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12222{
12223        struct cntr_entry *entry;
12224        u64 *sval;
12225
12226        entry = &dev_cntrs[index];
12227        sval = dd->scntrs + entry->offset;
12228
12229        if (vl != CNTR_INVALID_VL)
12230                sval += vl;
12231
12232        return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12233}
12234
12235u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12236{
12237        struct cntr_entry *entry;
12238        u64 *sval;
12239
12240        entry = &port_cntrs[index];
12241        sval = ppd->scntrs + entry->offset;
12242
12243        if (vl != CNTR_INVALID_VL)
12244                sval += vl;
12245
12246        if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12247            (index <= C_RCV_HDR_OVF_LAST)) {
12248                /* We do not want to bother for disabled contexts */
12249                return 0;
12250        }
12251
12252        return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12253}
12254
12255u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12256{
12257        struct cntr_entry *entry;
12258        u64 *sval;
12259
12260        entry = &port_cntrs[index];
12261        sval = ppd->scntrs + entry->offset;
12262
12263        if (vl != CNTR_INVALID_VL)
12264                sval += vl;
12265
12266        if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12267            (index <= C_RCV_HDR_OVF_LAST)) {
12268                /* We do not want to bother for disabled contexts */
12269                return 0;
12270        }
12271
12272        return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12273}
12274
12275static void do_update_synth_timer(struct work_struct *work)
12276{
12277        u64 cur_tx;
12278        u64 cur_rx;
12279        u64 total_flits;
12280        u8 update = 0;
12281        int i, j, vl;
12282        struct hfi1_pportdata *ppd;
12283        struct cntr_entry *entry;
12284        struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12285                                               update_cntr_work);
12286
12287        /*
12288         * Rather than keep beating on the CSRs, pick a minimal set that we can
12289         * check to watch for potential rollover. We do this by looking at the
12290         * number of flits sent/received. If the total flit count exceeds 32 bits,
12291         * we have to iterate over all the counters and update.
12292         */
12293        entry = &dev_cntrs[C_DC_RCV_FLITS];
12294        cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12295
12296        entry = &dev_cntrs[C_DC_XMIT_FLITS];
12297        cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12298
12299        hfi1_cdbg(
12300            CNTR,
12301            "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12302            dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12303
12304        if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12305                /*
12306                 * May not be strictly necessary to update but it won't hurt and
12307                 * simplifies the logic here.
12308                 */
12309                update = 1;
12310                hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12311                          dd->unit);
12312        } else {
12313                total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12314                hfi1_cdbg(CNTR,
12315                          "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12316                          total_flits, (u64)CNTR_32BIT_MAX);
12317                if (total_flits >= CNTR_32BIT_MAX) {
12318                        hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12319                                  dd->unit);
12320                        update = 1;
12321                }
12322        }
12323
12324        if (update) {
12325                hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12326                for (i = 0; i < DEV_CNTR_LAST; i++) {
12327                        entry = &dev_cntrs[i];
12328                        if (entry->flags & CNTR_VL) {
12329                                for (vl = 0; vl < C_VL_COUNT; vl++)
12330                                        read_dev_cntr(dd, i, vl);
12331                        } else {
12332                                read_dev_cntr(dd, i, CNTR_INVALID_VL);
12333                        }
12334                }
12335                ppd = (struct hfi1_pportdata *)(dd + 1);
12336                for (i = 0; i < dd->num_pports; i++, ppd++) {
12337                        for (j = 0; j < PORT_CNTR_LAST; j++) {
12338                                entry = &port_cntrs[j];
12339                                if (entry->flags & CNTR_VL) {
12340                                        for (vl = 0; vl < C_VL_COUNT; vl++)
12341                                                read_port_cntr(ppd, j, vl);
12342                                } else {
12343                                        read_port_cntr(ppd, j, CNTR_INVALID_VL);
12344                                }
12345                        }
12346                }
12347
12348                /*
12349                 * We want the value in the register. The goal is to keep track
12350                 * of the number of "ticks" not the counter value. In other
12351                 * words if the register rolls we want to notice it and go ahead
12352                 * and force an update.
12353                 */
12354                entry = &dev_cntrs[C_DC_XMIT_FLITS];
12355                dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12356                                                CNTR_MODE_R, 0);
12357
12358                entry = &dev_cntrs[C_DC_RCV_FLITS];
12359                dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12360                                                CNTR_MODE_R, 0);
12361
12362                hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12363                          dd->unit, dd->last_tx, dd->last_rx);
12364
12365        } else {
12366                hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12367        }
12368}
12369
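     /*
      * Timer callback: defer the CSR-heavy counter scan to the ordered
      * workqueue and re-arm for the next SYNTH_CNT_TIME interval.
      */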
12370static void update_synth_timer(unsigned long opaque)
12371{
12372        struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
12373
12374        queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
12375        mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12376}
12377
12378#define C_MAX_NAME 16 /* 15 chars + one for '\0' */
12379static int init_cntrs(struct hfi1_devdata *dd)
12380{
12381        int i, rcv_ctxts, j;
12382        size_t sz;
12383        char *p;
12384        char name[C_MAX_NAME];
12385        struct hfi1_pportdata *ppd;
12386        const char *bit_type_32 = ",32";
12387        const int bit_type_32_sz = strlen(bit_type_32);
12388
12389        /* set up the stats timer; it is started (mod_timer) at the end */
12390        setup_timer(&dd->synth_stats_timer, update_synth_timer,
12391                    (unsigned long)dd);
12392
12393        /***********************/
12394        /* per device counters */
12395        /***********************/
12396
12397        /* size names and determine how many we have */
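             /*
              * Each name is newline-terminated; per-VL and per-SDMA counters
              * expand to one entry per VL/engine, and 32-bit counters get a
              * ",32" suffix, presumably so the consumer can tell the counter
              * width.
              */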
12398        dd->ndevcntrs = 0;
12399        sz = 0;
12400
12401        for (i = 0; i < DEV_CNTR_LAST; i++) {
12402                if (dev_cntrs[i].flags & CNTR_DISABLED) {
12403                        hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12404                        continue;
12405                }
12406
12407                if (dev_cntrs[i].flags & CNTR_VL) {
12408                        dev_cntrs[i].offset = dd->ndevcntrs;
12409                        for (j = 0; j < C_VL_COUNT; j++) {
12410                                snprintf(name, C_MAX_NAME, "%s%d",
12411                                         dev_cntrs[i].name, vl_from_idx(j));
12412                                sz += strlen(name);
12413                                /* Add ",32" for 32-bit counters */
12414                                if (dev_cntrs[i].flags & CNTR_32BIT)
12415                                        sz += bit_type_32_sz;
12416                                sz++;
12417                                dd->ndevcntrs++;
12418                        }
12419                } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12420                        dev_cntrs[i].offset = dd->ndevcntrs;
12421                        for (j = 0; j < dd->chip_sdma_engines; j++) {
12422                                snprintf(name, C_MAX_NAME, "%s%d",
12423                                         dev_cntrs[i].name, j);
12424                                sz += strlen(name);
12425                                /* Add ",32" for 32-bit counters */
12426                                if (dev_cntrs[i].flags & CNTR_32BIT)
12427                                        sz += bit_type_32_sz;
12428                                sz++;
12429                                dd->ndevcntrs++;
12430                        }
12431                } else {
12432                        /* +1 for newline. */
12433                        sz += strlen(dev_cntrs[i].name) + 1;
12434                        /* Add ",32" for 32-bit counters */
12435                        if (dev_cntrs[i].flags & CNTR_32BIT)
12436                                sz += bit_type_32_sz;
12437                        dev_cntrs[i].offset = dd->ndevcntrs;
12438                        dd->ndevcntrs++;
12439                }
12440        }
12441
12442        /* allocate space for the counter values */
12443        dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12444        if (!dd->cntrs)
12445                goto bail;
12446
12447        dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12448        if (!dd->scntrs)
12449                goto bail;
12450
12451        /* allocate space for the counter names */
12452        dd->cntrnameslen = sz;
12453        dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12454        if (!dd->cntrnames)
12455                goto bail;
12456
12457        /* fill in the names */
12458        for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12459                if (dev_cntrs[i].flags & CNTR_DISABLED) {
12460                        /* Nothing */
12461                } else if (dev_cntrs[i].flags & CNTR_VL) {
12462                        for (j = 0; j < C_VL_COUNT; j++) {
12463                                snprintf(name, C_MAX_NAME, "%s%d",
12464                                         dev_cntrs[i].name,
12465                                         vl_from_idx(j));
12466                                memcpy(p, name, strlen(name));
12467                                p += strlen(name);
12468
12469                                /* Counter is 32 bits */
12470                                if (dev_cntrs[i].flags & CNTR_32BIT) {
12471                                        memcpy(p, bit_type_32, bit_type_32_sz);
12472                                        p += bit_type_32_sz;
12473                                }
12474
12475                                *p++ = '\n';
12476                        }
12477                } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12478                        for (j = 0; j < dd->chip_sdma_engines; j++) {
12479                                snprintf(name, C_MAX_NAME, "%s%d",
12480                                         dev_cntrs[i].name, j);
12481                                memcpy(p, name, strlen(name));
12482                                p += strlen(name);
12483
12484                                /* Counter is 32 bits */
12485                                if (dev_cntrs[i].flags & CNTR_32BIT) {
12486                                        memcpy(p, bit_type_32, bit_type_32_sz);
12487                                        p += bit_type_32_sz;
12488                                }
12489
12490                                *p++ = '\n';
12491                        }
12492                } else {
12493                        memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12494                        p += strlen(dev_cntrs[i].name);
12495
12496                        /* Counter is 32 bits */
12497                        if (dev_cntrs[i].flags & CNTR_32BIT) {
12498                                memcpy(p, bit_type_32, bit_type_32_sz);
12499                                p += bit_type_32_sz;
12500                        }
12501
12502                        *p++ = '\n';
12503                }
12504        }
12505
12506        /*********************/
12507        /* per port counters */
12508        /*********************/
12509
12510        /*
12511         * Go through the counters for the overflows and disable the ones we
12512         * don't need. This varies based on platform so we need to do it
12513         * dynamically here.
12514         */
12515        rcv_ctxts = dd->num_rcv_contexts;
12516        for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12517             i <= C_RCV_HDR_OVF_LAST; i++) {
12518                port_cntrs[i].flags |= CNTR_DISABLED;
12519        }
12520
12521        /* size port counter names and determine how many we have */
12522        sz = 0;
12523        dd->nportcntrs = 0;
12524        for (i = 0; i < PORT_CNTR_LAST; i++) {
12525                if (port_cntrs[i].flags & CNTR_DISABLED) {
12526                        hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12527                        continue;
12528                }
12529
12530                if (port_cntrs[i].flags & CNTR_VL) {
12531                        port_cntrs[i].offset = dd->nportcntrs;
12532                        for (j = 0; j < C_VL_COUNT; j++) {
12533                                snprintf(name, C_MAX_NAME, "%s%d",
12534                                         port_cntrs[i].name, vl_from_idx(j));
12535                                sz += strlen(name);
12536                                /* Add ",32" for 32-bit counters */
12537                                if (port_cntrs[i].flags & CNTR_32BIT)
12538                                        sz += bit_type_32_sz;
12539                                sz++;
12540                                dd->nportcntrs++;
12541                        }
12542                } else {
12543                        /* +1 for newline */
12544                        sz += strlen(port_cntrs[i].name) + 1;
12545                        /* Add ",32" for 32-bit counters */
12546                        if (port_cntrs[i].flags & CNTR_32BIT)
12547                                sz += bit_type_32_sz;
12548                        port_cntrs[i].offset = dd->nportcntrs;
12549                        dd->nportcntrs++;
12550                }
12551        }
12552
12553        /* allocate space for the counter names */
12554        dd->portcntrnameslen = sz;
12555        dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12556        if (!dd->portcntrnames)
12557                goto bail;
12558
12559        /* fill in port cntr names */
12560        for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12561                if (port_cntrs[i].flags & CNTR_DISABLED)
12562                        continue;
12563
12564                if (port_cntrs[i].flags & CNTR_VL) {
12565                        for (j = 0; j < C_VL_COUNT; j++) {
12566                                snprintf(name, C_MAX_NAME, "%s%d",
12567                                         port_cntrs[i].name, vl_from_idx(j));
12568                                memcpy(p, name, strlen(name));
12569                                p += strlen(name);
12570
12571                                /* Counter is 32 bits */
12572                                if (port_cntrs[i].flags & CNTR_32BIT) {
12573                                        memcpy(p, bit_type_32, bit_type_32_sz);
12574                                        p += bit_type_32_sz;
12575                                }
12576
12577                                *p++ = '\n';
12578                        }
12579                } else {
12580                        memcpy(p, port_cntrs[i].name,
12581                               strlen(port_cntrs[i].name));
12582                        p += strlen(port_cntrs[i].name);
12583
12584                        /* Counter is 32 bits */
12585                        if (port_cntrs[i].flags & CNTR_32BIT) {
12586                                memcpy(p, bit_type_32, bit_type_32_sz);
12587                                p += bit_type_32_sz;
12588                        }
12589
12590                        *p++ = '\n';
12591                }
12592        }
12593
12594        /* allocate per port storage for counter values */
12595        ppd = (struct hfi1_pportdata *)(dd + 1);
12596        for (i = 0; i < dd->num_pports; i++, ppd++) {
12597                ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12598                if (!ppd->cntrs)
12599                        goto bail;
12600
12601                ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12602                if (!ppd->scntrs)
12603                        goto bail;
12604        }
12605
12606        /* CPU counters need to be allocated and zeroed */
12607        if (init_cpu_counters(dd))
12608                goto bail;
12609
12610        dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12611                                                     WQ_MEM_RECLAIM, dd->unit);
12612        if (!dd->update_cntr_wq)
12613                goto bail;
12614
12615        INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12616
12617        mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12618        return 0;
12619bail:
12620        free_cntrs(dd);
12621        return -ENOMEM;
12622}
12623
12624static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12625{
12626        switch (chip_lstate) {
12627        default:
12628                dd_dev_err(dd,
12629                           "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12630                           chip_lstate);
12631                /* fall through */
12632        case LSTATE_DOWN:
12633                return IB_PORT_DOWN;
12634        case LSTATE_INIT:
12635                return IB_PORT_INIT;
12636        case LSTATE_ARMED:
12637                return IB_PORT_ARMED;
12638        case LSTATE_ACTIVE:
12639                return IB_PORT_ACTIVE;
12640        }
12641}
12642
12643u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12644{
12645        /* look at the HFI meta-states only */
12646        switch (chip_pstate & 0xf0) {
12647        default:
12648                dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12649                           chip_pstate);
12650                /* fall through */
12651        case PLS_DISABLED:
12652                return IB_PORTPHYSSTATE_DISABLED;
12653        case PLS_OFFLINE:
12654                return OPA_PORTPHYSSTATE_OFFLINE;
12655        case PLS_POLLING:
12656                return IB_PORTPHYSSTATE_POLLING;
12657        case PLS_CONFIGPHY:
12658                return IB_PORTPHYSSTATE_TRAINING;
12659        case PLS_LINKUP:
12660                return IB_PORTPHYSSTATE_LINKUP;
12661        case PLS_PHYTEST:
12662                return IB_PORTPHYSSTATE_PHY_TEST;
12663        }
12664}
12665
12666/* return the OPA port logical state name */
12667const char *opa_lstate_name(u32 lstate)
12668{
12669        static const char * const port_logical_names[] = {
12670                "PORT_NOP",
12671                "PORT_DOWN",
12672                "PORT_INIT",
12673                "PORT_ARMED",
12674                "PORT_ACTIVE",
12675                "PORT_ACTIVE_DEFER",
12676        };
12677        if (lstate < ARRAY_SIZE(port_logical_names))
12678                return port_logical_names[lstate];
12679        return "unknown";
12680}
12681
12682/* return the OPA port physical state name */
12683const char *opa_pstate_name(u32 pstate)
12684{
12685        static const char * const port_physical_names[] = {
12686                "PHYS_NOP",
12687                "reserved1",
12688                "PHYS_POLL",
12689                "PHYS_DISABLED",
12690                "PHYS_TRAINING",
12691                "PHYS_LINKUP",
12692                "PHYS_LINK_ERR_RECOVER",
12693                "PHYS_PHY_TEST",
12694                "reserved8",
12695                "PHYS_OFFLINE",
12696                "PHYS_GANGED",
12697                "PHYS_TEST",
12698        };
12699        if (pstate < ARRAY_SIZE(port_physical_names))
12700                return port_physical_names[pstate];
12701        return "unknown";
12702}
12703
12704static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
12705{
12706        /*
12707         * Set port status flags in the page mapped into userspace
12708         * memory. Do it here to ensure a reliable state - this is
12709         * the only function called by all state handling code.
12710         * Always set the flags because the cache value might
12711         * have been changed explicitly outside of this
12712         * function.
12713         */
12714        if (ppd->statusp) {
12715                switch (state) {
12716                case IB_PORT_DOWN:
12717                case IB_PORT_INIT:
12718                        *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12719                                           HFI1_STATUS_IB_READY);
12720                        break;
12721                case IB_PORT_ARMED:
12722                        *ppd->statusp |= HFI1_STATUS_IB_CONF;
12723                        break;
12724                case IB_PORT_ACTIVE:
12725                        *ppd->statusp |= HFI1_STATUS_IB_READY;
12726                        break;
12727                }
12728        }
12729}
12730
12731/*
12732 * wait_logical_linkstate - wait for an IB link state change to occur
12733 * @ppd: port device
12734 * @state: the state to wait for
12735 * @msecs: the number of milliseconds to wait
12736 *
12737 * Wait up to msecs milliseconds for IB link state change to occur.
12738 * For now, take the easy polling route.
12739 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12740 */
12741static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12742                                  int msecs)
12743{
12744        unsigned long timeout;
12745        u32 new_state;
12746
12747        timeout = jiffies + msecs_to_jiffies(msecs);
12748        while (1) {
12749                new_state = chip_to_opa_lstate(ppd->dd,
12750                                               read_logical_state(ppd->dd));
12751                if (new_state == state)
12752                        break;
12753                if (time_after(jiffies, timeout)) {
12754                        dd_dev_err(ppd->dd,
12755                                   "timeout waiting for link state 0x%x\n",
12756                                   state);
12757                        return -ETIMEDOUT;
12758                }
12759                msleep(20);
12760        }
12761
12762        update_statusp(ppd, state);
12763        dd_dev_info(ppd->dd,
12764                    "logical state changed to %s (0x%x)\n",
12765                    opa_lstate_name(state),
12766                    state);
12767        return 0;
12768}
12769
12770static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
12771{
12772        u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
12773
12774        dd_dev_info(ppd->dd,
12775                    "physical state changed to %s (0x%x), phy 0x%x\n",
12776                    opa_pstate_name(ib_pstate), ib_pstate, state);
12777}
12778
12779/*
12780 * Read the physical hardware link state and check if it matches the host
12781 * driver's anticipated state.
12782 */
12783static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
12784{
12785        u32 read_state = read_physical_state(ppd->dd);
12786
12787        if (read_state == state) {
12788                log_state_transition(ppd, state);
12789        } else {
12790                dd_dev_err(ppd->dd,
12791                           "anticipated phy link state 0x%x, read 0x%x\n",
12792                           state, read_state);
12793        }
12794}
12795
12796/*
12797 * wait_physical_linkstate - wait for a physical link state change to occur
12798 * @ppd: port device
12799 * @state: the state to wait for
12800 * @msecs: the number of milliseconds to wait
12801 *
12802 * Wait up to msecs milliseconds for physical link state change to occur.
12803 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12804 */
12805static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12806                                   int msecs)
12807{
12808        u32 read_state;
12809        unsigned long timeout;
12810
12811        timeout = jiffies + msecs_to_jiffies(msecs);
12812        while (1) {
12813                read_state = read_physical_state(ppd->dd);
12814                if (read_state == state)
12815                        break;
12816                if (time_after(jiffies, timeout)) {
12817                        dd_dev_err(ppd->dd,
12818                                   "timeout waiting for phy link state 0x%x\n",
12819                                   state);
12820                        return -ETIMEDOUT;
12821                }
12822                usleep_range(1950, 2050); /* sleep 2ms-ish */
12823        }
12824
12825        log_state_transition(ppd, state);
12826        return 0;
12827}
12828
12829/*
12830 * wait_phys_link_offline_substates - wait for any offline substate
12831 * @ppd: port device
12832 * @msecs: the number of milliseconds to wait
12833 *
12834 * Wait up to msecs milliseconds for any offline physical link
12835 * state change to occur.
12836 * Returns the read physical state on success, otherwise -ETIMEDOUT.
12837 */
12838static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
12839                                            int msecs)
12840{
12841        u32 read_state;
12842        unsigned long timeout;
12843
12844        timeout = jiffies + msecs_to_jiffies(msecs);
12845        while (1) {
12846                read_state = read_physical_state(ppd->dd);
12847                if ((read_state & 0xF0) == PLS_OFFLINE)
12848                        break;
12849                if (time_after(jiffies, timeout)) {
12850                        dd_dev_err(ppd->dd,
12851                                   "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
12852                                   read_state, msecs);
12853                        return -ETIMEDOUT;
12854                }
12855                usleep_range(1950, 2050); /* sleep 2ms-ish */
12856        }
12857
12858        log_state_transition(ppd, read_state);
12859        return read_state;
12860}
12861
12862#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12863(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12864
12865#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12866(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12867
12868void hfi1_init_ctxt(struct send_context *sc)
12869{
12870        if (sc) {
12871                struct hfi1_devdata *dd = sc->dd;
12872                u64 reg;
12873                u8 set = (sc->type == SC_USER ?
12874                          HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12875                          HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
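                     /*
                      * The CSR bit is a "disallow" bit, so the sense is
                      * inverted: when static rate control is enabled for this
                      * context type, clear the disallow bit; otherwise set it.
                      */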
12876                reg = read_kctxt_csr(dd, sc->hw_context,
12877                                     SEND_CTXT_CHECK_ENABLE);
12878                if (set)
12879                        CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12880                else
12881                        SET_STATIC_RATE_CONTROL_SMASK(reg);
12882                write_kctxt_csr(dd, sc->hw_context,
12883                                SEND_CTXT_CHECK_ENABLE, reg);
12884        }
12885}
12886
12887int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12888{
12889        int ret = 0;
12890        u64 reg;
12891
12892        if (dd->icode != ICODE_RTL_SILICON) {
12893                if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12894                        dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12895                                    __func__);
12896                return -EINVAL;
12897        }
12898        reg = read_csr(dd, ASIC_STS_THERM);
12899        temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12900                      ASIC_STS_THERM_CURR_TEMP_MASK);
12901        temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12902                        ASIC_STS_THERM_LO_TEMP_MASK);
12903        temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12904                        ASIC_STS_THERM_HI_TEMP_MASK);
12905        temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12906                          ASIC_STS_THERM_CRIT_TEMP_MASK);
12907        /* triggers is a 3-bit value - 1 bit per trigger. */
12908        temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12909
12910        return ret;
12911}
12912
12913/* ========================================================================= */
12914
12915/*
12916 * Enable/disable the chip from delivering interrupts.
12917 */
12918void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12919{
12920        int i;
12921
12922        /*
12923         * In HFI, the mask needs to be 1 to allow interrupts.
12924         */
12925        if (enable) {
12926                /* enable all interrupts */
12927                for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12928                        write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
12929
12930                init_qsfp_int(dd);
12931        } else {
12932                for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12933                        write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
12934        }
12935}
12936
12937/*
12938 * Clear all interrupt sources on the chip.
12939 */
12940static void clear_all_interrupts(struct hfi1_devdata *dd)
12941{
12942        int i;
12943
12944        for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12945                write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
12946
12947        write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12948        write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12949        write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12950        write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12951        write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12952        write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12953        write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12954        for (i = 0; i < dd->chip_send_contexts; i++)
12955                write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12956        for (i = 0; i < dd->chip_sdma_engines; i++)
12957                write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12958
12959        write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12960        write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12961        write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12962}
12963
12964/* Move to pcie.c? */
12965static void disable_intx(struct pci_dev *pdev)
12966{
12967        pci_intx(pdev, 0);
12968}
12969
12970static void clean_up_interrupts(struct hfi1_devdata *dd)
12971{
12972        int i;
12973
12974        /* remove irqs - must happen before disabling/turning off */
12975        if (dd->num_msix_entries) {
12976                /* MSI-X */
12977                struct hfi1_msix_entry *me = dd->msix_entries;
12978
12979                for (i = 0; i < dd->num_msix_entries; i++, me++) {
12980                        if (!me->arg) /* => no irq, no affinity */
12981                                continue;
12982                        hfi1_put_irq_affinity(dd, me);
12983                        free_irq(me->irq, me->arg);
12984                }
12985
12986                /* clean structures */
12987                kfree(dd->msix_entries);
12988                dd->msix_entries = NULL;
12989                dd->num_msix_entries = 0;
12990        } else {
12991                /* INTx */
12992                if (dd->requested_intx_irq) {
12993                        free_irq(dd->pcidev->irq, dd);
12994                        dd->requested_intx_irq = 0;
12995                }
12996                disable_intx(dd->pcidev);
12997        }
12998
12999        pci_free_irq_vectors(dd->pcidev);
13000}
13001
13002/*
13003 * Remap the interrupt source from the general handler to the given MSI-X
13004 * interrupt.
13005 */
13006static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
13007{
13008        u64 reg;
13009        int m, n;
13010
13011        /* clear from the handled mask of the general interrupt */
13012        m = isrc / 64;
13013        n = isrc % 64;
13014        if (likely(m < CCE_NUM_INT_CSRS)) {
13015                dd->gi_mask[m] &= ~((u64)1 << n);
13016        } else {
13017                dd_dev_err(dd, "remap interrupt err\n");
13018                return;
13019        }
13020
13021        /* direct the chip source to the given MSI-X interrupt */
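             /*
              * Each CCE_INT_MAP CSR covers 8 sources, one byte per source,
              * naming the MSI-X vector that source is steered to.
              */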
13022        m = isrc / 8;
13023        n = isrc % 8;
13024        reg = read_csr(dd, CCE_INT_MAP + (8 * m));
13025        reg &= ~((u64)0xff << (8 * n));
13026        reg |= ((u64)msix_intr & 0xff) << (8 * n);
13027        write_csr(dd, CCE_INT_MAP + (8 * m), reg);
13028}
13029
13030static void remap_sdma_interrupts(struct hfi1_devdata *dd,
13031                                  int engine, int msix_intr)
13032{
13033        /*
13034         * SDMA engine interrupt sources are grouped by type, rather than
13035         * by engine.  Per-engine interrupts are as follows:
13036         *      SDMA
13037         *      SDMAProgress
13038         *      SDMAIdle
13039         */
13040        remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
13041                   msix_intr);
13042        remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
13043                   msix_intr);
13044        remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
13045                   msix_intr);
13046}
13047
13048static int request_intx_irq(struct hfi1_devdata *dd)
13049{
13050        int ret;
13051
13052        snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
13053                 dd->unit);
13054        ret = request_irq(dd->pcidev->irq, general_interrupt,
13055                          IRQF_SHARED, dd->intx_name, dd);
13056        if (ret)
13057                dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
13058                           ret);
13059        else
13060                dd->requested_intx_irq = 1;
13061        return ret;
13062}
13063
13064static int request_msix_irqs(struct hfi1_devdata *dd)
13065{
13066        int first_general, last_general;
13067        int first_sdma, last_sdma;
13068        int first_rx, last_rx;
13069        int i, ret = 0;
13070
13071        /* calculate the ranges we are going to use */
13072        first_general = 0;
13073        last_general = first_general + 1;
13074        first_sdma = last_general;
13075        last_sdma = first_sdma + dd->num_sdma;
13076        first_rx = last_sdma;
13077        last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
13078
13079        /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
13080        dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
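             /*
              * Resulting vector layout: [0] is the general "slow path"
              * interrupt, then one vector per SDMA engine, then one per
              * kernel receive context, with the remainder reserved for VNIC
              * contexts.
              */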
13081
13082        /*
13083         * Sanity check - the code expects all SDMA chip source
13084         * interrupts to be in the same CSR, starting at bit 0.  Verify
13085         * that this is true by checking the bit location of the start.
13086         */
13087        BUILD_BUG_ON(IS_SDMA_START % 64);
13088
13089        for (i = 0; i < dd->num_msix_entries; i++) {
13090                struct hfi1_msix_entry *me = &dd->msix_entries[i];
13091                const char *err_info;
13092                irq_handler_t handler;
13093                irq_handler_t thread = NULL;
13094                void *arg = NULL;
13095                int idx;
13096                struct hfi1_ctxtdata *rcd = NULL;
13097                struct sdma_engine *sde = NULL;
13098
13099                /* obtain the arguments to request_irq */
13100                if (first_general <= i && i < last_general) {
13101                        idx = i - first_general;
13102                        handler = general_interrupt;
13103                        arg = dd;
13104                        snprintf(me->name, sizeof(me->name),
13105                                 DRIVER_NAME "_%d", dd->unit);
13106                        err_info = "general";
13107                        me->type = IRQ_GENERAL;
13108                } else if (first_sdma <= i && i < last_sdma) {
13109                        idx = i - first_sdma;
13110                        sde = &dd->per_sdma[idx];
13111                        handler = sdma_interrupt;
13112                        arg = sde;
13113                        snprintf(me->name, sizeof(me->name),
13114                                 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
13115                        err_info = "sdma";
13116                        remap_sdma_interrupts(dd, idx, i);
13117                        me->type = IRQ_SDMA;
13118                } else if (first_rx <= i && i < last_rx) {
13119                        idx = i - first_rx;
13120                        rcd = hfi1_rcd_get_by_index(dd, idx);
13121                        if (rcd) {
13122                                /*
13123                                 * Set the interrupt register and mask for this
13124                                 * context's interrupt.
13125                                 */
13126                                rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13127                                rcd->imask = ((u64)1) <<
13128                                          ((IS_RCVAVAIL_START + idx) % 64);
13129                                handler = receive_context_interrupt;
13130                                thread = receive_context_thread;
13131                                arg = rcd;
13132                                snprintf(me->name, sizeof(me->name),
13133                                         DRIVER_NAME "_%d kctxt%d",
13134                                         dd->unit, idx);
13135                                err_info = "receive context";
13136                                remap_intr(dd, IS_RCVAVAIL_START + idx, i);
13137                                me->type = IRQ_RCVCTXT;
13138                                rcd->msix_intr = i;
13139                                hfi1_rcd_put(rcd);
13140                        }
13141                } else {
13142                        /* not in our expected range - complain, then
13143                         * ignore it
13144                         */
13145                        dd_dev_err(dd,
13146                                   "Unexpected extra MSI-X interrupt %d\n", i);
13147                        continue;
13148                }
13149                /* no argument, no interrupt */
13150                if (!arg)
13151                        continue;
13152                /* make sure the name is terminated */
13153                me->name[sizeof(me->name) - 1] = 0;
13154                me->irq = pci_irq_vector(dd->pcidev, i);
13155                /*
13156                 * On err return me->irq.  Don't need to clear this
13157                 * because 'arg' has not been set, and cleanup will
13158                 * do the right thing.
13159                 */
13160                if (me->irq < 0)
13161                        return me->irq;
13162
13163                ret = request_threaded_irq(me->irq, handler, thread, 0,
13164                                           me->name, arg);
13165                if (ret) {
13166                        dd_dev_err(dd,
13167                                   "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
13168                                   err_info, me->irq, idx, ret);
13169                        return ret;
13170                }
13171                /*
13172                 * assign arg after request_irq call, so it will be
13173                 * cleaned up
13174                 */
13175                me->arg = arg;
13176
13177                ret = hfi1_get_irq_affinity(dd, me);
13178                if (ret)
13179                        dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
13180        }
13181
13182        return ret;
13183}
13184
13185void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
13186{
13187        int i;
13188
13189        if (!dd->num_msix_entries) {
13190                synchronize_irq(dd->pcidev->irq);
13191                return;
13192        }
13193
13194        for (i = 0; i < dd->vnic.num_ctxt; i++) {
13195                struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
13196                struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13197
13198                synchronize_irq(me->irq);
13199        }
13200}
13201
13202void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13203{
13204        struct hfi1_devdata *dd = rcd->dd;
13205        struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13206
13207        if (!me->arg) /* => no irq, no affinity */
13208                return;
13209
13210        hfi1_put_irq_affinity(dd, me);
13211        free_irq(me->irq, me->arg);
13212
13213        me->arg = NULL;
13214}
13215
13216void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13217{
13218        struct hfi1_devdata *dd = rcd->dd;
13219        struct hfi1_msix_entry *me;
13220        int idx = rcd->ctxt;
13221        void *arg = rcd;
13222        int ret;
13223
13224        rcd->msix_intr = dd->vnic.msix_idx++;
13225        me = &dd->msix_entries[rcd->msix_intr];
13226
13227        /*
13228         * Set the interrupt register and mask for this
13229         * context's interrupt.
13230         */
13231        rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13232        rcd->imask = ((u64)1) <<
13233                  ((IS_RCVAVAIL_START + idx) % 64);
13234
13235        snprintf(me->name, sizeof(me->name),
13236                 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
13237        me->name[sizeof(me->name) - 1] = 0;
13238        me->type = IRQ_RCVCTXT;
13239        me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
13240        if (me->irq < 0) {
13241                dd_dev_err(dd, "vnic irq vector request (idx %d) fail %d\n",
13242                           idx, me->irq);
13243                return;
13244        }
13245        remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
13246
13247        ret = request_threaded_irq(me->irq, receive_context_interrupt,
13248                                   receive_context_thread, 0, me->name, arg);
13249        if (ret) {
13250                dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
13251                           me->irq, idx, ret);
13252                return;
13253        }
13254        /*
13255         * assign arg after request_irq call, so it will be
13256         * cleaned up
13257         */
13258        me->arg = arg;
13259
13260        ret = hfi1_get_irq_affinity(dd, me);
13261        if (ret) {
13262                dd_dev_err(dd,
13263                           "unable to pin IRQ %d\n", ret);
13264                free_irq(me->irq, me->arg);
13265        }
13266}
13267
13268/*
13269 * Set the general handler to accept all interrupts, remap all
13270 * chip interrupts back to MSI-X 0.
13271 */
13272static void reset_interrupts(struct hfi1_devdata *dd)
13273{
13274        int i;
13275
13276        /* all interrupts handled by the general handler */
13277        for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13278                dd->gi_mask[i] = ~(u64)0;
13279
13280        /* all chip interrupts map to MSI-X 0 */
13281        for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13282                write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13283}
13284
13285static int set_up_interrupts(struct hfi1_devdata *dd)
13286{
13287        u32 total;
13288        int ret, request;
13289        int single_interrupt = 0; /* we expect to have all the interrupts */
13290
13291        /*
13292         * Interrupt count:
13293         *      1 general, "slow path" interrupt (includes the SDMA engines
13294         *              slow source, SDMACleanupDone)
13295         *      N interrupts - one per used SDMA engine
13296         *      M interrupts - one per kernel receive context
13297         */
13298        total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
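        /*
         * Worked example with hypothetical counts: 16 SDMA engines and
         * 9 kernel receive queues would request
         * 1 + 16 + 9 + HFI1_NUM_VNIC_CTXT MSI-X vectors below (the
         * dedicated VNIC contexts are reserved in the total as well).
         */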
13299
13300        /* ask for MSI-X interrupts */
13301        request = request_msix(dd, total);
13302        if (request < 0) {
13303                ret = request;
13304                goto fail;
13305        } else if (request == 0) {
13306                /* using INTx */
13307                /* dd->num_msix_entries already zero */
13308                single_interrupt = 1;
13309                dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
13310        } else if (request < total) {
13311                /* using MSI-X, with reduced interrupts */
13312                dd_dev_err(dd, "reduced interrupt found, wanted %u, got %u\n",
13313                           total, request);
13314                ret = -EINVAL;
13315                goto fail;
13316        } else {
13317                dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
13318                                           GFP_KERNEL);
13319                if (!dd->msix_entries) {
13320                        ret = -ENOMEM;
13321                        goto fail;
13322                }
13323                /* using MSI-X */
13324                dd->num_msix_entries = total;
13325                dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
13326        }
13327
13328        /* mask all interrupts */
13329        set_intr_state(dd, 0);
13330        /* clear all pending interrupts */
13331        clear_all_interrupts(dd);
13332
13333        /* reset general handler mask, chip MSI-X mappings */
13334        reset_interrupts(dd);
13335
13336        if (single_interrupt)
13337                ret = request_intx_irq(dd);
13338        else
13339                ret = request_msix_irqs(dd);
13340        if (ret)
13341                goto fail;
13342
13343        return 0;
13344
13345fail:
13346        clean_up_interrupts(dd);
13347        return ret;
13348}
13349
13350/*
13351 * Set up context values in dd.  Sets:
13352 *
13353 *      num_rcv_contexts - number of contexts being used
13354 *      n_krcv_queues - number of kernel contexts
13355 *      first_dyn_alloc_ctxt - first dynamically allocated context
13356 *                             in array of contexts
13357 *      freectxts  - number of free user contexts
13358 *      num_send_contexts - number of PIO send contexts being used
13359 */
13360static int set_up_context_variables(struct hfi1_devdata *dd)
13361{
13362        unsigned long num_kernel_contexts;
13363        int total_contexts;
13364        int ret;
13365        unsigned ngroups;
13366        int qos_rmt_count;
13367        int user_rmt_reduced;
13368
13369        /*
13370         * Kernel receive contexts:
13371         * - Context 0 - control context (VL15/multicast/error)
13372         * - Context 1 - first kernel context
13373         * - Context 2 - second kernel context
13374         * ...
13375         */
13376        if (n_krcvqs)
13377                /*
13378                 * n_krcvqs is the sum of module parameter kernel receive
13379                 * contexts, krcvqs[].  It does not include the control
13380                 * context, so add that.
13381                 */
13382                num_kernel_contexts = n_krcvqs + 1;
13383        else
13384                num_kernel_contexts = DEFAULT_KRCVQS + 1;
13385        /*
13386         * Every kernel receive context needs an ACK send context.
13387         * One send context is allocated for each VL{0-7} and VL15.
13388         */
13389        if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
13390                dd_dev_err(dd,
13391                           "Reducing # kernel rcv contexts to: %d, from %lu\n",
13392                           (int)(dd->chip_send_contexts - num_vls - 1),
13393                           num_kernel_contexts);
13394                num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
13395        }
13396        /*
13397         * User contexts:
13398         *      - default to 1 user context per real (non-HT) CPU core if
13399         *        num_user_contexts is negative
13400         */
13401        if (num_user_contexts < 0)
13402                num_user_contexts =
13403                        cpumask_weight(&node_affinity.real_cpu_mask);
13404
13405        total_contexts = num_kernel_contexts + num_user_contexts;
13406
13407        /*
13408         * Adjust the counts given a global max.
13409         */
13410        if (total_contexts > dd->chip_rcv_contexts) {
13411                dd_dev_err(dd,
13412                           "Reducing # user receive contexts to: %d, from %d\n",
13413                           (int)(dd->chip_rcv_contexts - num_kernel_contexts),
13414                           (int)num_user_contexts);
13415                num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
13416                /* recalculate */
13417                total_contexts = num_kernel_contexts + num_user_contexts;
13418        }
13419
13420        /* each user context requires an entry in the RMT */
13421        qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
13422        if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
13423                user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
13424                dd_dev_err(dd,
13425                           "RMT size is reducing the number of user receive contexts from %d to %d\n",
13426                           (int)num_user_contexts,
13427                           user_rmt_reduced);
13428                /* recalculate */
13429                num_user_contexts = user_rmt_reduced;
13430                total_contexts = num_kernel_contexts + num_user_contexts;
13431        }
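        /*
         * Illustrative example (hypothetical numbers): if QOS reserved
         * 32 of the 256 RSM map entries and 240 user contexts were
         * requested, the user context count would be trimmed to
         * 256 - 32 = 224 by the block above.
         */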
13432
13433        /* Accommodate VNIC contexts */
13434        if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts)
13435                total_contexts += HFI1_NUM_VNIC_CTXT;
13436
13437        /* the first N are kernel contexts, the rest are user/vnic contexts */
13438        dd->num_rcv_contexts = total_contexts;
13439        dd->n_krcv_queues = num_kernel_contexts;
13440        dd->first_dyn_alloc_ctxt = num_kernel_contexts;
13441        dd->num_user_contexts = num_user_contexts;
13442        dd->freectxts = num_user_contexts;
13443        dd_dev_info(dd,
13444                    "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
13445                    (int)dd->chip_rcv_contexts,
13446                    (int)dd->num_rcv_contexts,
13447                    (int)dd->n_krcv_queues,
13448                    (int)dd->num_rcv_contexts - dd->n_krcv_queues);
13449
13450        /*
13451         * Receive array allocation:
13452         *   All RcvArray entries are divided into groups of 8. This
13453         *   is required by the hardware and will speed up writes to
13454         *   consecutive entries by using write-combining of the entire
13455         *   cacheline.
13456         *
13457         *   The number of groups is evenly divided among all contexts;
13458         *   any leftover groups will be given to the first N user
13459         *   contexts.
13460         */
13461        dd->rcv_entries.group_size = RCV_INCREMENT;
13462        ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
13463        dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13464        dd->rcv_entries.nctxt_extra = ngroups -
13465                (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
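        /*
         * Illustrative split (hypothetical sizes): with 2048 RcvArray
         * entries, a group size of 8 and 40 receive contexts there are
         * 256 groups; each context gets 256 / 40 = 6 groups and the
         * 256 - (40 * 6) = 16 leftover groups go to the first user
         * contexts as nctxt_extra.
         */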
13466        dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13467                    dd->rcv_entries.ngroups,
13468                    dd->rcv_entries.nctxt_extra);
13469        if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13470            MAX_EAGER_ENTRIES * 2) {
13471                dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13472                        dd->rcv_entries.group_size;
13473                dd_dev_info(dd,
13474                            "RcvArray group count too high, change to %u\n",
13475                            dd->rcv_entries.ngroups);
13476                dd->rcv_entries.nctxt_extra = 0;
13477        }
13478        /*
13479         * PIO send contexts
13480         */
13481        ret = init_sc_pools_and_sizes(dd);
13482        if (ret >= 0) { /* success */
13483                dd->num_send_contexts = ret;
13484                dd_dev_info(
13485                        dd,
13486                        "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13487                        dd->chip_send_contexts,
13488                        dd->num_send_contexts,
13489                        dd->sc_sizes[SC_KERNEL].count,
13490                        dd->sc_sizes[SC_ACK].count,
13491                        dd->sc_sizes[SC_USER].count,
13492                        dd->sc_sizes[SC_VL15].count);
13493                ret = 0;        /* success */
13494        }
13495
13496        return ret;
13497}
13498
13499/*
13500 * Set the device/port partition key table. The MAD code
13501 * will ensure that, at least, the partial management
13502 * partition key is present in the table.
13503 */
13504static void set_partition_keys(struct hfi1_pportdata *ppd)
13505{
13506        struct hfi1_devdata *dd = ppd->dd;
13507        u64 reg = 0;
13508        int i;
13509
13510        dd_dev_info(dd, "Setting partition keys\n");
13511        for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13512                reg |= (ppd->pkeys[i] &
13513                        RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13514                        ((i % 4) *
13515                         RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13516                /* Each register holds 4 PKey values. */
13517                if ((i % 4) == 3) {
13518                        write_csr(dd, RCV_PARTITION_KEY +
13519                                  ((i - 3) * 2), reg);
13520                        reg = 0;
13521                }
13522        }
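        /*
         * Note on the addressing above: (i - 3) * 2 is the byte offset
         * of the 64-bit RcvPartitionKey register that holds pkeys
         * i-3..i, e.g. i = 7 writes offset 8, the second register.
         */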
13523
13524        /* Always enable HW pkeys check when pkeys table is set */
13525        add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13526}
13527
13528/*
13529 * These CSRs and memories are uninitialized on reset and must be
13530 * written before reading to set the ECC/parity bits.
13531 *
13532 * NOTE: All user context CSRs that are not mmapped write-only
13533 * (e.g. the TID flows) must be initialized even if the driver never
13534 * reads them.
13535 */
13536static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13537{
13538        int i, j;
13539
13540        /* CceIntMap */
13541        for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13542                write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13543
13544        /* SendCtxtCreditReturnAddr */
13545        for (i = 0; i < dd->chip_send_contexts; i++)
13546                write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13547
13548        /* PIO Send buffers */
13549        /* SDMA Send buffers */
13550        /*
13551         * These are not normally read, and (presently) have no method
13552         * to be read, so are not pre-initialized
13553         */
13554
13555        /* RcvHdrAddr */
13556        /* RcvHdrTailAddr */
13557        /* RcvTidFlowTable */
13558        for (i = 0; i < dd->chip_rcv_contexts; i++) {
13559                write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13560                write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13561                for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13562                        write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13563        }
13564
13565        /* RcvArray */
13566        for (i = 0; i < dd->chip_rcv_array_count; i++)
13567                hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
13568
13569        /* RcvQPMapTable */
13570        for (i = 0; i < 32; i++)
13571                write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13572}
13573
13574/*
13575 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13576 */
13577static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13578                             u64 ctrl_bits)
13579{
13580        unsigned long timeout;
13581        u64 reg;
13582
13583        /* is the condition present? */
13584        reg = read_csr(dd, CCE_STATUS);
13585        if ((reg & status_bits) == 0)
13586                return;
13587
13588        /* clear the condition */
13589        write_csr(dd, CCE_CTRL, ctrl_bits);
13590
13591        /* wait for the condition to clear */
13592        timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13593        while (1) {
13594                reg = read_csr(dd, CCE_STATUS);
13595                if ((reg & status_bits) == 0)
13596                        return;
13597                if (time_after(jiffies, timeout)) {
13598                        dd_dev_err(dd,
13599                                   "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13600                                   status_bits, reg & status_bits);
13601                        return;
13602                }
13603                udelay(1);
13604        }
13605}
13606
13607/* set CCE CSRs to chip reset defaults */
13608static void reset_cce_csrs(struct hfi1_devdata *dd)
13609{
13610        int i;
13611
13612        /* CCE_REVISION read-only */
13613        /* CCE_REVISION2 read-only */
13614        /* CCE_CTRL - bits clear automatically */
13615        /* CCE_STATUS read-only, use CceCtrl to clear */
13616        clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13617        clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13618        clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13619        for (i = 0; i < CCE_NUM_SCRATCH; i++)
13620                write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13621        /* CCE_ERR_STATUS read-only */
13622        write_csr(dd, CCE_ERR_MASK, 0);
13623        write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13624        /* CCE_ERR_FORCE leave alone */
13625        for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13626                write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13627        write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13628        /* CCE_PCIE_CTRL leave alone */
13629        for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13630                write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13631                write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13632                          CCE_MSIX_TABLE_UPPER_RESETCSR);
13633        }
13634        for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13635                /* CCE_MSIX_PBA read-only */
13636                write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13637                write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13638        }
13639        for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13640                write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13641        for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13642                /* CCE_INT_STATUS read-only */
13643                write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13644                write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13645                /* CCE_INT_FORCE leave alone */
13646                /* CCE_INT_BLOCKED read-only */
13647        }
13648        for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13649                write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13650}
13651
13652/* set MISC CSRs to chip reset defaults */
13653static void reset_misc_csrs(struct hfi1_devdata *dd)
13654{
13655        int i;
13656
13657        for (i = 0; i < 32; i++) {
13658                write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13659                write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13660                write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13661        }
13662        /*
13663         * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13664         * only be written in 128-byte chunks
13665         */
13666        /* init RSA engine to clear lingering errors */
13667        write_csr(dd, MISC_CFG_RSA_CMD, 1);
13668        write_csr(dd, MISC_CFG_RSA_MU, 0);
13669        write_csr(dd, MISC_CFG_FW_CTRL, 0);
13670        /* MISC_STS_8051_DIGEST read-only */
13671        /* MISC_STS_SBM_DIGEST read-only */
13672        /* MISC_STS_PCIE_DIGEST read-only */
13673        /* MISC_STS_FAB_DIGEST read-only */
13674        /* MISC_ERR_STATUS read-only */
13675        write_csr(dd, MISC_ERR_MASK, 0);
13676        write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13677        /* MISC_ERR_FORCE leave alone */
13678}
13679
13680/* set TXE CSRs to chip reset defaults */
13681static void reset_txe_csrs(struct hfi1_devdata *dd)
13682{
13683        int i;
13684
13685        /*
13686         * TXE Kernel CSRs
13687         */
13688        write_csr(dd, SEND_CTRL, 0);
13689        __cm_reset(dd, 0);      /* reset CM internal state */
13690        /* SEND_CONTEXTS read-only */
13691        /* SEND_DMA_ENGINES read-only */
13692        /* SEND_PIO_MEM_SIZE read-only */
13693        /* SEND_DMA_MEM_SIZE read-only */
13694        write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13695        pio_reset_all(dd);      /* SEND_PIO_INIT_CTXT */
13696        /* SEND_PIO_ERR_STATUS read-only */
13697        write_csr(dd, SEND_PIO_ERR_MASK, 0);
13698        write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13699        /* SEND_PIO_ERR_FORCE leave alone */
13700        /* SEND_DMA_ERR_STATUS read-only */
13701        write_csr(dd, SEND_DMA_ERR_MASK, 0);
13702        write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13703        /* SEND_DMA_ERR_FORCE leave alone */
13704        /* SEND_EGRESS_ERR_STATUS read-only */
13705        write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13706        write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13707        /* SEND_EGRESS_ERR_FORCE leave alone */
13708        write_csr(dd, SEND_BTH_QP, 0);
13709        write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13710        write_csr(dd, SEND_SC2VLT0, 0);
13711        write_csr(dd, SEND_SC2VLT1, 0);
13712        write_csr(dd, SEND_SC2VLT2, 0);
13713        write_csr(dd, SEND_SC2VLT3, 0);
13714        write_csr(dd, SEND_LEN_CHECK0, 0);
13715        write_csr(dd, SEND_LEN_CHECK1, 0);
13716        /* SEND_ERR_STATUS read-only */
13717        write_csr(dd, SEND_ERR_MASK, 0);
13718        write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13719        /* SEND_ERR_FORCE read-only */
13720        for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13721                write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13722        for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13723                write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13724        for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13725                write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13726        for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13727                write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13728        for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13729                write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13730        write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13731        write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13732        /* SEND_CM_CREDIT_USED_STATUS read-only */
13733        write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13734        write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13735        write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13736        write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13737        write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13738        for (i = 0; i < TXE_NUM_DATA_VL; i++)
13739                write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13740        write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13741        /* SEND_CM_CREDIT_USED_VL read-only */
13742        /* SEND_CM_CREDIT_USED_VL15 read-only */
13743        /* SEND_EGRESS_CTXT_STATUS read-only */
13744        /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13745        write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13746        /* SEND_EGRESS_ERR_INFO read-only */
13747        /* SEND_EGRESS_ERR_SOURCE read-only */
13748
13749        /*
13750         * TXE Per-Context CSRs
13751         */
13752        for (i = 0; i < dd->chip_send_contexts; i++) {
13753                write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13754                write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13755                write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13756                write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13757                write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13758                write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13759                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13760                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13761                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13762                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13763                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13764                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13765        }
13766
13767        /*
13768         * TXE Per-SDMA CSRs
13769         */
13770        for (i = 0; i < dd->chip_sdma_engines; i++) {
13771                write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13772                /* SEND_DMA_STATUS read-only */
13773                write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13774                write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13775                write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13776                /* SEND_DMA_HEAD read-only */
13777                write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13778                write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13779                /* SEND_DMA_IDLE_CNT read-only */
13780                write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13781                write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13782                /* SEND_DMA_DESC_FETCHED_CNT read-only */
13783                /* SEND_DMA_ENG_ERR_STATUS read-only */
13784                write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13785                write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13786                /* SEND_DMA_ENG_ERR_FORCE leave alone */
13787                write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13788                write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13789                write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13790                write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13791                write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13792                write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13793                write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13794        }
13795}
13796
13797/*
13798 * Expect on entry:
13799 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13800 */
13801static void init_rbufs(struct hfi1_devdata *dd)
13802{
13803        u64 reg;
13804        int count;
13805
13806        /*
13807         * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13808         * clear.
13809         */
13810        count = 0;
13811        while (1) {
13812                reg = read_csr(dd, RCV_STATUS);
13813                if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13814                            | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13815                        break;
13816                /*
13817                 * Give up after 1ms - maximum wait time.
13818                 *
13819                 * RBuf size is 136KiB.  Slowest possible is PCIe Gen1 x1 at
13820                 * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
13821                 *      136 KB / (66% * 250MB/s) = 844us
13822                 */
13823                if (count++ > 500) {
13824                        dd_dev_err(dd,
13825                                   "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13826                                   __func__, reg);
13827                        break;
13828                }
13829                udelay(2); /* do not busy-wait the CSR */
13830        }
13831
13832        /* start the init - expect RcvCtrl to be 0 */
13833        write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13834
13835        /*
13836         * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
13837         * period after the write before RcvStatus.RxRbufInitDone is valid.
13838         * The delay in the first run through the loop below is sufficient and
13839         * required before the first read of RcvStatus.RxRbufInitDone.
13840         */
13841        read_csr(dd, RCV_CTRL);
13842
13843        /* wait for the init to finish */
13844        count = 0;
13845        while (1) {
13846                /* delay is required first time through - see above */
13847                udelay(2); /* do not busy-wait the CSR */
13848                reg = read_csr(dd, RCV_STATUS);
13849                if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13850                        break;
13851
13852                /* give up after 100us - slowest possible at 33MHz is 73us */
13853                if (count++ > 50) {
13854                        dd_dev_err(dd,
13855                                   "%s: RcvStatus.RxRbufInit not set, continuing\n",
13856                                   __func__);
13857                        break;
13858                }
13859        }
13860}
13861
13862/* set RXE CSRs to chip reset defaults */
13863static void reset_rxe_csrs(struct hfi1_devdata *dd)
13864{
13865        int i, j;
13866
13867        /*
13868         * RXE Kernel CSRs
13869         */
13870        write_csr(dd, RCV_CTRL, 0);
13871        init_rbufs(dd);
13872        /* RCV_STATUS read-only */
13873        /* RCV_CONTEXTS read-only */
13874        /* RCV_ARRAY_CNT read-only */
13875        /* RCV_BUF_SIZE read-only */
13876        write_csr(dd, RCV_BTH_QP, 0);
13877        write_csr(dd, RCV_MULTICAST, 0);
13878        write_csr(dd, RCV_BYPASS, 0);
13879        write_csr(dd, RCV_VL15, 0);
13880        /* this is a clear-down */
13881        write_csr(dd, RCV_ERR_INFO,
13882                  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13883        /* RCV_ERR_STATUS read-only */
13884        write_csr(dd, RCV_ERR_MASK, 0);
13885        write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13886        /* RCV_ERR_FORCE leave alone */
13887        for (i = 0; i < 32; i++)
13888                write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13889        for (i = 0; i < 4; i++)
13890                write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13891        for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13892                write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13893        for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13894                write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13895        for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
13896                clear_rsm_rule(dd, i);
13897        for (i = 0; i < 32; i++)
13898                write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13899
13900        /*
13901         * RXE Kernel and User Per-Context CSRs
13902         */
13903        for (i = 0; i < dd->chip_rcv_contexts; i++) {
13904                /* kernel */
13905                write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13906                /* RCV_CTXT_STATUS read-only */
13907                write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13908                write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13909                write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13910                write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13911                write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13912                write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13913                write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13914                write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13915                write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13916                write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13917
13918                /* user */
13919                /* RCV_HDR_TAIL read-only */
13920                write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13921                /* RCV_EGR_INDEX_TAIL read-only */
13922                write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13923                /* RCV_EGR_OFFSET_TAIL read-only */
13924                for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13925                        write_uctxt_csr(dd, i,
13926                                        RCV_TID_FLOW_TABLE + (8 * j), 0);
13927                }
13928        }
13929}
13930
13931/*
13932 * Set sc2vl tables.
13933 *
13934 * They power on to zeros, so to avoid send context errors
13935 * they need to be set:
13936 *
13937 * SC 0-7 -> VL 0-7 (respectively)
13938 * SC 15  -> VL 15
13939 * otherwise
13940 *        -> VL 0
13941 */
13942static void init_sc2vl_tables(struct hfi1_devdata *dd)
13943{
13944        int i;
13945        /* init per architecture spec, constrained by hardware capability */
13946
13947        /* HFI maps sent packets */
13948        write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13949                0,
13950                0, 0, 1, 1,
13951                2, 2, 3, 3,
13952                4, 4, 5, 5,
13953                6, 6, 7, 7));
13954        write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13955                1,
13956                8, 0, 9, 0,
13957                10, 0, 11, 0,
13958                12, 0, 13, 0,
13959                14, 0, 15, 15));
13960        write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13961                2,
13962                16, 0, 17, 0,
13963                18, 0, 19, 0,
13964                20, 0, 21, 0,
13965                22, 0, 23, 0));
13966        write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13967                3,
13968                24, 0, 25, 0,
13969                26, 0, 27, 0,
13970                28, 0, 29, 0,
13971                30, 0, 31, 0));
13972
13973        /* DC maps received packets */
13974        write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13975                15_0,
13976                0, 0, 1, 1,  2, 2,  3, 3,  4, 4,  5, 5,  6, 6,  7,  7,
13977                8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13978        write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13979                31_16,
13980                16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13981                24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13982
13983        /* initialize the cached sc2vl values consistently with h/w */
13984        for (i = 0; i < 32; i++) {
13985                if (i < 8 || i == 15)
13986                        *((u8 *)(dd->sc2vl) + i) = (u8)i;
13987                else
13988                        *((u8 *)(dd->sc2vl) + i) = 0;
13989        }
13990}
13991
13992/*
13993 * Read chip sizes and then reset parts to sane, disabled, values.  We cannot
13994 * depend on the chip going through a power-on reset - a driver may be loaded
13995 * and unloaded many times.
13996 *
13997 * Do not write any CSR values to the chip in this routine - there may be
13998 * a reset following the (possible) FLR in this routine.
13999 *
14000 */
14001static int init_chip(struct hfi1_devdata *dd)
14002{
14003        int i;
14004        int ret = 0;
14005
14006        /*
14007         * Put the HFI CSRs in a known state.
14008         * Combine this with a DC reset.
14009         *
14010         * Stop the device from doing anything while we do a
14011         * reset.  We know there are no other active users of
14012         * the device since we are now in charge.  Turn off
14013         * all outbound and inbound traffic and make sure
14014         * the device does not generate any interrupts.
14015         */
14016
14017        /* disable send contexts and SDMA engines */
14018        write_csr(dd, SEND_CTRL, 0);
14019        for (i = 0; i < dd->chip_send_contexts; i++)
14020                write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
14021        for (i = 0; i < dd->chip_sdma_engines; i++)
14022                write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
14023        /* disable port (turn off RXE inbound traffic) and contexts */
14024        write_csr(dd, RCV_CTRL, 0);
14025        for (i = 0; i < dd->chip_rcv_contexts; i++)
14026                write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
14027        /* mask all interrupt sources */
14028        for (i = 0; i < CCE_NUM_INT_CSRS; i++)
14029                write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
14030
14031        /*
14032         * DC Reset: do a full DC reset before the register clear.
14033         * A recommended length of time to hold is one CSR read,
14034         * so reread the CceDcCtrl.  Then, hold the DC in reset
14035         * across the clear.
14036         */
14037        write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
14038        (void)read_csr(dd, CCE_DC_CTRL);
14039
14040        if (use_flr) {
14041                /*
14042                 * A FLR will reset the SPC core and part of the PCIe.
14043                 * The parts that need to be restored have already been
14044                 * saved.
14045                 */
14046                dd_dev_info(dd, "Resetting CSRs with FLR\n");
14047
14048                /* do the FLR, the DC reset will remain */
14049                pcie_flr(dd->pcidev);
14050
14051                /* restore command and BARs */
14052                ret = restore_pci_variables(dd);
14053                if (ret) {
14054                        dd_dev_err(dd, "%s: Could not restore PCI variables\n",
14055                                   __func__);
14056                        return ret;
14057                }
14058
14059                if (is_ax(dd)) {
14060                        dd_dev_info(dd, "Resetting CSRs with FLR\n");
14061                        pcie_flr(dd->pcidev);
14062                        ret = restore_pci_variables(dd);
14063                        if (ret) {
14064                                dd_dev_err(dd, "%s: Could not restore PCI variables\n",
14065                                           __func__);
14066                                return ret;
14067                        }
14068                }
14069        } else {
14070                dd_dev_info(dd, "Resetting CSRs with writes\n");
14071                reset_cce_csrs(dd);
14072                reset_txe_csrs(dd);
14073                reset_rxe_csrs(dd);
14074                reset_misc_csrs(dd);
14075        }
14076        /* clear the DC reset */
14077        write_csr(dd, CCE_DC_CTRL, 0);
14078
14079        /* Set the LED off */
14080        setextled(dd, 0);
14081
14082        /*
14083         * Clear the QSFP reset.
14084         * An FLR enforces a 0 on all out pins. The driver does not touch
14085         * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low, holding
14086         * anything plugged in constantly in reset if it pays attention
14087         * to RESET_N.
14088         * Prime examples of this are optical cables. Set all pins high.
14089         * I2CCLK and I2CDAT will change per direction, and INT_N and
14090         * MODPRS_N are input only and their value is ignored.
14091         */
14092        write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
14093        write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
14094        init_chip_resources(dd);
14095        return ret;
14096}
14097
14098static void init_early_variables(struct hfi1_devdata *dd)
14099{
14100        int i;
14101
14102        /* assign link credit variables */
14103        dd->vau = CM_VAU;
14104        dd->link_credits = CM_GLOBAL_CREDITS;
14105        if (is_ax(dd))
14106                dd->link_credits--;
14107        dd->vcu = cu_to_vcu(hfi1_cu);
14108        /* enough room for 8 MAD packets plus header - 17K */
14109        dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
14110        if (dd->vl15_init > dd->link_credits)
14111                dd->vl15_init = dd->link_credits;
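        /*
         * Illustrative note: 8 * (2048 + 128) = 17408 bytes; dividing
         * by the AU size from vau_to_au(dd->vau) converts that byte
         * budget into the credit units used for VL15, capped at
         * link_credits just above.
         */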
14112
14113        write_uninitialized_csrs_and_memories(dd);
14114
14115        if (HFI1_CAP_IS_KSET(PKEY_CHECK))
14116                for (i = 0; i < dd->num_pports; i++) {
14117                        struct hfi1_pportdata *ppd = &dd->pport[i];
14118
14119                        set_partition_keys(ppd);
14120                }
14121        init_sc2vl_tables(dd);
14122}
14123
14124static void init_kdeth_qp(struct hfi1_devdata *dd)
14125{
14126        /* user changed the KDETH_QP */
14127        if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
14128                /* out of range or illegal value */
14129                dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
14130                kdeth_qp = 0;
14131        }
14132        if (kdeth_qp == 0)      /* not set, or failed range check */
14133                kdeth_qp = DEFAULT_KDETH_QP;
14134
14135        write_csr(dd, SEND_BTH_QP,
14136                  (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
14137                  SEND_BTH_QP_KDETH_QP_SHIFT);
14138
14139        write_csr(dd, RCV_BTH_QP,
14140                  (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
14141                  RCV_BTH_QP_KDETH_QP_SHIFT);
14142}
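/*
 * Illustrative note: the value programmed above acts as a QP number
 * prefix; BTH destination QPs that match it are treated as KDETH
 * (kernel-bypass) traffic.  As a hypothetical example, a prefix of
 * 0x80 would make QPNs 0x800000-0x80ffff KDETH QPs, assuming the
 * prefix covers the top 8 bits of the 24-bit QPN.
 */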
14143
14144/**
14145 * init_qpmap_table
14146 * @dd - device data
14147 * @first_ctxt - first context
14148 * @last_ctxt - last context
14149 *
14150 * This routine sets the qpn mapping table that
14151 * is indexed by qpn[8:1].
14152 *
14153 * The routine will round robin the 256 settings
14154 * from first_ctxt to last_ctxt.
14155 *
14156 * The first/last looks ahead to having specialized
14157 * receive contexts for mgmt and bypass.  Normal
14158 * verbs traffic will be assumed to be on a range
14159 * of receive contexts.
14160 */
14161static void init_qpmap_table(struct hfi1_devdata *dd,
14162                             u32 first_ctxt,
14163                             u32 last_ctxt)
14164{
14165        u64 reg = 0;
14166        u64 regno = RCV_QP_MAP_TABLE;
14167        int i;
14168        u64 ctxt = first_ctxt;
14169
14170        for (i = 0; i < 256; i++) {
14171                reg |= ctxt << (8 * (i % 8));
14172                ctxt++;
14173                if (ctxt > last_ctxt)
14174                        ctxt = first_ctxt;
14175                if (i % 8 == 7) {
14176                        write_csr(dd, regno, reg);
14177                        reg = 0;
14178                        regno += 8;
14179                }
14180        }
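        /*
         * Illustrative example (hypothetical range): with first_ctxt = 1
         * and last_ctxt = 3 the 256 qpn[8:1] slots are filled
         * 1,2,3,1,2,3,... and packed eight entries per 64-bit
         * RcvQPMapTable register, written every 8th iteration above.
         */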
14181
14182        add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14183                        | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
14184}
14185
14186struct rsm_map_table {
14187        u64 map[NUM_MAP_REGS];
14188        unsigned int used;
14189};
14190
14191struct rsm_rule_data {
14192        u8 offset;
14193        u8 pkt_type;
14194        u32 field1_off;
14195        u32 field2_off;
14196        u32 index1_off;
14197        u32 index1_width;
14198        u32 index2_off;
14199        u32 index2_width;
14200        u32 mask1;
14201        u32 value1;
14202        u32 mask2;
14203        u32 value2;
14204};
14205
14206/*
14207 * Return an initialized RMT map table for users to fill in.  OK if it
14208 * returns NULL, indicating no table.
14209 */
14210static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14211{
14212        struct rsm_map_table *rmt;
14213        u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */
14214
14215        rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14216        if (rmt) {
14217                memset(rmt->map, rxcontext, sizeof(rmt->map));
14218                rmt->used = 0;
14219        }
14220
14221        return rmt;
14222}
14223
14224/*
14225 * Write the final RMT map table to the chip and free the table.  OK if
14226 * table is NULL.
14227 */
14228static void complete_rsm_map_table(struct hfi1_devdata *dd,
14229                                   struct rsm_map_table *rmt)
14230{
14231        int i;
14232
14233        if (rmt) {
14234                /* write table to chip */
14235                for (i = 0; i < NUM_MAP_REGS; i++)
14236                        write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14237
14238                /* enable RSM */
14239                add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14240        }
14241}
14242
14243/*
14244 * Add a receive side mapping rule.
14245 */
14246static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14247                         struct rsm_rule_data *rrd)
14248{
14249        write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14250                  (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14251                  1ull << rule_index | /* enable bit */
14252                  (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14253        write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14254                  (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14255                  (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14256                  (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14257                  (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14258                  (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14259                  (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14260        write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14261                  (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14262                  (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14263                  (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14264                  (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14265}
14266
14267/*
14268 * Clear a receive side mapping rule.
14269 */
14270static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14271{
14272        write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14273        write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14274        write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14275}
14276
14277/* return the number of RSM map table entries that will be used for QOS */
14278static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14279                           unsigned int *np)
14280{
14281        int i;
14282        unsigned int m, n;
14283        u8 max_by_vl = 0;
14284
14285        /* is QOS active at all? */
14286        if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14287            num_vls == 1 ||
14288            krcvqsset <= 1)
14289                goto no_qos;
14290
14291        /* determine bits for qpn */
14292        for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14293                if (krcvqs[i] > max_by_vl)
14294                        max_by_vl = krcvqs[i];
14295        if (max_by_vl > 32)
14296                goto no_qos;
14297        m = ilog2(__roundup_pow_of_two(max_by_vl));
14298
14299        /* determine bits for vl */
14300        n = ilog2(__roundup_pow_of_two(num_vls));
14301
14302        /* reject if too much is used */
14303        if ((m + n) > 7)
14304                goto no_qos;
14305
14306        if (mp)
14307                *mp = m;
14308        if (np)
14309                *np = n;
14310
14311        return 1 << (m + n);
14312
14313no_qos:
14314        if (mp)
14315                *mp = 0;
14316        if (np)
14317                *np = 0;
14318        return 0;
14319}
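/*
 * Illustrative example (hypothetical krcvqs): with num_vls = 3 and
 * krcvqs = {4, 4, 2}, max_by_vl = 4 gives m = 2 qpn bits,
 * n = ilog2(roundup_pow_of_two(3)) = 2 vl bits, and
 * 1 << (m + n) = 16 map table entries are reported as used for QOS.
 */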
14320
14321/**
14322 * init_qos - init RX qos
14323 * @dd - device data
14324 * @rmt - RSM map table
14325 *
14326 * This routine initializes Rule 0 and the RSM map table to implement
14327 * quality of service (qos).
14328 *
14329 * If all of the limit tests succeed, qos is applied based on the array
14330 * interpretation of krcvqs where entry 0 is VL0.
14331 *
14332 * The number of vl bits (n) and the number of qpn bits (m) are computed to
14333 * feed both the RSM map table and the single rule.
14334 */
14335static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
14336{
14337        struct rsm_rule_data rrd;
14338        unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
14339        unsigned int rmt_entries;
14340        u64 reg;
14341
14342        if (!rmt)
14343                goto bail;
14344        rmt_entries = qos_rmt_entries(dd, &m, &n);
14345        if (rmt_entries == 0)
14346                goto bail;
14347        qpns_per_vl = 1 << m;
14348
14349        /* enough room in the map table? */
14350        rmt_entries = 1 << (m + n);
14351        if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
14352                goto bail;
14353
14354        /* add qos entries to the RSM map table */
14355        for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
14356                unsigned tctxt;
14357
14358                for (qpn = 0, tctxt = ctxt;
14359                     krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14360                        unsigned idx, regoff, regidx;
14361
14362                        /* generate the index the hardware will produce */
14363                        idx = rmt->used + ((qpn << n) ^ i);
14364                        regoff = (idx % 8) * 8;
14365                        regidx = idx / 8;
14366                        /* replace default with context number */
14367                        reg = rmt->map[regidx];
14368                        reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14369                                << regoff);
14370                        reg |= (u64)(tctxt++) << regoff;
14371                        rmt->map[regidx] = reg;
14372                        if (tctxt == ctxt + krcvqs[i])
14373                                tctxt = ctxt;
14374                }
14375                ctxt += krcvqs[i];
14376        }
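        /*
         * Illustrative note: per the index computation above, the map
         * entry the hardware will hit is rmt->used + ((qpn << n) ^ i);
         * e.g. with n = 2, VL i = 1 and qpn = 3 (hypothetical values)
         * the offset within this block is (3 << 2) ^ 1 = 13.
         */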
14377
14378        rrd.offset = rmt->used;
14379        rrd.pkt_type = 2;
14380        rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14381        rrd.field2_off = LRH_SC_MATCH_OFFSET;
14382        rrd.index1_off = LRH_SC_SELECT_OFFSET;
14383        rrd.index1_width = n;
14384        rrd.index2_off = QPN_SELECT_OFFSET;
14385        rrd.index2_width = m + n;
14386        rrd.mask1 = LRH_BTH_MASK;
14387        rrd.value1 = LRH_BTH_VALUE;
14388        rrd.mask2 = LRH_SC_MASK;
14389        rrd.value2 = LRH_SC_VALUE;
14390
14391        /* add rule 0 */
14392        add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
14393
14394        /* mark RSM map entries as used */
14395        rmt->used += rmt_entries;
14396        /* map everything else to the mcast/err/vl15 context */
14397        init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
14398        dd->qos_shift = n + 1;
14399        return;
14400bail:
14401        dd->qos_shift = 1;
14402        init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
14403}
14404
14405static void init_user_fecn_handling(struct hfi1_devdata *dd,
14406                                    struct rsm_map_table *rmt)
14407{
14408        struct rsm_rule_data rrd;
14409        u64 reg;
14410        int i, idx, regoff, regidx;
14411        u8 offset;
14412
14413        /* there needs to be enough room in the map table */
14414        if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
14415                dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
14416                return;
14417        }
14418
14419        /*
14420         * RSM will extract the destination context as an index into the
14421         * map table.  The destination contexts are a sequential block
14422         * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
14423         * Map entries are accessed as offset + extracted value.  Adjust
14424         * the added offset so this sequence can be placed anywhere in
14425         * the table - as long as the entries themselves do not wrap.
14426         * There are only enough bits in offset for the table size, so
14427         * start with that to allow for a "negative" offset.
14428         */
14429        offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
14430                                                (int)dd->first_dyn_alloc_ctxt);
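        /*
         * Illustrative example (hypothetical values): with rmt->used = 20
         * and first_dyn_alloc_ctxt = 9, offset = (u8)(256 + 20 - 9) = 11,
         * so the hardware's offset + extracted-context lookup for
         * context 9 lands on map entry 11 + 9 = 20, the first free slot.
         */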
14431
14432        for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
14433                                i < dd->num_rcv_contexts; i++, idx++) {
14434                /* replace with identity mapping */
14435                regoff = (idx % 8) * 8;
14436                regidx = idx / 8;
14437                reg = rmt->map[regidx];
14438                reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14439                reg |= (u64)i << regoff;
14440                rmt->map[regidx] = reg;
14441        }
14442
14443        /*
14444         * For RSM intercept of Expected FECN packets:
14445         * o packet type 0 - expected
14446         * o match on F (bit 95), using select/match 1, and
14447         * o match on SH (bit 133), using select/match 2.
14448         *
14449         * Use index 1 to extract the 8-bit receive context from DestQP
14450         * (start at bit 64).  Use that as the RSM map table index.
14451         */
14452        rrd.offset = offset;
14453        rrd.pkt_type = 0;
14454        rrd.field1_off = 95;
14455        rrd.field2_off = 133;
14456        rrd.index1_off = 64;
14457        rrd.index1_width = 8;
14458        rrd.index2_off = 0;
14459        rrd.index2_width = 0;
14460        rrd.mask1 = 1;
14461        rrd.value1 = 1;
14462        rrd.mask2 = 1;
14463        rrd.value2 = 1;
14464
14465        /* add rule 1 */
14466        add_rsm_rule(dd, RSM_INS_FECN, &rrd);
14467
14468        rmt->used += dd->num_user_contexts;
14469}
14470
14471/* Initialize RSM for VNIC */
14472void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14473{
14474        u8 i, j;
14475        u8 ctx_id = 0;
14476        u64 reg;
14477        u32 regoff;
14478        struct rsm_rule_data rrd;
14479
14480        if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14481                dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
14482                           dd->vnic.rmt_start);
14483                return;
14484        }
14485
14486        dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14487                dd->vnic.rmt_start,
14488                dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14489
14490        /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14491        regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
14492        reg = read_csr(dd, regoff);
14493        for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14494                /* Update map register with vnic context */
14495                j = (dd->vnic.rmt_start + i) % 8;
14496                reg &= ~(0xffllu << (j * 8));
14497                reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14498                /* Wrap up vnic ctx index */
14499                ctx_id %= dd->vnic.num_ctxt;
14500                /* Write back map register */
14501                if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14502                        dev_dbg(&(dd)->pcidev->dev,
14503                                "Vnic rsm map reg[%d] =0x%llx\n",
14504                                regoff - RCV_RSM_MAP_TABLE, reg);
14505
14506                        write_csr(dd, regoff, reg);
14507                        regoff += 8;
14508                        if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14509                                reg = read_csr(dd, regoff);
14510                }
14511        }
14512
14513        /* Add rule for vnic */
14514        rrd.offset = dd->vnic.rmt_start;
14515        rrd.pkt_type = 4;
14516        /* Match 16B packets */
14517        rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14518        rrd.mask1 = L2_TYPE_MASK;
14519        rrd.value1 = L2_16B_VALUE;
14520        /* Match ETH L4 packets */
14521        rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14522        rrd.mask2 = L4_16B_TYPE_MASK;
14523        rrd.value2 = L4_16B_ETH_VALUE;
14524        /* Calc context from veswid and entropy */
14525        rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14526        rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14527        rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14528        rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14529        add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14530
14531        /* Enable RSM if not already enabled */
14532        add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14533}
14534
14535void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14536{
14537        clear_rsm_rule(dd, RSM_INS_VNIC);
14538
14539        /* Disable RSM if used only by vnic */
14540        if (dd->vnic.rmt_start == 0)
14541                clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14542}
14543
14544static void init_rxe(struct hfi1_devdata *dd)
14545{
14546        struct rsm_map_table *rmt;
14547        u64 val;
14548
14549        /* enable all receive errors */
14550        write_csr(dd, RCV_ERR_MASK, ~0ull);
14551
14552        rmt = alloc_rsm_map_table(dd);
14553        /* set up QOS, including the QPN map table */
14554        init_qos(dd, rmt);
14555        init_user_fecn_handling(dd, rmt);
14556        complete_rsm_map_table(dd, rmt);
14557        /* record number of used rsm map entries for vnic */
14558        dd->vnic.rmt_start = rmt->used;
14559        kfree(rmt);
14560
14561        /*
14562         * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14563         * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14564         * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
14565         * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14566         * Max_PayLoad_Size set to its minimum of 128.
14567         * Max_Payload_Size set to its minimum of 128.
14568         * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14569         * (64 bytes).  Max_Payload_Size is possibly modified upward in
14570         * tune_pcie_caps() which is called after this routine.
14571         */
14572
14573        /* Have 16 bytes (4DW) of bypass header available in header queue */
14574        val = read_csr(dd, RCV_BYPASS);
14575        val |= (4ull << 16);
14576        write_csr(dd, RCV_BYPASS, val);
14577}
14578
14579static void init_other(struct hfi1_devdata *dd)
14580{
14581        /* enable all CCE errors */
14582        write_csr(dd, CCE_ERR_MASK, ~0ull);
14583        /* enable *some* Misc errors */
14584        write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14585        /* enable all DC errors, except LCB */
14586        write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14587        write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14588}
14589
14590/*
14591 * Fill out the given AU table using the given CU.  A CU is defined in terms
14592 * of AUs.  The table is an encoding: given the index, how many AUs does that
14593 * represent?
14594 *
14595 * NOTE: Assumes that the register layout is the same for the
14596 * local and remote tables.
14597 */
14598static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14599                               u32 csr0to3, u32 csr4to7)
14600{
14601        write_csr(dd, csr0to3,
14602                  0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14603                  1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14604                  2ull * cu <<
14605                  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14606                  4ull * cu <<
14607                  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14608        write_csr(dd, csr4to7,
14609                  8ull * cu <<
14610                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14611                  16ull * cu <<
14612                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14613                  32ull * cu <<
14614                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14615                  64ull * cu <<
14616                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
14617}
14618
14619static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14620{
14621        assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14622                           SEND_CM_LOCAL_AU_TABLE4_TO7);
14623}
14624
14625void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14626{
14627        assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14628                           SEND_CM_REMOTE_AU_TABLE4_TO7);
14629}
14630
14631static void init_txe(struct hfi1_devdata *dd)
14632{
14633        int i;
14634
14635        /* enable all PIO, SDMA, general, and Egress errors */
14636        write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14637        write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14638        write_csr(dd, SEND_ERR_MASK, ~0ull);
14639        write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14640
14641        /* enable all per-context and per-SDMA engine errors */
14642        for (i = 0; i < dd->chip_send_contexts; i++)
14643                write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14644        for (i = 0; i < dd->chip_sdma_engines; i++)
14645                write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14646
14647        /* set the local CU to AU mapping */
14648        assign_local_cm_au_table(dd, dd->vcu);
14649
14650        /*
14651         * Set reasonable default for Credit Return Timer
14652         * Don't set on Simulator - causes it to choke.
14653         */
14654        if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14655                write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14656}
14657
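/*
 * Program the J_KEY checks for a context (summary inferred from the code
 * below): the same jkey is written to the send context check CSR and the
 * receive key control CSR, and the send-side check is enabled on non-A0
 * hardware.  hfi1_clear_ctxt_jkey() below undoes this.
 */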
14658int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14659                       u16 jkey)
14660{
14661        u8 hw_ctxt;
14662        u64 reg;
14663
14664        if (!rcd || !rcd->sc)
14665                return -EINVAL;
14666
14667        hw_ctxt = rcd->sc->hw_context;
14668        reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14669                ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14670                 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14671        /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14672        if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14673                reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14674        write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14675        /*
14676         * Enable send-side J_KEY integrity check, unless this is A0 h/w
14677         */
14678        if (!is_ax(dd)) {
14679                reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14680                reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14681                write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14682        }
14683
14684        /* Enable J_KEY check on receive context. */
14685        reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14686                ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14687                 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14688        write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14689
14690        return 0;
14691}
14692
14693int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
14694{
14695        u8 hw_ctxt;
14696        u64 reg;
14697
14698        if (!rcd || !rcd->sc)
14699                return -EINVAL;
14700
14701        hw_ctxt = rcd->sc->hw_context;
14702        write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14703        /*
14704         * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14705         * This check would not have been enabled for A0 h/w, see
14706         * hfi1_set_ctxt_jkey().
14707         */
14708        if (!is_ax(dd)) {
14709                reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14710                reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14711                write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14712        }
14713        /* Turn off the J_KEY on the receive side */
14714        write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14715
14716        return 0;
14717}
14718
14719int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14720                       u16 pkey)
14721{
14722        u8 hw_ctxt;
14723        u64 reg;
14724
14725        if (!rcd || !rcd->sc)
14726                return -EINVAL;
14727
14728        hw_ctxt = rcd->sc->hw_context;
14729        reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14730                SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14731        write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14732        reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14733        reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
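        /* setting a pkey also re-allows KDETH packets on this send context */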
14734        reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14735        write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14736
14737        return 0;
14738}
14739
14740int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
14741{
14742        u8 hw_ctxt;
14743        u64 reg;
14744
14745        if (!ctxt || !ctxt->sc)
14746                return -EINVAL;
14747
14748        hw_ctxt = ctxt->sc->hw_context;
14749        reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14750        reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14751        write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14752        write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14753
14754        return 0;
14755}
14756
14757/*
14758 * Start doing the clean up of the chip. Our clean up happens in multiple
14759 * stages and this is just the first.
14760 */
14761void hfi1_start_cleanup(struct hfi1_devdata *dd)
14762{
14763        aspm_exit(dd);
14764        free_cntrs(dd);
14765        free_rcverr(dd);
14766        clean_up_interrupts(dd);
14767        finish_chip_resources(dd);
14768}
14769
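/*
 * Mask off the per-HFI index bit so the two HFIs on one ASIC compare equal
 * when searching for a peer device (see init_asic_data() below).
 */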
14770#define HFI_BASE_GUID(dev) \
14771        ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14772
14773/*
14774 * Information can be shared between the two HFIs on the same ASIC
14775 * in the same OS.  This function finds the peer device and sets
14776 * up a shared structure.
14777 */
14778static int init_asic_data(struct hfi1_devdata *dd)
14779{
14780        unsigned long flags;
14781        struct hfi1_devdata *tmp, *peer = NULL;
14782        struct hfi1_asic_data *asic_data;
14783        int ret = 0;
14784
14785        /* pre-allocate the asic structure in case we are the first device */
14786        asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14787        if (!asic_data)
14788                return -ENOMEM;
14789
14790        spin_lock_irqsave(&hfi1_devs_lock, flags);
14791        /* Find our peer device */
14792        list_for_each_entry(tmp, &hfi1_dev_list, list) {
14793                if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14794                    dd->unit != tmp->unit) {
14795                        peer = tmp;
14796                        break;
14797                }
14798        }
14799
14800        if (peer) {
14801                /* use already allocated structure */
14802                dd->asic_data = peer->asic_data;
14803                kfree(asic_data);
14804        } else {
14805                dd->asic_data = asic_data;
14806                mutex_init(&dd->asic_data->asic_resource_mutex);
14807        }
14808        dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14809        spin_unlock_irqrestore(&hfi1_devs_lock, flags);
14810
14811        /* first one through - set up i2c devices */
14812        if (!peer)
14813                ret = set_up_i2c(dd, dd->asic_data);
14814
14815        return ret;
14816}
14817
14818/*
14819 * Set dd->boardname.  Use a generic name if a name is not returned from
14820 * EFI variable space.
14821 *
14822 * Return 0 on success, -ENOMEM if space could not be allocated.
14823 */
14824static int obtain_boardname(struct hfi1_devdata *dd)
14825{
14826        /* generic board description */
14827        const char generic[] =
14828                "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14829        unsigned long size;
14830        int ret;
14831
14832        ret = read_hfi1_efi_var(dd, "description", &size,
14833                                (void **)&dd->boardname);
14834        if (ret) {
14835                dd_dev_info(dd, "Board description not found\n");
14836                /* use generic description */
14837                dd->boardname = kstrdup(generic, GFP_KERNEL);
14838                if (!dd->boardname)
14839                        return -ENOMEM;
14840        }
14841        return 0;
14842}
14843
14844/*
14845 * Check the interrupt registers to make sure that they are mapped correctly.
14846 * It is intended to help the user identify any mismapping by the VMM when
14847 * the driver is running in a VM. This function should only be called before
14848 * interrupts are set up properly.
14849 *
14850 * Return 0 on success, -EINVAL on failure.
14851 */
14852static int check_int_registers(struct hfi1_devdata *dd)
14853{
14854        u64 reg;
14855        u64 all_bits = ~(u64)0;
14856        u64 mask;
14857
14858        /* Clear CceIntMask[0] to avoid raising any interrupts */
14859        mask = read_csr(dd, CCE_INT_MASK);
14860        write_csr(dd, CCE_INT_MASK, 0ull);
14861        reg = read_csr(dd, CCE_INT_MASK);
14862        if (reg)
14863                goto err_exit;
14864
14865        /* Clear all interrupt status bits */
14866        write_csr(dd, CCE_INT_CLEAR, all_bits);
14867        reg = read_csr(dd, CCE_INT_STATUS);
14868        if (reg)
14869                goto err_exit;
14870
14871        /* Set all interrupt status bits */
14872        write_csr(dd, CCE_INT_FORCE, all_bits);
14873        reg = read_csr(dd, CCE_INT_STATUS);
14874        if (reg != all_bits)
14875                goto err_exit;
14876
14877        /* Restore the interrupt mask */
14878        write_csr(dd, CCE_INT_CLEAR, all_bits);
14879        write_csr(dd, CCE_INT_MASK, mask);
14880
14881        return 0;
14882err_exit:
14883        write_csr(dd, CCE_INT_MASK, mask);
14884        dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14885        return -EINVAL;
14886}
14887
14888/**
14889 * hfi1_init_dd - Allocate and initialize the device structure for the hfi.
14890 * @pdev: the pci_dev for hfi1_ib device
14891 * @ent: pci_device_id struct for this dev
14892 *
14893 * Also allocates, initializes, and returns the devdata struct for this
14894 * device instance
14895 *
14896 * This is global, and is called directly at init to set up the
14897 * chip-specific function pointers for later use.
14898 */
14899struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14900                                  const struct pci_device_id *ent)
14901{
14902        struct hfi1_devdata *dd;
14903        struct hfi1_pportdata *ppd;
14904        u64 reg;
14905        int i, ret;
14906        static const char * const inames[] = { /* implementation names */
14907                "RTL silicon",
14908                "RTL VCS simulation",
14909                "RTL FPGA emulation",
14910                "Functional simulator"
14911        };
14912        struct pci_dev *parent = pdev->bus->self;
14913
14914        dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14915                                sizeof(struct hfi1_pportdata));
14916        if (IS_ERR(dd))
14917                goto bail;
14918        ppd = dd->pport;
14919        for (i = 0; i < dd->num_pports; i++, ppd++) {
14920                int vl;
14921                /* init common fields */
14922                hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14923                /* DC supports 4 link widths */
14924                ppd->link_width_supported =
14925                        OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14926                        OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14927                ppd->link_width_downgrade_supported =
14928                        ppd->link_width_supported;
14929                /* start out enabling only 4X */
14930                ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14931                ppd->link_width_downgrade_enabled =
14932                                        ppd->link_width_downgrade_supported;
14933                /* link width active is 0 when link is down */
14934                /* link width downgrade active is 0 when link is down */
14935
14936                if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14937                    num_vls > HFI1_MAX_VLS_SUPPORTED) {
14938                        hfi1_early_err(&pdev->dev,
14939                                       "Invalid num_vls %u, using %u VLs\n",
14940                                       num_vls, HFI1_MAX_VLS_SUPPORTED);
14941                        num_vls = HFI1_MAX_VLS_SUPPORTED;
14942                }
14943                ppd->vls_supported = num_vls;
14944                ppd->vls_operational = ppd->vls_supported;
14945                /* Set the default MTU. */
14946                for (vl = 0; vl < num_vls; vl++)
14947                        dd->vld[vl].mtu = hfi1_max_mtu;
14948                dd->vld[15].mtu = MAX_MAD_PACKET;
14949                /*
14950                 * Set the initial values to reasonable defaults, will be set
14951                 * for real when link is up.
14952                 */
14953                ppd->overrun_threshold = 0x4;
14954                ppd->phy_error_threshold = 0xf;
14955                ppd->port_crc_mode_enabled = link_crc_mask;
14956                /* initialize supported LTP CRC mode */
14957                ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14958                /* initialize enabled LTP CRC mode */
14959                ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14960                /* start in offline */
14961                ppd->host_link_state = HLS_DN_OFFLINE;
14962                init_vl_arb_caches(ppd);
14963        }
14964
14965        dd->link_default = HLS_DN_POLL;
14966
14967        /*
14968         * Do remaining PCIe setup and save PCIe values in dd.
14969         * Any error printing is already done by the init code.
14970         * On return, we have the chip mapped.
14971         */
14972        ret = hfi1_pcie_ddinit(dd, pdev);
14973        if (ret < 0)
14974                goto bail_free;
14975
14976        /* Save PCI space registers to rewrite after device reset */
14977        ret = save_pci_variables(dd);
14978        if (ret < 0)
14979                goto bail_cleanup;
14980
14981        /* verify that reads actually work, save revision for reset check */
14982        dd->revision = read_csr(dd, CCE_REVISION);
14983        if (dd->revision == ~(u64)0) {
14984                dd_dev_err(dd, "cannot read chip CSRs\n");
14985                ret = -EINVAL;
14986                goto bail_cleanup;
14987        }
14988        dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14989                        & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14990        dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14991                        & CCE_REVISION_CHIP_REV_MINOR_MASK;
14992
14993        /*
14994         * Check the interrupt register mapping if the driver has no access to
14995         * the upstream component. In this case, it is likely that the driver
14996         * is running in a VM.
14997         */
14998        if (!parent) {
14999                ret = check_int_registers(dd);
15000                if (ret)
15001                        goto bail_cleanup;
15002        }
15003
15004        /*
15005         * obtain the hardware ID - NOT related to unit, which is a
15006         * software enumeration
15007         */
15008        reg = read_csr(dd, CCE_REVISION2);
15009        dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
15010                                        & CCE_REVISION2_HFI_ID_MASK;
15011        /* the variable size will remove unwanted bits */
15012        dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
15013        dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
15014        dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
15015                    dd->icode < ARRAY_SIZE(inames) ?
15016                    inames[dd->icode] : "unknown", (int)dd->irev);
15017
15018        /* speeds the hardware can support */
15019        dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
15020        /* speeds allowed to run at */
15021        dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
15022        /* give a reasonable active value, will be set on link up */
15023        dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
15024
15025        dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
15026        dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
15027        dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
15028        dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
15029        dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
15030        /* fix up link widths for emulation _p */
15031        ppd = dd->pport;
15032        if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
15033                ppd->link_width_supported =
15034                        ppd->link_width_enabled =
15035                        ppd->link_width_downgrade_supported =
15036                        ppd->link_width_downgrade_enabled =
15037                                OPA_LINK_WIDTH_1X;
15038        }
15039        /* ensure num_vls isn't larger than the number of SDMA engines */
15040        if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
15041                dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
15042                           num_vls, dd->chip_sdma_engines);
15043                num_vls = dd->chip_sdma_engines;
15044                ppd->vls_supported = dd->chip_sdma_engines;
15045                ppd->vls_operational = ppd->vls_supported;
15046        }
15047
15048        /*
15049         * Convert the ns parameter to units of 64 cclocks, as used in the CSR.
15050         * Limit the max if larger than the field holds.  If timeout is
15051         * non-zero, then the calculated field will be at least 1.
15052         *
15053         * Must be after icode is set up - the cclock rate depends
15054         * on knowing the hardware being used.
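         *
         * E.g. (illustrative): a timeout large enough to overflow the RELOAD
         * field is clamped to the field maximum, while a small non-zero
         * timeout that would otherwise round to 0 is bumped up to 1.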
15055         */
15056        dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
15057        if (dd->rcv_intr_timeout_csr >
15058                        RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
15059                dd->rcv_intr_timeout_csr =
15060                        RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
15061        else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
15062                dd->rcv_intr_timeout_csr = 1;
15063
15064        /* needs to be done before we look for the peer device */
15065        read_guid(dd);
15066
15067        /* set up shared ASIC data with peer device */
15068        ret = init_asic_data(dd);
15069        if (ret)
15070                goto bail_cleanup;
15071
15072        /* obtain chip sizes, reset chip CSRs */
15073        ret = init_chip(dd);
15074        if (ret)
15075                goto bail_cleanup;
15076
15077        /* read in the PCIe link speed information */
15078        ret = pcie_speeds(dd);
15079        if (ret)
15080                goto bail_cleanup;
15081
15082        /* call before get_platform_config(), after init_chip_resources() */
15083        ret = eprom_init(dd);
15084        if (ret)
15085                goto bail_free_rcverr;
15086
15087        /* Needs to be called before hfi1_firmware_init */
15088        get_platform_config(dd);
15089
15090        /* read in firmware */
15091        ret = hfi1_firmware_init(dd);
15092        if (ret)
15093                goto bail_cleanup;
15094
15095        /*
15096         * In general, the PCIe Gen3 transition must occur after the
15097         * chip has been idled (so it won't initiate any PCIe transactions
15098         * e.g. an interrupt) and before the driver changes any registers
15099         * (the transition will reset the registers).
15100         *
15101         * In particular, place this call after:
15102         * - init_chip()     - the chip will not initiate any PCIe transactions
15103         * - pcie_speeds()   - reads the current link speed
15104         * - hfi1_firmware_init() - the needed firmware is ready to be
15105         *                          downloaded
15106         */
15107        ret = do_pcie_gen3_transition(dd);
15108        if (ret)
15109                goto bail_cleanup;
15110
15111        /* start setting dd values and adjusting CSRs */
15112        init_early_variables(dd);
15113
15114        parse_platform_config(dd);
15115
15116        ret = obtain_boardname(dd);
15117        if (ret)
15118                goto bail_cleanup;
15119
15120        snprintf(dd->boardversion, BOARD_VERS_MAX,
15121                 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
15122                 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
15123                 (u32)dd->majrev,
15124                 (u32)dd->minrev,
15125                 (dd->revision >> CCE_REVISION_SW_SHIFT)
15126                    & CCE_REVISION_SW_MASK);
15127
15128        ret = set_up_context_variables(dd);
15129        if (ret)
15130                goto bail_cleanup;
15131
15132        /* set initial RXE CSRs */
15133        init_rxe(dd);
15134        /* set initial TXE CSRs */
15135        init_txe(dd);
15136        /* set initial non-RXE, non-TXE CSRs */
15137        init_other(dd);
15138        /* set up KDETH QP prefix in both RX and TX CSRs */
15139        init_kdeth_qp(dd);
15140
15141        ret = hfi1_dev_affinity_init(dd);
15142        if (ret)
15143                goto bail_cleanup;
15144
15145        /* send contexts must be set up before receive contexts */
15146        ret = init_send_contexts(dd);
15147        if (ret)
15148                goto bail_cleanup;
15149
15150        ret = hfi1_create_kctxts(dd);
15151        if (ret)
15152                goto bail_cleanup;
15153
15154        /*
15155         * Initialize aspm, to be done after gen3 transition and setting up
15156         * contexts and before enabling interrupts
15157         */
15158        aspm_init(dd);
15159
15160        dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
15161        /*
15162         * rcd[0] is guaranteed to be valid by this point. Also, all
15163         * contexts are using the same value, as per the module parameter.
15164         */
15165        dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
15166
15167        ret = init_pervl_scs(dd);
15168        if (ret)
15169                goto bail_cleanup;
15170
15171        /* sdma init */
15172        for (i = 0; i < dd->num_pports; ++i) {
15173                ret = sdma_init(dd, i);
15174                if (ret)
15175                        goto bail_cleanup;
15176        }
15177
15178        /* use contexts created by hfi1_create_kctxts */
15179        ret = set_up_interrupts(dd);
15180        if (ret)
15181                goto bail_cleanup;
15182
15183        /* set up LCB access - must be after set_up_interrupts() */
15184        init_lcb_access(dd);
15185
15186        /*
15187         * Serial number is created from the base guid:
15188         * [27:24] = base guid [38:35]
15189         * [23: 0] = base guid [23: 0]
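         * (the >> 11 below moves base guid bits [38:35] down to [27:24])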
15190         */
15191        snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
15192                 (dd->base_guid & 0xFFFFFF) |
15193                     ((dd->base_guid >> 11) & 0xF000000));
15194
15195        dd->oui1 = dd->base_guid >> 56 & 0xFF;
15196        dd->oui2 = dd->base_guid >> 48 & 0xFF;
15197        dd->oui3 = dd->base_guid >> 40 & 0xFF;
15198
15199        ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15200        if (ret)
15201                goto bail_clear_intr;
15202
15203        thermal_init(dd);
15204
15205        ret = init_cntrs(dd);
15206        if (ret)
15207                goto bail_clear_intr;
15208
15209        ret = init_rcverr(dd);
15210        if (ret)
15211                goto bail_free_cntrs;
15212
15213        init_completion(&dd->user_comp);
15214
15215        /* The user refcount starts with one to indicate an active device */
15216        atomic_set(&dd->user_refcount, 1);
15217
15218        goto bail;
15219
15220bail_free_rcverr:
15221        free_rcverr(dd);
15222bail_free_cntrs:
15223        free_cntrs(dd);
15224bail_clear_intr:
15225        clean_up_interrupts(dd);
15226bail_cleanup:
15227        hfi1_pcie_ddcleanup(dd);
15228bail_free:
15229        hfi1_free_devdata(dd);
15230        dd = ERR_PTR(ret);
15231bail:
15232        return dd;
15233}
15234
15235static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15236                        u32 dw_len)
15237{
15238        u32 delta_cycles;
15239        u32 current_egress_rate = ppd->current_egress_rate;
15240        /* rates here are in units of 10^6 bits/sec */
15241
15242        if (desired_egress_rate == -1)
15243                return 0; /* shouldn't happen */
15244
15245        if (desired_egress_rate >= current_egress_rate)
15246                return 0; /* we can't help it go faster, only slower */
15247
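        /*
         * delta is the extra number of cycles the packet needs at the slower
         * desired rate vs. the current egress rate; create_pbc() below uses
         * it as the PBC static rate control count.
         */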
15248        delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15249                        egress_cycles(dw_len * 4, current_egress_rate);
15250
15251        return (u16)delta_cycles;
15252}
15253
15254/**
15255 * create_pbc - build a pbc for transmission
15256 * @flags: special case flags or-ed into the built pbc
15257 * @srate_mbs: static rate in units of 10^6 bits/sec
15258 * @vl: vl
15259 * @dw_len: dword length (header words + data words + pbc words)
15260 *
15261 * Create a PBC with the given flags, rate, VL, and length.
15262 *
15263 * NOTE: The PBC created will not insert any HCRC - all callers but one are
15264 * for verbs, which does not use this PSM feature.  The lone other caller
15265 * is for the diagnostic interface which calls this if the user does not
15266 * supply their own PBC.
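 *
 * Example (illustrative, not part of the original interface notes): a 9 DW
 * send on VL 0 with no static rate throttling,
 *	create_pbc(ppd, 0, 0, 0, 9);
 * yields a zero rate-control delay, PBC_IHCRC_NONE in the HCRC field and a
 * LengthDWs of 9.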
15267 */
15268u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15269               u32 dw_len)
15270{
15271        u64 pbc, delay = 0;
15272
15273        if (unlikely(srate_mbs))
15274                delay = delay_cycles(ppd, srate_mbs, dw_len);
15275
15276        pbc = flags
15277                | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15278                | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15279                | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15280                | (dw_len & PBC_LENGTH_DWS_MASK)
15281                        << PBC_LENGTH_DWS_SHIFT;
15282
15283        return pbc;
15284}
15285
15286#define SBUS_THERMAL    0x4f
15287#define SBUS_THERM_MONITOR_MODE 0x1
15288
15289#define THERM_FAILURE(dev, ret, reason) \
15290        dd_dev_err((dd),                                                \
15291                   "Thermal sensor initialization failed: %s (%d)\n",   \
15292                   (reason), (ret))
15293
15294/*
15295 * Initialize the thermal sensor.
15296 *
15297 * After initialization, enable polling of thermal sensor through
15298 * SBus interface. For this to work, the SBus Master
15299 * firmware has to be loaded, because the HW polling
15300 * logic uses SBus interrupts, which are not supported with
15301 * default firmware. Otherwise, no data will be returned through
15302 * the ASIC_STS_THERM CSR.
15303 */
15304static int thermal_init(struct hfi1_devdata *dd)
15305{
15306        int ret = 0;
15307
15308        if (dd->icode != ICODE_RTL_SILICON ||
15309            check_chip_resource(dd, CR_THERM_INIT, NULL))
15310                return ret;
15311
15312        ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15313        if (ret) {
15314                THERM_FAILURE(dd, ret, "Acquire SBus");
15315                return ret;
15316        }
15317
15318        dd_dev_info(dd, "Initializing thermal sensor\n");
15319        /* Disable polling of thermal readings */
15320        write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15321        msleep(100);
15322        /* Thermal Sensor Initialization */
15323        /*    Step 1: Reset the Thermal SBus Receiver */
15324        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15325                                RESET_SBUS_RECEIVER, 0);
15326        if (ret) {
15327                THERM_FAILURE(dd, ret, "Bus Reset");
15328                goto done;
15329        }
15330        /*    Step 2: Set Reset bit in Thermal block */
15331        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15332                                WRITE_SBUS_RECEIVER, 0x1);
15333        if (ret) {
15334                THERM_FAILURE(dd, ret, "Therm Block Reset");
15335                goto done;
15336        }
15337        /*    Step 3: Write clock divider value (100MHz -> 2MHz) */
15338        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15339                                WRITE_SBUS_RECEIVER, 0x32);
15340        if (ret) {
15341                THERM_FAILURE(dd, ret, "Write Clock Div");
15342                goto done;
15343        }
15344        /*    Step 4: Select temperature mode */
15345        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15346                                WRITE_SBUS_RECEIVER,
15347                                SBUS_THERM_MONITOR_MODE);
15348        if (ret) {
15349                THERM_FAILURE(dd, ret, "Write Mode Sel");
15350                goto done;
15351        }
15352        /*    Step 5: De-assert block reset and start conversion */
15353        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15354                                WRITE_SBUS_RECEIVER, 0x2);
15355        if (ret) {
15356                THERM_FAILURE(dd, ret, "Write Reset Deassert");
15357                goto done;
15358        }
15359        /*    Step 5.1: Wait for first conversion (21.5ms per spec) */
15360        msleep(22);
15361
15362        /* Enable polling of thermal readings */
15363        write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
15364
15365        /* Set initialized flag */
15366        ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15367        if (ret)
15368                THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15369
15370done:
15371        release_chip_resource(dd, CR_SBUS);
15372        return ret;
15373}
15374
15375static void handle_temp_err(struct hfi1_devdata *dd)
15376{
15377        struct hfi1_pportdata *ppd = &dd->pport[0];
15378        /*
15379         * Thermal Critical Interrupt
15380         * Put the device into forced freeze mode, take link down to
15381         * offline, and put DC into reset.
15382         */
15383        dd_dev_emerg(dd,
15384                     "Critical temperature reached! Forcing device into freeze mode!\n");
15385        dd->flags |= HFI1_FORCED_FREEZE;
15386        start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
15387        /*
15388         * Shut DC down as much and as quickly as possible.
15389         *
15390         * Step 1: Take the link down to OFFLINE. This will cause the
15391         *         8051 to put the Serdes in reset. However, we don't want to
15392         *         go through the entire link state machine since we want to
15393         *         shutdown ASAP. Furthermore, this is not a graceful shutdown
15394         *         but rather an attempt to save the chip.
15395         *         Code below is almost the same as quiet_serdes() but avoids
15396         *         all the extra work and the sleeps.
15397         */
15398        ppd->driver_link_ready = 0;
15399        ppd->link_enabled = 0;
15400        set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15401                                PLS_OFFLINE);
15402        /*
15403         * Step 2: Shutdown LCB and 8051
15404         *         After shutdown, do not restore DC_CFG_RESET value.
15405         */
15406        dc_shutdown(dd);
15407}
15408