linux/drivers/staging/rdma/hfi1/chip.c
   1/*
   2 * Copyright(c) 2015, 2016 Intel Corporation.
   3 *
   4 * This file is provided under a dual BSD/GPLv2 license.  When using or
   5 * redistributing this file, you may do so under either license.
   6 *
   7 * GPL LICENSE SUMMARY
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of version 2 of the GNU General Public License as
  11 * published by the Free Software Foundation.
  12 *
  13 * This program is distributed in the hope that it will be useful, but
  14 * WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  16 * General Public License for more details.
  17 *
  18 * BSD LICENSE
  19 *
  20 * Redistribution and use in source and binary forms, with or without
  21 * modification, are permitted provided that the following conditions
  22 * are met:
  23 *
  24 *  - Redistributions of source code must retain the above copyright
  25 *    notice, this list of conditions and the following disclaimer.
  26 *  - Redistributions in binary form must reproduce the above copyright
  27 *    notice, this list of conditions and the following disclaimer in
  28 *    the documentation and/or other materials provided with the
  29 *    distribution.
  30 *  - Neither the name of Intel Corporation nor the names of its
  31 *    contributors may be used to endorse or promote products derived
  32 *    from this software without specific prior written permission.
  33 *
  34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  45 *
  46 */
  47
  48/*
  49 * This file contains all of the code that is specific to the HFI chip
  50 */
  51
  52#include <linux/pci.h>
  53#include <linux/delay.h>
  54#include <linux/interrupt.h>
  55#include <linux/module.h>
  56
  57#include "hfi.h"
  58#include "trace.h"
  59#include "mad.h"
  60#include "pio.h"
  61#include "sdma.h"
  62#include "eprom.h"
  63#include "efivar.h"
  64#include "platform.h"
  65#include "aspm.h"
  66
  67#define NUM_IB_PORTS 1
  68
  69uint kdeth_qp;
  70module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
  71MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
  72
  73uint num_vls = HFI1_MAX_VLS_SUPPORTED;
  74module_param(num_vls, uint, S_IRUGO);
  75MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
  76
  77/*
  78 * Default time to aggregate two 10K packets from the idle state
  79 * (timer not running). The timer starts at the end of the first packet,
  80 * so only the time for one 10K packet and header plus a bit extra is needed.
   81 * 10 * 1024 + 64 header bytes = 10304 bytes
   82 * 10304 bytes / 12.5 GB/s = 824.32 ns
  83 */
  84uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
  85module_param(rcv_intr_timeout, uint, S_IRUGO);
  86MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
  87
  88uint rcv_intr_count = 16; /* same as qib */
  89module_param(rcv_intr_count, uint, S_IRUGO);
  90MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
  91
  92ushort link_crc_mask = SUPPORTED_CRCS;
  93module_param(link_crc_mask, ushort, S_IRUGO);
  94MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
  95
  96uint loopback;
  97module_param_named(loopback, loopback, uint, S_IRUGO);
   98MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
  99
 100/* Other driver tunables */
  101uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
 102static ushort crc_14b_sideband = 1;
 103static uint use_flr = 1;
 104uint quick_linkup; /* skip LNI */
 105
 106struct flag_table {
 107        u64 flag;       /* the flag */
 108        char *str;      /* description string */
 109        u16 extra;      /* extra information */
 110        u16 unused0;
 111        u32 unused1;
 112};
 113
 114/* str must be a string constant */
 115#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
 116#define FLAG_ENTRY0(str, flag) {flag, str, 0}
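/*
 * Illustrative expansion (a sketch, not part of the table definitions below):
 * given the struct flag_table layout above, an entry such as
 *
 *   FLAG_ENTRY0("CceCsrParityErr", CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK)
 *
 * expands to
 *
 *   { CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK, "CceCsrParityErr", 0 }
 *
 * i.e. the bit mask lands in .flag, the name in .str, and .extra is 0.
 * FLAG_ENTRY() is the same except that it also fills .extra, which the
 * send-side tables below use to carry the SEC_* consequence bits.
 */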
 117
 118/* Send Error Consequences */
 119#define SEC_WRITE_DROPPED       0x1
 120#define SEC_PACKET_DROPPED      0x2
 121#define SEC_SC_HALTED           0x4     /* per-context only */
 122#define SEC_SPC_FREEZE          0x8     /* per-HFI only */
 123
 124#define MIN_KERNEL_KCTXTS         2
 125#define FIRST_KERNEL_KCTXT        1
 126#define NUM_MAP_REGS             32
 127
 128/* Bit offset into the GUID which carries HFI id information */
 129#define GUID_HFI_INDEX_SHIFT     39
 130
 131/* extract the emulation revision */
 132#define emulator_rev(dd) ((dd)->irev >> 8)
 133/* parallel and serial emulation versions are 3 and 4 respectively */
 134#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
 135#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
 136
 137/* RSM fields */
 138
 139/* packet type */
 140#define IB_PACKET_TYPE         2ull
 141#define QW_SHIFT               6ull
 142/* QPN[7..1] */
 143#define QPN_WIDTH              7ull
 144
 145/* LRH.BTH: QW 0, OFFSET 48 - for match */
 146#define LRH_BTH_QW             0ull
 147#define LRH_BTH_BIT_OFFSET     48ull
 148#define LRH_BTH_OFFSET(off)    ((LRH_BTH_QW << QW_SHIFT) | (off))
 149#define LRH_BTH_MATCH_OFFSET   LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
 150#define LRH_BTH_SELECT
 151#define LRH_BTH_MASK           3ull
 152#define LRH_BTH_VALUE          2ull
 153
 154/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
 155#define LRH_SC_QW              0ull
 156#define LRH_SC_BIT_OFFSET      56ull
 157#define LRH_SC_OFFSET(off)     ((LRH_SC_QW << QW_SHIFT) | (off))
 158#define LRH_SC_MATCH_OFFSET    LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
 159#define LRH_SC_MASK            128ull
 160#define LRH_SC_VALUE           0ull
 161
 162/* SC[n..0] QW 0, OFFSET 60 - for select */
 163#define LRH_SC_SELECT_OFFSET  ((LRH_SC_QW << QW_SHIFT) | (60ull))
 164
 165/* QPN[m+n:1] QW 1, OFFSET 1 */
 166#define QPN_SELECT_OFFSET      ((1ull << QW_SHIFT) | (1ull))
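/*
 * How the match/select offsets above encode (worked out from the macros):
 * bits [5:0] hold the bit offset within a header quad word and the bits at
 * QW_SHIFT and above hold the quad word index, so the value is effectively
 * an absolute bit offset of QW * 64 + bit.  For example:
 *
 *   LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48
 *   LRH_SC_MATCH_OFFSET  = (0 << 6) | 56 = 56
 *   QPN_SELECT_OFFSET    = (1 << 6) |  1 = 65
 */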
 167
 168/* defines to build power on SC2VL table */
 169#define SC2VL_VAL( \
 170        num, \
 171        sc0, sc0val, \
 172        sc1, sc1val, \
 173        sc2, sc2val, \
 174        sc3, sc3val, \
 175        sc4, sc4val, \
 176        sc5, sc5val, \
 177        sc6, sc6val, \
 178        sc7, sc7val) \
 179( \
 180        ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
 181        ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
 182        ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
 183        ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
 184        ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
 185        ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
 186        ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
 187        ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
 188)
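/*
 * Illustrative use of SC2VL_VAL (hypothetical SC-to-VL assignments, not the
 * driver's actual power-on table):
 *
 *   SC2VL_VAL(0, 0, 0, 1, 0, 2, 1, 3, 1, 4, 2, 5, 2, 6, 3, 7, 3)
 *
 * ORs together ((u64)0 << SEND_SC2VLT0_SC0_SHIFT),
 * ((u64)0 << SEND_SC2VLT0_SC1_SHIFT), ((u64)1 << SEND_SC2VLT0_SC2_SHIFT),
 * and so on; each (sc, scval) pair places a VL value at that SC's field
 * within the SEND_SC2VLT0 CSR value.
 */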
 189
 190#define DC_SC_VL_VAL( \
 191        range, \
 192        e0, e0val, \
 193        e1, e1val, \
 194        e2, e2val, \
 195        e3, e3val, \
 196        e4, e4val, \
 197        e5, e5val, \
 198        e6, e6val, \
 199        e7, e7val, \
 200        e8, e8val, \
 201        e9, e9val, \
 202        e10, e10val, \
 203        e11, e11val, \
 204        e12, e12val, \
 205        e13, e13val, \
 206        e14, e14val, \
 207        e15, e15val) \
 208( \
 209        ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
 210        ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
 211        ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
 212        ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
 213        ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
 214        ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
 215        ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
 216        ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
 217        ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
 218        ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
 219        ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
 220        ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
 221        ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
 222        ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
 223        ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
 224        ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
 225)
 226
 227/* all CceStatus sub-block freeze bits */
 228#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
 229                        | CCE_STATUS_RXE_FROZE_SMASK \
 230                        | CCE_STATUS_TXE_FROZE_SMASK \
 231                        | CCE_STATUS_TXE_PIO_FROZE_SMASK)
 232/* all CceStatus sub-block TXE pause bits */
 233#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
 234                        | CCE_STATUS_TXE_PAUSED_SMASK \
 235                        | CCE_STATUS_SDMA_PAUSED_SMASK)
 236/* all CceStatus sub-block RXE pause bits */
 237#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
 238
 239/*
 240 * CCE Error flags.
 241 */
 242static struct flag_table cce_err_status_flags[] = {
 243/* 0*/  FLAG_ENTRY0("CceCsrParityErr",
 244                CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
 245/* 1*/  FLAG_ENTRY0("CceCsrReadBadAddrErr",
 246                CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
 247/* 2*/  FLAG_ENTRY0("CceCsrWriteBadAddrErr",
 248                CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
 249/* 3*/  FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
 250                CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
 251/* 4*/  FLAG_ENTRY0("CceTrgtAccessErr",
 252                CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
 253/* 5*/  FLAG_ENTRY0("CceRspdDataParityErr",
 254                CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
 255/* 6*/  FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
 256                CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
 257/* 7*/  FLAG_ENTRY0("CceCsrCfgBusParityErr",
 258                CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
 259/* 8*/  FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
 260                CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
 261/* 9*/  FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
 262            CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
  263/*10*/  FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
 264            CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
 265/*11*/  FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
 266            CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
 267/*12*/  FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
 268                CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
 269/*13*/  FLAG_ENTRY0("PcicRetryMemCorErr",
 270                CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
  271/*14*/  FLAG_ENTRY0("PcicRetrySotMemCorErr",
 272                CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
 273/*15*/  FLAG_ENTRY0("PcicPostHdQCorErr",
 274                CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
  275/*16*/  FLAG_ENTRY0("PcicPostDatQCorErr",
 276                CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
  277/*17*/  FLAG_ENTRY0("PcicCplHdQCorErr",
 278                CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
 279/*18*/  FLAG_ENTRY0("PcicCplDatQCorErr",
 280                CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
 281/*19*/  FLAG_ENTRY0("PcicNPostHQParityErr",
 282                CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
 283/*20*/  FLAG_ENTRY0("PcicNPostDatQParityErr",
 284                CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
 285/*21*/  FLAG_ENTRY0("PcicRetryMemUncErr",
 286                CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
 287/*22*/  FLAG_ENTRY0("PcicRetrySotMemUncErr",
 288                CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
 289/*23*/  FLAG_ENTRY0("PcicPostHdQUncErr",
 290                CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
 291/*24*/  FLAG_ENTRY0("PcicPostDatQUncErr",
 292                CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
 293/*25*/  FLAG_ENTRY0("PcicCplHdQUncErr",
 294                CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
 295/*26*/  FLAG_ENTRY0("PcicCplDatQUncErr",
 296                CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
 297/*27*/  FLAG_ENTRY0("PcicTransmitFrontParityErr",
 298                CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
 299/*28*/  FLAG_ENTRY0("PcicTransmitBackParityErr",
 300                CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
 301/*29*/  FLAG_ENTRY0("PcicReceiveParityErr",
 302                CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
 303/*30*/  FLAG_ENTRY0("CceTrgtCplTimeoutErr",
 304                CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
 305/*31*/  FLAG_ENTRY0("LATriggered",
 306                CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
 307/*32*/  FLAG_ENTRY0("CceSegReadBadAddrErr",
 308                CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
 309/*33*/  FLAG_ENTRY0("CceSegWriteBadAddrErr",
 310                CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
 311/*34*/  FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
 312                CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
 313/*35*/  FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
 314                CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
 315/*36*/  FLAG_ENTRY0("CceMsixTableCorErr",
 316                CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
 317/*37*/  FLAG_ENTRY0("CceMsixTableUncErr",
 318                CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
 319/*38*/  FLAG_ENTRY0("CceIntMapCorErr",
 320                CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
 321/*39*/  FLAG_ENTRY0("CceIntMapUncErr",
 322                CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
 323/*40*/  FLAG_ENTRY0("CceMsixCsrParityErr",
 324                CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
 325/*41-63 reserved*/
 326};
 327
 328/*
 329 * Misc Error flags
 330 */
 331#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
 332static struct flag_table misc_err_status_flags[] = {
 333/* 0*/  FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
 334/* 1*/  FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
 335/* 2*/  FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
 336/* 3*/  FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
 337/* 4*/  FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
 338/* 5*/  FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
 339/* 6*/  FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
 340/* 7*/  FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
 341/* 8*/  FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
 342/* 9*/  FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
 343/*10*/  FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
 344/*11*/  FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
 345/*12*/  FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
 346};
 347
 348/*
 349 * TXE PIO Error flags and consequences
 350 */
 351static struct flag_table pio_err_status_flags[] = {
 352/* 0*/  FLAG_ENTRY("PioWriteBadCtxt",
 353        SEC_WRITE_DROPPED,
 354        SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
 355/* 1*/  FLAG_ENTRY("PioWriteAddrParity",
 356        SEC_SPC_FREEZE,
 357        SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
 358/* 2*/  FLAG_ENTRY("PioCsrParity",
 359        SEC_SPC_FREEZE,
 360        SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
 361/* 3*/  FLAG_ENTRY("PioSbMemFifo0",
 362        SEC_SPC_FREEZE,
 363        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
 364/* 4*/  FLAG_ENTRY("PioSbMemFifo1",
 365        SEC_SPC_FREEZE,
 366        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
 367/* 5*/  FLAG_ENTRY("PioPccFifoParity",
 368        SEC_SPC_FREEZE,
 369        SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
 370/* 6*/  FLAG_ENTRY("PioPecFifoParity",
 371        SEC_SPC_FREEZE,
 372        SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
 373/* 7*/  FLAG_ENTRY("PioSbrdctlCrrelParity",
 374        SEC_SPC_FREEZE,
 375        SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
 376/* 8*/  FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
 377        SEC_SPC_FREEZE,
 378        SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
 379/* 9*/  FLAG_ENTRY("PioPktEvictFifoParityErr",
 380        SEC_SPC_FREEZE,
 381        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
 382/*10*/  FLAG_ENTRY("PioSmPktResetParity",
 383        SEC_SPC_FREEZE,
 384        SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
 385/*11*/  FLAG_ENTRY("PioVlLenMemBank0Unc",
 386        SEC_SPC_FREEZE,
 387        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
 388/*12*/  FLAG_ENTRY("PioVlLenMemBank1Unc",
 389        SEC_SPC_FREEZE,
 390        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
 391/*13*/  FLAG_ENTRY("PioVlLenMemBank0Cor",
 392        0,
 393        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
 394/*14*/  FLAG_ENTRY("PioVlLenMemBank1Cor",
 395        0,
 396        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
 397/*15*/  FLAG_ENTRY("PioCreditRetFifoParity",
 398        SEC_SPC_FREEZE,
 399        SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
 400/*16*/  FLAG_ENTRY("PioPpmcPblFifo",
 401        SEC_SPC_FREEZE,
 402        SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
 403/*17*/  FLAG_ENTRY("PioInitSmIn",
 404        0,
 405        SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
 406/*18*/  FLAG_ENTRY("PioPktEvictSmOrArbSm",
 407        SEC_SPC_FREEZE,
 408        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
 409/*19*/  FLAG_ENTRY("PioHostAddrMemUnc",
 410        SEC_SPC_FREEZE,
 411        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
 412/*20*/  FLAG_ENTRY("PioHostAddrMemCor",
 413        0,
 414        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
 415/*21*/  FLAG_ENTRY("PioWriteDataParity",
 416        SEC_SPC_FREEZE,
 417        SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
 418/*22*/  FLAG_ENTRY("PioStateMachine",
 419        SEC_SPC_FREEZE,
 420        SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
 421/*23*/  FLAG_ENTRY("PioWriteQwValidParity",
 422        SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
 423        SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
 424/*24*/  FLAG_ENTRY("PioBlockQwCountParity",
 425        SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
 426        SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
 427/*25*/  FLAG_ENTRY("PioVlfVlLenParity",
 428        SEC_SPC_FREEZE,
 429        SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
 430/*26*/  FLAG_ENTRY("PioVlfSopParity",
 431        SEC_SPC_FREEZE,
 432        SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
 433/*27*/  FLAG_ENTRY("PioVlFifoParity",
 434        SEC_SPC_FREEZE,
 435        SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
 436/*28*/  FLAG_ENTRY("PioPpmcBqcMemParity",
 437        SEC_SPC_FREEZE,
 438        SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
 439/*29*/  FLAG_ENTRY("PioPpmcSopLen",
 440        SEC_SPC_FREEZE,
 441        SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
 442/*30-31 reserved*/
 443/*32*/  FLAG_ENTRY("PioCurrentFreeCntParity",
 444        SEC_SPC_FREEZE,
 445        SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
 446/*33*/  FLAG_ENTRY("PioLastReturnedCntParity",
 447        SEC_SPC_FREEZE,
 448        SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
 449/*34*/  FLAG_ENTRY("PioPccSopHeadParity",
 450        SEC_SPC_FREEZE,
 451        SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
 452/*35*/  FLAG_ENTRY("PioPecSopHeadParityErr",
 453        SEC_SPC_FREEZE,
 454        SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
 455/*36-63 reserved*/
 456};
 457
 458/* TXE PIO errors that cause an SPC freeze */
 459#define ALL_PIO_FREEZE_ERR \
 460        (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
 461        | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
 462        | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
 463        | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
 464        | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
 465        | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
 466        | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
 467        | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
 468        | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
 469        | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
 470        | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
 471        | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
 472        | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
 473        | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
 474        | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
 475        | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
 476        | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
 477        | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
 478        | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
 479        | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
 480        | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
 481        | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
 482        | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
 483        | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
 484        | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
 485        | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
 486        | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
 487        | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
 488        | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
 489
 490/*
 491 * TXE SDMA Error flags
 492 */
 493static struct flag_table sdma_err_status_flags[] = {
 494/* 0*/  FLAG_ENTRY0("SDmaRpyTagErr",
 495                SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
 496/* 1*/  FLAG_ENTRY0("SDmaCsrParityErr",
 497                SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
 498/* 2*/  FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
 499                SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
 500/* 3*/  FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
 501                SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
 502/*04-63 reserved*/
 503};
 504
 505/* TXE SDMA errors that cause an SPC freeze */
 506#define ALL_SDMA_FREEZE_ERR  \
 507                (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
 508                | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
 509                | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
 510
 511/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
 512#define PORT_DISCARD_EGRESS_ERRS \
 513        (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
 514        | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
 515        | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
 516
 517/*
 518 * TXE Egress Error flags
 519 */
 520#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
 521static struct flag_table egress_err_status_flags[] = {
 522/* 0*/  FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
 523/* 1*/  FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
 524/* 2 reserved */
 525/* 3*/  FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
 526                SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
 527/* 4*/  FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
 528/* 5*/  FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
 529/* 6 reserved */
 530/* 7*/  FLAG_ENTRY0("TxPioLaunchIntfParityErr",
 531                SEES(TX_PIO_LAUNCH_INTF_PARITY)),
 532/* 8*/  FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
 533                SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
 534/* 9-10 reserved */
 535/*11*/  FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
 536                SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
 537/*12*/  FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
 538/*13*/  FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
 539/*14*/  FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
 540/*15*/  FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
 541/*16*/  FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
 542                SEES(TX_SDMA0_DISALLOWED_PACKET)),
 543/*17*/  FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
 544                SEES(TX_SDMA1_DISALLOWED_PACKET)),
 545/*18*/  FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
 546                SEES(TX_SDMA2_DISALLOWED_PACKET)),
 547/*19*/  FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
 548                SEES(TX_SDMA3_DISALLOWED_PACKET)),
 549/*20*/  FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
 550                SEES(TX_SDMA4_DISALLOWED_PACKET)),
 551/*21*/  FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
 552                SEES(TX_SDMA5_DISALLOWED_PACKET)),
 553/*22*/  FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
 554                SEES(TX_SDMA6_DISALLOWED_PACKET)),
 555/*23*/  FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
 556                SEES(TX_SDMA7_DISALLOWED_PACKET)),
 557/*24*/  FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
 558                SEES(TX_SDMA8_DISALLOWED_PACKET)),
 559/*25*/  FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
 560                SEES(TX_SDMA9_DISALLOWED_PACKET)),
 561/*26*/  FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
 562                SEES(TX_SDMA10_DISALLOWED_PACKET)),
 563/*27*/  FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
 564                SEES(TX_SDMA11_DISALLOWED_PACKET)),
 565/*28*/  FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
 566                SEES(TX_SDMA12_DISALLOWED_PACKET)),
 567/*29*/  FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
 568                SEES(TX_SDMA13_DISALLOWED_PACKET)),
 569/*30*/  FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
 570                SEES(TX_SDMA14_DISALLOWED_PACKET)),
 571/*31*/  FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
 572                SEES(TX_SDMA15_DISALLOWED_PACKET)),
 573/*32*/  FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
 574                SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
 575/*33*/  FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
 576                SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
 577/*34*/  FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
 578                SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
 579/*35*/  FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
 580                SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
 581/*36*/  FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
 582                SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
 583/*37*/  FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
 584                SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
 585/*38*/  FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
 586                SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
 587/*39*/  FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
 588                SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
 589/*40*/  FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
 590                SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
 591/*41*/  FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
 592/*42*/  FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
 593/*43*/  FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
 594/*44*/  FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
 595/*45*/  FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
 596/*46*/  FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
 597/*47*/  FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
 598/*48*/  FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
 599/*49*/  FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
 600/*50*/  FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
 601/*51*/  FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
 602/*52*/  FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
 603/*53*/  FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
 604/*54*/  FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
 605/*55*/  FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
 606/*56*/  FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
 607/*57*/  FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
 608/*58*/  FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
 609/*59*/  FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
 610/*60*/  FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
 611/*61*/  FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
 612/*62*/  FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
 613                SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
 614/*63*/  FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
 615                SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
 616};
 617
 618/*
 619 * TXE Egress Error Info flags
 620 */
 621#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
 622static struct flag_table egress_err_info_flags[] = {
 623/* 0*/  FLAG_ENTRY0("Reserved", 0ull),
 624/* 1*/  FLAG_ENTRY0("VLErr", SEEI(VL)),
 625/* 2*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
 626/* 3*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
 627/* 4*/  FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
 628/* 5*/  FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
 629/* 6*/  FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
 630/* 7*/  FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
 631/* 8*/  FLAG_ENTRY0("RawErr", SEEI(RAW)),
 632/* 9*/  FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
 633/*10*/  FLAG_ENTRY0("GRHErr", SEEI(GRH)),
 634/*11*/  FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
 635/*12*/  FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
 636/*13*/  FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
 637/*14*/  FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
 638/*15*/  FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
 639/*16*/  FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
 640/*17*/  FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
 641/*18*/  FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
 642/*19*/  FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
 643/*20*/  FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
 644/*21*/  FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
 645};
 646
 647/* TXE Egress errors that cause an SPC freeze */
 648#define ALL_TXE_EGRESS_FREEZE_ERR \
 649        (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
 650        | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
 651        | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
 652        | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
 653        | SEES(TX_LAUNCH_CSR_PARITY) \
 654        | SEES(TX_SBRD_CTL_CSR_PARITY) \
 655        | SEES(TX_CONFIG_PARITY) \
 656        | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
 657        | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
 658        | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
 659        | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
 660        | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
 661        | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
 662        | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
 663        | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
 664        | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
 665        | SEES(TX_CREDIT_RETURN_PARITY))
 666
 667/*
 668 * TXE Send error flags
 669 */
 670#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
 671static struct flag_table send_err_status_flags[] = {
 672/* 0*/  FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
 673/* 1*/  FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
 674/* 2*/  FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
 675};
 676
 677/*
 678 * TXE Send Context Error flags and consequences
 679 */
 680static struct flag_table sc_err_status_flags[] = {
 681/* 0*/  FLAG_ENTRY("InconsistentSop",
 682                SEC_PACKET_DROPPED | SEC_SC_HALTED,
 683                SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
 684/* 1*/  FLAG_ENTRY("DisallowedPacket",
 685                SEC_PACKET_DROPPED | SEC_SC_HALTED,
 686                SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
 687/* 2*/  FLAG_ENTRY("WriteCrossesBoundary",
 688                SEC_WRITE_DROPPED | SEC_SC_HALTED,
 689                SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
 690/* 3*/  FLAG_ENTRY("WriteOverflow",
 691                SEC_WRITE_DROPPED | SEC_SC_HALTED,
 692                SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
 693/* 4*/  FLAG_ENTRY("WriteOutOfBounds",
 694                SEC_WRITE_DROPPED | SEC_SC_HALTED,
 695                SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
 696/* 5-63 reserved*/
 697};
 698
 699/*
 700 * RXE Receive Error flags
 701 */
 702#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
 703static struct flag_table rxe_err_status_flags[] = {
 704/* 0*/  FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
 705/* 1*/  FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
 706/* 2*/  FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
 707/* 3*/  FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
 708/* 4*/  FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
 709/* 5*/  FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
 710/* 6*/  FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
 711/* 7*/  FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
 712/* 8*/  FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
 713/* 9*/  FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
 714/*10*/  FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
 715/*11*/  FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
 716/*12*/  FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
 717/*13*/  FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
 718/*14*/  FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
 719/*15*/  FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
 720/*16*/  FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
 721                RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
 722/*17*/  FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
 723/*18*/  FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
 724/*19*/  FLAG_ENTRY0("RxRbufBlockListReadUncErr",
 725                RXES(RBUF_BLOCK_LIST_READ_UNC)),
 726/*20*/  FLAG_ENTRY0("RxRbufBlockListReadCorErr",
 727                RXES(RBUF_BLOCK_LIST_READ_COR)),
 728/*21*/  FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
 729                RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
 730/*22*/  FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
 731                RXES(RBUF_CSR_QENT_CNT_PARITY)),
 732/*23*/  FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
 733                RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
 734/*24*/  FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
 735                RXES(RBUF_CSR_QVLD_BIT_PARITY)),
 736/*25*/  FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
 737/*26*/  FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
 738/*27*/  FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
 739                RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
 740/*28*/  FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
 741/*29*/  FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
 742/*30*/  FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
 743/*31*/  FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
 744/*32*/  FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
 745/*33*/  FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
 746/*34*/  FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
 747/*35*/  FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
 748                RXES(RBUF_FL_INITDONE_PARITY)),
 749/*36*/  FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
 750                RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
 751/*37*/  FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
 752/*38*/  FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
 753/*39*/  FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
 754/*40*/  FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
 755                RXES(LOOKUP_DES_PART1_UNC_COR)),
 756/*41*/  FLAG_ENTRY0("RxLookupDesPart2ParityErr",
 757                RXES(LOOKUP_DES_PART2_PARITY)),
 758/*42*/  FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
 759/*43*/  FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
 760/*44*/  FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
 761/*45*/  FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
 762/*46*/  FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
 763/*47*/  FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
 764/*48*/  FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
 765/*49*/  FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
 766/*50*/  FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
 767/*51*/  FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
 768/*52*/  FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
 769/*53*/  FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
 770/*54*/  FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
 771/*55*/  FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
 772/*56*/  FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
 773/*57*/  FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
 774/*58*/  FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
 775/*59*/  FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
 776/*60*/  FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
 777/*61*/  FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
 778/*62*/  FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
 779/*63*/  FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
 780};
 781
 782/* RXE errors that will trigger an SPC freeze */
 783#define ALL_RXE_FREEZE_ERR  \
 784        (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
 785        | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
 786        | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
 787        | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
 788        | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
 789        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
 790        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
 791        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
 792        | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
 793        | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
 794        | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
 795        | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
 796        | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
 797        | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
 798        | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
 799        | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
 800        | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
 801        | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
 802        | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
 803        | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
 804        | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
 805        | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
 806        | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
 807        | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
 808        | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
 809        | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
 810        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
 811        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
 812        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
 813        | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
 814        | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
 815        | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
 816        | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
 817        | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
 818        | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
 819        | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
 820        | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
 821        | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
 822        | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
 823        | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
 824        | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
 825        | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
 826        | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
 827        | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
 828
 829#define RXE_FREEZE_ABORT_MASK \
 830        (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
 831        RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
 832        RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
 833
 834/*
 835 * DCC Error Flags
 836 */
 837#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
 838static struct flag_table dcc_err_flags[] = {
 839        FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
 840        FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
 841        FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
 842        FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
 843        FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
 844        FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
 845        FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
 846        FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
 847        FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
 848        FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
 849        FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
 850        FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
 851        FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
 852        FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
 853        FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
 854        FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
 855        FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
 856        FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
 857        FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
 858        FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
 859        FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
 860        FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
 861        FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
 862        FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
 863        FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
 864        FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
 865        FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
 866        FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
 867        FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
 868        FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
 869        FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
 870        FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
 871        FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
 872        FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
 873        FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
 874        FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
 875        FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
 876        FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
 877        FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
 878        FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
 879        FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
 880        FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
 881        FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
 882        FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
 883        FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
 884        FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
 885};
 886
 887/*
 888 * LCB error flags
 889 */
 890#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
 891static struct flag_table lcb_err_flags[] = {
 892/* 0*/  FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
 893/* 1*/  FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
 894/* 2*/  FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
 895/* 3*/  FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
 896                LCBE(ALL_LNS_FAILED_REINIT_TEST)),
 897/* 4*/  FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
 898/* 5*/  FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
 899/* 6*/  FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
 900/* 7*/  FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
 901/* 8*/  FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
 902/* 9*/  FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
 903/*10*/  FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
 904/*11*/  FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
 905/*12*/  FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
 906/*13*/  FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
 907                LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
 908/*14*/  FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
 909/*15*/  FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
 910/*16*/  FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
 911/*17*/  FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
 912/*18*/  FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
 913/*19*/  FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
 914                LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
 915/*20*/  FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
 916/*21*/  FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
 917/*22*/  FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
 918/*23*/  FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
 919/*24*/  FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
 920/*25*/  FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
 921/*26*/  FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
 922                LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
 923/*27*/  FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
 924/*28*/  FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
 925                LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
 926/*29*/  FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
 927                LCBE(REDUNDANT_FLIT_PARITY_ERR))
 928};
 929
 930/*
 931 * DC8051 Error Flags
 932 */
 933#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
 934static struct flag_table dc8051_err_flags[] = {
 935        FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
 936        FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
 937        FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
 938        FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
 939        FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
 940        FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
 941        FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
 942        FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
 943        FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
 944                    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
 945        FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
 946};
 947
 948/*
 949 * DC8051 Information Error flags
 950 *
 951 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 952 */
 953static struct flag_table dc8051_info_err_flags[] = {
 954        FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
 955        FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
 956        FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
 957        FLAG_ENTRY0("Serdes internal loopback failure",
 958                    FAILED_SERDES_INTERNAL_LOOPBACK),
 959        FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
 960        FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
 961        FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
 962        FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
 963        FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
 964        FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
 965        FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
 966        FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
 967        FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT)
 968};
 969
 970/*
 971 * DC8051 Information Host Information flags
 972 *
 973 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 974 */
 975static struct flag_table dc8051_info_host_msg_flags[] = {
 976        FLAG_ENTRY0("Host request done", 0x0001),
 977        FLAG_ENTRY0("BC SMA message", 0x0002),
 978        FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
 979        FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
 980        FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
 981        FLAG_ENTRY0("External device config request", 0x0020),
 982        FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
 983        FLAG_ENTRY0("LinkUp achieved", 0x0080),
 984        FLAG_ENTRY0("Link going down", 0x0100),
 985};
 986
 987static u32 encoded_size(u32 size);
 988static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
 989static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
 990static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
 991                               u8 *continuous);
 992static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
 993                                  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
 994static void read_vc_remote_link_width(struct hfi1_devdata *dd,
 995                                      u8 *remote_tx_rate, u16 *link_widths);
 996static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
 997                                     u8 *flag_bits, u16 *link_widths);
 998static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
 999                                  u8 *device_rev);
1000static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
1001static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1002static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1003                            u8 *tx_polarity_inversion,
1004                            u8 *rx_polarity_inversion, u8 *max_rate);
1005static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1006                                unsigned int context, u64 err_status);
1007static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1008static void handle_dcc_err(struct hfi1_devdata *dd,
1009                           unsigned int context, u64 err_status);
1010static void handle_lcb_err(struct hfi1_devdata *dd,
1011                           unsigned int context, u64 err_status);
1012static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1013static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1014static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1015static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1016static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1017static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1018static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1019static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1020static void set_partition_keys(struct hfi1_pportdata *);
1021static const char *link_state_name(u32 state);
1022static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1023                                          u32 state);
1024static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1025                           u64 *out_data);
1026static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1027static int thermal_init(struct hfi1_devdata *dd);
1028
1029static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1030                                  int msecs);
1031static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1032static void handle_temp_err(struct hfi1_devdata *);
1033static void dc_shutdown(struct hfi1_devdata *);
1034static void dc_start(struct hfi1_devdata *);
1035
1036/*
1037 * Error interrupt table entry.  This is used as input to the interrupt
 1038 * "clear down" routine used for all second tier error interrupt registers.
1039 * Second tier interrupt registers have a single bit representing them
1040 * in the top-level CceIntStatus.
1041 */
1042struct err_reg_info {
1043        u32 status;             /* status CSR offset */
1044        u32 clear;              /* clear CSR offset */
1045        u32 mask;               /* mask CSR offset */
1046        void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1047        const char *desc;
1048};
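/*
 * Sketch of how an err_reg_info entry is typically consumed (illustrative
 * only; the actual clear-down routine appears later in this file): read the
 * second-tier status CSR, write the value back to the clear CSR so the bits
 * just seen are acknowledged, then hand the value to the handler, e.g.
 *
 *   u64 reg = read_csr(dd, eri->status);
 *   write_csr(dd, eri->clear, reg);
 *   if (eri->handler)
 *           eri->handler(dd, source, reg);
 */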
1049
1050#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1051#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1052#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1053
1054/*
1055 * Helpers for building HFI and DC error interrupt table entries.  Different
1056 * helpers are needed because of inconsistent register names.
1057 */
1058#define EE(reg, handler, desc) \
1059        { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1060                handler, desc }
1061#define DC_EE1(reg, handler, desc) \
1062        { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1063#define DC_EE2(reg, handler, desc) \
1064        { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
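/*
 * Illustrative expansions (mechanical results of the token pasting above,
 * using entries from the tables below):
 *
 *   EE(CCE_ERR, handle_cce_err, "CceErr")
 *     -> { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 *          handle_cce_err, "CceErr" }
 *
 *   DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err")
 *     -> { DCC_ERR_FLG, DCC_ERR_FLG_CLR, DCC_ERR_FLG_EN,
 *          handle_dcc_err, "DCC Err" }
 *
 *   DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err")
 *     -> { DC_LCB_ERR_FLG, DC_LCB_ERR_CLR, DC_LCB_ERR_EN,
 *          handle_lcb_err, "LCB Err" }
 */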
1065
1066/*
1067 * Table of the "misc" grouping of error interrupts.  Each entry refers to
1068 * another register containing more information.
1069 */
1070static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1071/* 0*/  EE(CCE_ERR,             handle_cce_err,    "CceErr"),
1072/* 1*/  EE(RCV_ERR,             handle_rxe_err,    "RxeErr"),
1073/* 2*/  EE(MISC_ERR,    handle_misc_err,   "MiscErr"),
1074/* 3*/  { 0, 0, 0, NULL }, /* reserved */
1075/* 4*/  EE(SEND_PIO_ERR,    handle_pio_err,    "PioErr"),
1076/* 5*/  EE(SEND_DMA_ERR,    handle_sdma_err,   "SDmaErr"),
1077/* 6*/  EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1078/* 7*/  EE(SEND_ERR,    handle_txe_err,    "TxeErr")
1079        /* the rest are reserved */
1080};
1081
1082/*
1083 * Index into the Various section of the interrupt sources
1084 * corresponding to the Critical Temperature interrupt.
1085 */
1086#define TCRIT_INT_SOURCE 4
1087
1088/*
1089 * SDMA error interrupt entry - refers to another register containing more
1090 * information.
1091 */
1092static const struct err_reg_info sdma_eng_err =
1093        EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1094
1095static const struct err_reg_info various_err[NUM_VARIOUS] = {
1096/* 0*/  { 0, 0, 0, NULL }, /* PbcInt */
1097/* 1*/  { 0, 0, 0, NULL }, /* GpioAssertInt */
1098/* 2*/  EE(ASIC_QSFP1,  handle_qsfp_int,        "QSFP1"),
1099/* 3*/  EE(ASIC_QSFP2,  handle_qsfp_int,        "QSFP2"),
1100/* 4*/  { 0, 0, 0, NULL }, /* TCritInt */
1101        /* rest are reserved */
1102};
1103
1104/*
1105 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1106 * register cannot be derived from the MTU value because 10K is not
1107 * a power of 2. Therefore, we need a constant. Everything else can
1108 * be calculated.
1109 */
1110#define DCC_CFG_PORT_MTU_CAP_10240 7
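/*
 * Editorial note: the other supported MTUs (256 through 8192) are powers
 * of two, so log2(MTU) is an integer and their encoding can be computed;
 * 10240 = 10 * 1024 is not a power of two (log2(10240) is roughly 13.32),
 * hence the fixed encoding of 7 above.
 */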
1111
1112/*
1113 * Table of the DC grouping of error interrupts.  Each entry refers to
1114 * another register containing more information.
1115 */
1116static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1117/* 0*/  DC_EE1(DCC_ERR,         handle_dcc_err,        "DCC Err"),
1118/* 1*/  DC_EE2(DC_LCB_ERR,      handle_lcb_err,        "LCB Err"),
1119/* 2*/  DC_EE2(DC_DC8051_ERR,   handle_8051_interrupt, "DC8051 Interrupt"),
1120/* 3*/  /* dc_lbm_int - special, see is_dc_int() */
1121        /* the rest are reserved */
1122};
1123
1124struct cntr_entry {
1125        /*
1126         * counter name
1127         */
1128        char *name;
1129
1130        /*
1131         * csr to read for name (if applicable)
1132         */
1133        u64 csr;
1134
1135        /*
1136         * offset into dd or ppd to store the counter's value
1137         */
1138        int offset;
1139
1140        /*
1141         * flags
1142         */
1143        u8 flags;
1144
1145        /*
1146         * accessor for stat element, context either dd or ppd
1147         */
1148        u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1149                       int mode, u64 data);
1150};
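/*
 * Editorial note: the counter framework hands rw_cntr() an opaque context.
 * By convention in this file, the dev_access_* helpers and the device-wide
 * software counters below cast it to struct hfi1_devdata *, while the
 * port_access_* helpers and per-port software counters cast it to
 * struct hfi1_pportdata *.
 */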
1151
1152#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1153#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1154
1155#define CNTR_ELEM(name, csr, offset, flags, accessor) \
1156{ \
1157        name, \
1158        csr, \
1159        offset, \
1160        flags, \
1161        accessor \
1162}
1163
1164/* 32bit RXE */
1165#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1166CNTR_ELEM(#name, \
1167          (counter * 8 + RCV_COUNTER_ARRAY32), \
1168          0, flags | CNTR_32BIT, \
1169          port_access_u32_csr)
1170
1171#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1172CNTR_ELEM(#name, \
1173          (counter * 8 + RCV_COUNTER_ARRAY32), \
1174          0, flags | CNTR_32BIT, \
1175          dev_access_u32_csr)
1176
1177/* 64bit RXE */
1178#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1179CNTR_ELEM(#name, \
1180          (counter * 8 + RCV_COUNTER_ARRAY64), \
1181          0, flags, \
1182          port_access_u64_csr)
1183
1184#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1185CNTR_ELEM(#name, \
1186          (counter * 8 + RCV_COUNTER_ARRAY64), \
1187          0, flags, \
1188          dev_access_u64_csr)
1189
1190#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1191#define OVR_ELM(ctx) \
1192CNTR_ELEM("RcvHdrOvr" #ctx, \
1193          (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1194          0, CNTR_NORMAL, port_access_u64_csr)
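/*
 * Illustrative expansion (editorial note): OVR_ELM(0) becomes
 *
 *	CNTR_ELEM("RcvHdrOvr0", RCV_HDR_OVFL_CNT + 0 * 0x100, 0,
 *		  CNTR_NORMAL, port_access_u64_csr)
 *
 * i.e. the per-context overflow counters start at RCV_HDR_OVFL_CNT and sit
 * 0x100 bytes apart, one entry per receive context (0..159, per the
 * C_RCV_HDR_OVF_FIRST/_LAST range above).
 */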
1195
1196/* 32bit TXE */
1197#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1198CNTR_ELEM(#name, \
1199          (counter * 8 + SEND_COUNTER_ARRAY32), \
1200          0, flags | CNTR_32BIT, \
1201          port_access_u32_csr)
1202
1203/* 64bit TXE */
1204#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1205CNTR_ELEM(#name, \
1206          (counter * 8 + SEND_COUNTER_ARRAY64), \
1207          0, flags, \
1208          port_access_u64_csr)
1209
1210#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1211CNTR_ELEM(#name, \
1212          counter * 8 + SEND_COUNTER_ARRAY64, \
1213          0, \
1214          flags, \
1215          dev_access_u64_csr)
1216
1217/* CCE */
1218#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1219CNTR_ELEM(#name, \
1220          (counter * 8 + CCE_COUNTER_ARRAY32), \
1221          0, flags | CNTR_32BIT, \
1222          dev_access_u32_csr)
1223
1224#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1225CNTR_ELEM(#name, \
1226          (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1227          0, flags | CNTR_32BIT, \
1228          dev_access_u32_csr)
1229
1230/* DC */
1231#define DC_PERF_CNTR(name, counter, flags) \
1232CNTR_ELEM(#name, \
1233          counter, \
1234          0, \
1235          flags, \
1236          dev_access_u64_csr)
1237
1238#define DC_PERF_CNTR_LCB(name, counter, flags) \
1239CNTR_ELEM(#name, \
1240          counter, \
1241          0, \
1242          flags, \
1243          dc_access_lcb_cntr)
1244
1245/* ibp counters */
1246#define SW_IBP_CNTR(name, cntr) \
1247CNTR_ELEM(#name, \
1248          0, \
1249          0, \
1250          CNTR_SYNTH, \
1251          access_ibp_##cntr)
1252
1253u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1254{
1255        if (dd->flags & HFI1_PRESENT) {
1256                return readq((void __iomem *)dd->kregbase + offset);
1257        }
1258        return -1;
1259}
1260
1261void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1262{
1263        if (dd->flags & HFI1_PRESENT)
1264                writeq(value, (void __iomem *)dd->kregbase + offset);
1265}
1266
1267void __iomem *get_csr_addr(
1268        struct hfi1_devdata *dd,
1269        u32 offset)
1270{
1271        return (void __iomem *)dd->kregbase + offset;
1272}
1273
1274static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1275                                 int mode, u64 value)
1276{
1277        u64 ret;
1278
1279        if (mode == CNTR_MODE_R) {
1280                ret = read_csr(dd, csr);
1281        } else if (mode == CNTR_MODE_W) {
1282                write_csr(dd, csr, value);
1283                ret = value;
1284        } else {
1285                dd_dev_err(dd, "Invalid cntr register access mode");
1286                return 0;
1287        }
1288
1289        hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1290        return ret;
1291}
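/*
 * Illustrative use (editorial note): the accessors below call this helper
 * with the mode handed down by the counter framework:
 *
 *	val = read_write_csr(dd, csr, CNTR_MODE_R, 0);	reads back the CSR
 *	ret = read_write_csr(dd, csr, CNTR_MODE_W, 0);	writes it (e.g. to zero)
 *
 * Any other mode is reported with dd_dev_err() and yields 0.
 */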
1292
1293/* Dev Access */
1294static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1295                              void *context, int vl, int mode, u64 data)
1296{
1297        struct hfi1_devdata *dd = context;
1298        u64 csr = entry->csr;
1299
1300        if (entry->flags & CNTR_SDMA) {
1301                if (vl == CNTR_INVALID_VL)
1302                        return 0;
1303                csr += 0x100 * vl;
1304        } else {
1305                if (vl != CNTR_INVALID_VL)
1306                        return 0;
1307        }
1308        return read_write_csr(dd, csr, mode, data);
1309}
1310
1311static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1312                              void *context, int idx, int mode, u64 data)
1313{
1314        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1315
1316        if (dd->per_sdma && idx < dd->num_sdma)
1317                return dd->per_sdma[idx].err_cnt;
1318        return 0;
1319}
1320
1321static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1322                              void *context, int idx, int mode, u64 data)
1323{
1324        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1325
1326        if (dd->per_sdma && idx < dd->num_sdma)
1327                return dd->per_sdma[idx].sdma_int_cnt;
1328        return 0;
1329}
1330
1331static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1332                                   void *context, int idx, int mode, u64 data)
1333{
1334        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1335
1336        if (dd->per_sdma && idx < dd->num_sdma)
1337                return dd->per_sdma[idx].idle_int_cnt;
1338        return 0;
1339}
1340
1341static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1342                                       void *context, int idx, int mode,
1343                                       u64 data)
1344{
1345        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1346
1347        if (dd->per_sdma && idx < dd->num_sdma)
1348                return dd->per_sdma[idx].progress_int_cnt;
1349        return 0;
1350}
1351
1352static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1353                              int vl, int mode, u64 data)
1354{
1355        struct hfi1_devdata *dd = context;
1356
1357        u64 val = 0;
1358        u64 csr = entry->csr;
1359
1360        if (entry->flags & CNTR_VL) {
1361                if (vl == CNTR_INVALID_VL)
1362                        return 0;
1363                csr += 8 * vl;
1364        } else {
1365                if (vl != CNTR_INVALID_VL)
1366                        return 0;
1367        }
1368
1369        val = read_write_csr(dd, csr, mode, data);
1370        return val;
1371}
1372
1373static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1374                              int vl, int mode, u64 data)
1375{
1376        struct hfi1_devdata *dd = context;
1377        u32 csr = entry->csr;
1378        int ret = 0;
1379
1380        if (vl != CNTR_INVALID_VL)
1381                return 0;
1382        if (mode == CNTR_MODE_R)
1383                ret = read_lcb_csr(dd, csr, &data);
1384        else if (mode == CNTR_MODE_W)
1385                ret = write_lcb_csr(dd, csr, data);
1386
1387        if (ret) {
1388                dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1389                return 0;
1390        }
1391
1392        hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1393        return data;
1394}
1395
1396/* Port Access */
1397static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1398                               int vl, int mode, u64 data)
1399{
1400        struct hfi1_pportdata *ppd = context;
1401
1402        if (vl != CNTR_INVALID_VL)
1403                return 0;
1404        return read_write_csr(ppd->dd, entry->csr, mode, data);
1405}
1406
1407static u64 port_access_u64_csr(const struct cntr_entry *entry,
1408                               void *context, int vl, int mode, u64 data)
1409{
1410        struct hfi1_pportdata *ppd = context;
1411        u64 val;
1412        u64 csr = entry->csr;
1413
1414        if (entry->flags & CNTR_VL) {
1415                if (vl == CNTR_INVALID_VL)
1416                        return 0;
1417                csr += 8 * vl;
1418        } else {
1419                if (vl != CNTR_INVALID_VL)
1420                        return 0;
1421        }
1422        val = read_write_csr(ppd->dd, csr, mode, data);
1423        return val;
1424}
1425
1426/* Software defined */
1427static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1428                                u64 data)
1429{
1430        u64 ret;
1431
1432        if (mode == CNTR_MODE_R) {
1433                ret = *cntr;
1434        } else if (mode == CNTR_MODE_W) {
1435                *cntr = data;
1436                ret = data;
1437        } else {
1438                dd_dev_err(dd, "Invalid cntr sw access mode");
1439                return 0;
1440        }
1441
1442        hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1443
1444        return ret;
1445}
1446
1447static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1448                                 int vl, int mode, u64 data)
1449{
1450        struct hfi1_pportdata *ppd = context;
1451
1452        if (vl != CNTR_INVALID_VL)
1453                return 0;
1454        return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1455}
1456
1457static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1458                                 int vl, int mode, u64 data)
1459{
1460        struct hfi1_pportdata *ppd = context;
1461
1462        if (vl != CNTR_INVALID_VL)
1463                return 0;
1464        return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1465}
1466
1467static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1468                                       void *context, int vl, int mode,
1469                                       u64 data)
1470{
1471        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1472
1473        if (vl != CNTR_INVALID_VL)
1474                return 0;
1475        return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1476}
1477
1478static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1479                                   void *context, int vl, int mode, u64 data)
1480{
1481        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1482        u64 zero = 0;
1483        u64 *counter;
1484
1485        if (vl == CNTR_INVALID_VL)
1486                counter = &ppd->port_xmit_discards;
1487        else if (vl >= 0 && vl < C_VL_COUNT)
1488                counter = &ppd->port_xmit_discards_vl[vl];
1489        else
1490                counter = &zero;
1491
1492        return read_write_sw(ppd->dd, counter, mode, data);
1493}
1494
1495static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1496                                       void *context, int vl, int mode,
1497                                       u64 data)
1498{
1499        struct hfi1_pportdata *ppd = context;
1500
1501        if (vl != CNTR_INVALID_VL)
1502                return 0;
1503
1504        return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1505                             mode, data);
1506}
1507
1508static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1509                                      void *context, int vl, int mode, u64 data)
1510{
1511        struct hfi1_pportdata *ppd = context;
1512
1513        if (vl != CNTR_INVALID_VL)
1514                return 0;
1515
1516        return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1517                             mode, data);
1518}
1519
1520u64 get_all_cpu_total(u64 __percpu *cntr)
1521{
1522        int cpu;
1523        u64 counter = 0;
1524
1525        for_each_possible_cpu(cpu)
1526                counter += *per_cpu_ptr(cntr, cpu);
1527        return counter;
1528}
1529
1530static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1531                          u64 __percpu *cntr,
1532                          int vl, int mode, u64 data)
1533{
1534        u64 ret = 0;
1535
1536        if (vl != CNTR_INVALID_VL)
1537                return 0;
1538
1539        if (mode == CNTR_MODE_R) {
1540                ret = get_all_cpu_total(cntr) - *z_val;
1541        } else if (mode == CNTR_MODE_W) {
1542                /* A write can only zero the counter */
1543                if (data == 0)
1544                        *z_val = get_all_cpu_total(cntr);
1545                else
1546                        dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1547        } else {
1548                dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1549                return 0;
1550        }
1551
1552        return ret;
1553}
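/*
 * Editorial note on the per-CPU scheme above: the per-CPU counters are
 * never cleared in place.  A read reports get_all_cpu_total(cntr) - *z_val,
 * and "zeroing" (a write of 0) simply advances the baseline:
 *
 *	*z_val = get_all_cpu_total(cntr);
 *
 * so the next read starts from 0 while the underlying per-CPU values keep
 * accumulating.
 */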
1554
1555static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1556                              void *context, int vl, int mode, u64 data)
1557{
1558        struct hfi1_devdata *dd = context;
1559
1560        return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1561                              mode, data);
1562}
1563
1564static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1565                                   void *context, int vl, int mode, u64 data)
1566{
1567        struct hfi1_devdata *dd = context;
1568
1569        return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1570                              mode, data);
1571}
1572
1573static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1574                              void *context, int vl, int mode, u64 data)
1575{
1576        struct hfi1_devdata *dd = context;
1577
1578        return dd->verbs_dev.n_piowait;
1579}
1580
1581static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1582                               void *context, int vl, int mode, u64 data)
1583{
1584        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1585
1586        return dd->verbs_dev.n_piodrain;
1587}
1588
1589static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1590                              void *context, int vl, int mode, u64 data)
1591{
1592        struct hfi1_devdata *dd = context;
1593
1594        return dd->verbs_dev.n_txwait;
1595}
1596
1597static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1598                               void *context, int vl, int mode, u64 data)
1599{
1600        struct hfi1_devdata *dd = context;
1601
1602        return dd->verbs_dev.n_kmem_wait;
1603}
1604
1605static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1606                                   void *context, int vl, int mode, u64 data)
1607{
1608        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1609
1610        return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1611                              mode, data);
1612}
1613
1614/* Software counters for the error status bits within MISC_ERR_STATUS */
1615static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1616                                             void *context, int vl, int mode,
1617                                             u64 data)
1618{
1619        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1620
1621        return dd->misc_err_status_cnt[12];
1622}
1623
1624static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1625                                          void *context, int vl, int mode,
1626                                          u64 data)
1627{
1628        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1629
1630        return dd->misc_err_status_cnt[11];
1631}
1632
1633static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1634                                               void *context, int vl, int mode,
1635                                               u64 data)
1636{
1637        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1638
1639        return dd->misc_err_status_cnt[10];
1640}
1641
1642static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1643                                                 void *context, int vl,
1644                                                 int mode, u64 data)
1645{
1646        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1647
1648        return dd->misc_err_status_cnt[9];
1649}
1650
1651static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1652                                           void *context, int vl, int mode,
1653                                           u64 data)
1654{
1655        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1656
1657        return dd->misc_err_status_cnt[8];
1658}
1659
1660static u64 access_misc_efuse_read_bad_addr_err_cnt(
1661                                const struct cntr_entry *entry,
1662                                void *context, int vl, int mode, u64 data)
1663{
1664        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1665
1666        return dd->misc_err_status_cnt[7];
1667}
1668
1669static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1670                                                void *context, int vl,
1671                                                int mode, u64 data)
1672{
1673        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1674
1675        return dd->misc_err_status_cnt[6];
1676}
1677
1678static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1679                                              void *context, int vl, int mode,
1680                                              u64 data)
1681{
1682        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1683
1684        return dd->misc_err_status_cnt[5];
1685}
1686
1687static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1688                                            void *context, int vl, int mode,
1689                                            u64 data)
1690{
1691        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1692
1693        return dd->misc_err_status_cnt[4];
1694}
1695
1696static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1697                                                 void *context, int vl,
1698                                                 int mode, u64 data)
1699{
1700        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1701
1702        return dd->misc_err_status_cnt[3];
1703}
1704
1705static u64 access_misc_csr_write_bad_addr_err_cnt(
1706                                const struct cntr_entry *entry,
1707                                void *context, int vl, int mode, u64 data)
1708{
1709        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1710
1711        return dd->misc_err_status_cnt[2];
1712}
1713
1714static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1715                                                 void *context, int vl,
1716                                                 int mode, u64 data)
1717{
1718        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1719
1720        return dd->misc_err_status_cnt[1];
1721}
1722
1723static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1724                                          void *context, int vl, int mode,
1725                                          u64 data)
1726{
1727        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1728
1729        return dd->misc_err_status_cnt[0];
1730}
1731
1732/*
1733 * Software counter for the aggregate of
1734 * individual CceErrStatus counters
1735 */
1736static u64 access_sw_cce_err_status_aggregated_cnt(
1737                                const struct cntr_entry *entry,
1738                                void *context, int vl, int mode, u64 data)
1739{
1740        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1741
1742        return dd->sw_cce_err_status_aggregate;
1743}
1744
1745/*
1746 * Software counters corresponding to each of the
1747 * error status bits within CceErrStatus
1748 */
1749static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1750                                              void *context, int vl, int mode,
1751                                              u64 data)
1752{
1753        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1754
1755        return dd->cce_err_status_cnt[40];
1756}
1757
1758static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1759                                          void *context, int vl, int mode,
1760                                          u64 data)
1761{
1762        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1763
1764        return dd->cce_err_status_cnt[39];
1765}
1766
1767static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1768                                          void *context, int vl, int mode,
1769                                          u64 data)
1770{
1771        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1772
1773        return dd->cce_err_status_cnt[38];
1774}
1775
1776static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1777                                             void *context, int vl, int mode,
1778                                             u64 data)
1779{
1780        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1781
1782        return dd->cce_err_status_cnt[37];
1783}
1784
1785static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1786                                             void *context, int vl, int mode,
1787                                             u64 data)
1788{
1789        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1790
1791        return dd->cce_err_status_cnt[36];
1792}
1793
1794static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1795                                const struct cntr_entry *entry,
1796                                void *context, int vl, int mode, u64 data)
1797{
1798        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1799
1800        return dd->cce_err_status_cnt[35];
1801}
1802
1803static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1804                                const struct cntr_entry *entry,
1805                                void *context, int vl, int mode, u64 data)
1806{
1807        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1808
1809        return dd->cce_err_status_cnt[34];
1810}
1811
1812static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1813                                                 void *context, int vl,
1814                                                 int mode, u64 data)
1815{
1816        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1817
1818        return dd->cce_err_status_cnt[33];
1819}
1820
1821static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1822                                                void *context, int vl, int mode,
1823                                                u64 data)
1824{
1825        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1826
1827        return dd->cce_err_status_cnt[32];
1828}
1829
1830static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1831                                   void *context, int vl, int mode, u64 data)
1832{
1833        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1834
1835        return dd->cce_err_status_cnt[31];
1836}
1837
1838static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1839                                               void *context, int vl, int mode,
1840                                               u64 data)
1841{
1842        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1843
1844        return dd->cce_err_status_cnt[30];
1845}
1846
1847static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1848                                              void *context, int vl, int mode,
1849                                              u64 data)
1850{
1851        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1852
1853        return dd->cce_err_status_cnt[29];
1854}
1855
1856static u64 access_pcic_transmit_back_parity_err_cnt(
1857                                const struct cntr_entry *entry,
1858                                void *context, int vl, int mode, u64 data)
1859{
1860        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1861
1862        return dd->cce_err_status_cnt[28];
1863}
1864
1865static u64 access_pcic_transmit_front_parity_err_cnt(
1866                                const struct cntr_entry *entry,
1867                                void *context, int vl, int mode, u64 data)
1868{
1869        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1870
1871        return dd->cce_err_status_cnt[27];
1872}
1873
1874static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1875                                             void *context, int vl, int mode,
1876                                             u64 data)
1877{
1878        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1879
1880        return dd->cce_err_status_cnt[26];
1881}
1882
1883static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1884                                            void *context, int vl, int mode,
1885                                            u64 data)
1886{
1887        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1888
1889        return dd->cce_err_status_cnt[25];
1890}
1891
1892static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1893                                              void *context, int vl, int mode,
1894                                              u64 data)
1895{
1896        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1897
1898        return dd->cce_err_status_cnt[24];
1899}
1900
1901static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1902                                             void *context, int vl, int mode,
1903                                             u64 data)
1904{
1905        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1906
1907        return dd->cce_err_status_cnt[23];
1908}
1909
1910static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1911                                                 void *context, int vl,
1912                                                 int mode, u64 data)
1913{
1914        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1915
1916        return dd->cce_err_status_cnt[22];
1917}
1918
1919static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1920                                         void *context, int vl, int mode,
1921                                         u64 data)
1922{
1923        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1924
1925        return dd->cce_err_status_cnt[21];
1926}
1927
1928static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1929                                const struct cntr_entry *entry,
1930                                void *context, int vl, int mode, u64 data)
1931{
1932        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1933
1934        return dd->cce_err_status_cnt[20];
1935}
1936
1937static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1938                                                 void *context, int vl,
1939                                                 int mode, u64 data)
1940{
1941        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1942
1943        return dd->cce_err_status_cnt[19];
1944}
1945
1946static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1947                                             void *context, int vl, int mode,
1948                                             u64 data)
1949{
1950        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1951
1952        return dd->cce_err_status_cnt[18];
1953}
1954
1955static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1956                                            void *context, int vl, int mode,
1957                                            u64 data)
1958{
1959        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1960
1961        return dd->cce_err_status_cnt[17];
1962}
1963
1964static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1965                                              void *context, int vl, int mode,
1966                                              u64 data)
1967{
1968        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1969
1970        return dd->cce_err_status_cnt[16];
1971}
1972
1973static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1974                                             void *context, int vl, int mode,
1975                                             u64 data)
1976{
1977        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1978
1979        return dd->cce_err_status_cnt[15];
1980}
1981
1982static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1983                                                 void *context, int vl,
1984                                                 int mode, u64 data)
1985{
1986        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1987
1988        return dd->cce_err_status_cnt[14];
1989}
1990
1991static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
1992                                             void *context, int vl, int mode,
1993                                             u64 data)
1994{
1995        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1996
1997        return dd->cce_err_status_cnt[13];
1998}
1999
2000static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2001                                const struct cntr_entry *entry,
2002                                void *context, int vl, int mode, u64 data)
2003{
2004        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2005
2006        return dd->cce_err_status_cnt[12];
2007}
2008
2009static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2010                                const struct cntr_entry *entry,
2011                                void *context, int vl, int mode, u64 data)
2012{
2013        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2014
2015        return dd->cce_err_status_cnt[11];
2016}
2017
2018static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2019                                const struct cntr_entry *entry,
2020                                void *context, int vl, int mode, u64 data)
2021{
2022        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2023
2024        return dd->cce_err_status_cnt[10];
2025}
2026
2027static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2028                                const struct cntr_entry *entry,
2029                                void *context, int vl, int mode, u64 data)
2030{
2031        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2032
2033        return dd->cce_err_status_cnt[9];
2034}
2035
2036static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2037                                const struct cntr_entry *entry,
2038                                void *context, int vl, int mode, u64 data)
2039{
2040        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2041
2042        return dd->cce_err_status_cnt[8];
2043}
2044
2045static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2046                                                 void *context, int vl,
2047                                                 int mode, u64 data)
2048{
2049        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2050
2051        return dd->cce_err_status_cnt[7];
2052}
2053
2054static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2055                                const struct cntr_entry *entry,
2056                                void *context, int vl, int mode, u64 data)
2057{
2058        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2059
2060        return dd->cce_err_status_cnt[6];
2061}
2062
2063static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2064                                               void *context, int vl, int mode,
2065                                               u64 data)
2066{
2067        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2068
2069        return dd->cce_err_status_cnt[5];
2070}
2071
2072static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2073                                          void *context, int vl, int mode,
2074                                          u64 data)
2075{
2076        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2077
2078        return dd->cce_err_status_cnt[4];
2079}
2080
2081static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2082                                const struct cntr_entry *entry,
2083                                void *context, int vl, int mode, u64 data)
2084{
2085        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2086
2087        return dd->cce_err_status_cnt[3];
2088}
2089
2090static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2091                                                 void *context, int vl,
2092                                                 int mode, u64 data)
2093{
2094        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2095
2096        return dd->cce_err_status_cnt[2];
2097}
2098
2099static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2100                                                void *context, int vl,
2101                                                int mode, u64 data)
2102{
2103        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2104
2105        return dd->cce_err_status_cnt[1];
2106}
2107
2108static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2109                                         void *context, int vl, int mode,
2110                                         u64 data)
2111{
2112        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2113
2114        return dd->cce_err_status_cnt[0];
2115}
2116
2117/*
2118 * Software counters corresponding to each of the
2119 * error status bits within RcvErrStatus
2120 */
2121static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2122                                        void *context, int vl, int mode,
2123                                        u64 data)
2124{
2125        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2126
2127        return dd->rcv_err_status_cnt[63];
2128}
2129
2130static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2131                                                void *context, int vl,
2132                                                int mode, u64 data)
2133{
2134        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2135
2136        return dd->rcv_err_status_cnt[62];
2137}
2138
2139static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2140                                               void *context, int vl, int mode,
2141                                               u64 data)
2142{
2143        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2144
2145        return dd->rcv_err_status_cnt[61];
2146}
2147
2148static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2149                                         void *context, int vl, int mode,
2150                                         u64 data)
2151{
2152        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2153
2154        return dd->rcv_err_status_cnt[60];
2155}
2156
2157static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2158                                                 void *context, int vl,
2159                                                 int mode, u64 data)
2160{
2161        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2162
2163        return dd->rcv_err_status_cnt[59];
2164}
2165
2166static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2167                                                 void *context, int vl,
2168                                                 int mode, u64 data)
2169{
2170        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2171
2172        return dd->rcv_err_status_cnt[58];
2173}
2174
2175static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2176                                            void *context, int vl, int mode,
2177                                            u64 data)
2178{
2179        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2180
2181        return dd->rcv_err_status_cnt[57];
2182}
2183
2184static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2185                                           void *context, int vl, int mode,
2186                                           u64 data)
2187{
2188        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2189
2190        return dd->rcv_err_status_cnt[56];
2191}
2192
2193static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2194                                           void *context, int vl, int mode,
2195                                           u64 data)
2196{
2197        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2198
2199        return dd->rcv_err_status_cnt[55];
2200}
2201
2202static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2203                                const struct cntr_entry *entry,
2204                                void *context, int vl, int mode, u64 data)
2205{
2206        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2207
2208        return dd->rcv_err_status_cnt[54];
2209}
2210
2211static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2212                                const struct cntr_entry *entry,
2213                                void *context, int vl, int mode, u64 data)
2214{
2215        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2216
2217        return dd->rcv_err_status_cnt[53];
2218}
2219
2220static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2221                                                 void *context, int vl,
2222                                                 int mode, u64 data)
2223{
2224        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2225
2226        return dd->rcv_err_status_cnt[52];
2227}
2228
2229static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2230                                                 void *context, int vl,
2231                                                 int mode, u64 data)
2232{
2233        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2234
2235        return dd->rcv_err_status_cnt[51];
2236}
2237
2238static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2239                                                 void *context, int vl,
2240                                                 int mode, u64 data)
2241{
2242        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2243
2244        return dd->rcv_err_status_cnt[50];
2245}
2246
2247static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2248                                                 void *context, int vl,
2249                                                 int mode, u64 data)
2250{
2251        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2252
2253        return dd->rcv_err_status_cnt[49];
2254}
2255
2256static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2257                                                 void *context, int vl,
2258                                                 int mode, u64 data)
2259{
2260        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2261
2262        return dd->rcv_err_status_cnt[48];
2263}
2264
2265static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2266                                                 void *context, int vl,
2267                                                 int mode, u64 data)
2268{
2269        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2270
2271        return dd->rcv_err_status_cnt[47];
2272}
2273
2274static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2275                                         void *context, int vl, int mode,
2276                                         u64 data)
2277{
2278        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2279
2280        return dd->rcv_err_status_cnt[46];
2281}
2282
2283static u64 access_rx_hq_intr_csr_parity_err_cnt(
2284                                const struct cntr_entry *entry,
2285                                void *context, int vl, int mode, u64 data)
2286{
2287        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2288
2289        return dd->rcv_err_status_cnt[45];
2290}
2291
2292static u64 access_rx_lookup_csr_parity_err_cnt(
2293                                const struct cntr_entry *entry,
2294                                void *context, int vl, int mode, u64 data)
2295{
2296        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2297
2298        return dd->rcv_err_status_cnt[44];
2299}
2300
2301static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2302                                const struct cntr_entry *entry,
2303                                void *context, int vl, int mode, u64 data)
2304{
2305        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2306
2307        return dd->rcv_err_status_cnt[43];
2308}
2309
2310static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2311                                const struct cntr_entry *entry,
2312                                void *context, int vl, int mode, u64 data)
2313{
2314        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2315
2316        return dd->rcv_err_status_cnt[42];
2317}
2318
2319static u64 access_rx_lookup_des_part2_parity_err_cnt(
2320                                const struct cntr_entry *entry,
2321                                void *context, int vl, int mode, u64 data)
2322{
2323        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2324
2325        return dd->rcv_err_status_cnt[41];
2326}
2327
2328static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2329                                const struct cntr_entry *entry,
2330                                void *context, int vl, int mode, u64 data)
2331{
2332        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2333
2334        return dd->rcv_err_status_cnt[40];
2335}
2336
2337static u64 access_rx_lookup_des_part1_unc_err_cnt(
2338                                const struct cntr_entry *entry,
2339                                void *context, int vl, int mode, u64 data)
2340{
2341        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2342
2343        return dd->rcv_err_status_cnt[39];
2344}
2345
2346static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2347                                const struct cntr_entry *entry,
2348                                void *context, int vl, int mode, u64 data)
2349{
2350        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2351
2352        return dd->rcv_err_status_cnt[38];
2353}
2354
2355static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2356                                const struct cntr_entry *entry,
2357                                void *context, int vl, int mode, u64 data)
2358{
2359        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2360
2361        return dd->rcv_err_status_cnt[37];
2362}
2363
2364static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2365                                const struct cntr_entry *entry,
2366                                void *context, int vl, int mode, u64 data)
2367{
2368        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2369
2370        return dd->rcv_err_status_cnt[36];
2371}
2372
2373static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2374                                const struct cntr_entry *entry,
2375                                void *context, int vl, int mode, u64 data)
2376{
2377        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2378
2379        return dd->rcv_err_status_cnt[35];
2380}
2381
2382static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2383                                const struct cntr_entry *entry,
2384                                void *context, int vl, int mode, u64 data)
2385{
2386        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2387
2388        return dd->rcv_err_status_cnt[34];
2389}
2390
2391static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2392                                const struct cntr_entry *entry,
2393                                void *context, int vl, int mode, u64 data)
2394{
2395        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2396
2397        return dd->rcv_err_status_cnt[33];
2398}
2399
2400static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2401                                        void *context, int vl, int mode,
2402                                        u64 data)
2403{
2404        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2405
2406        return dd->rcv_err_status_cnt[32];
2407}
2408
2409static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2410                                       void *context, int vl, int mode,
2411                                       u64 data)
2412{
2413        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2414
2415        return dd->rcv_err_status_cnt[31];
2416}
2417
2418static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2419                                          void *context, int vl, int mode,
2420                                          u64 data)
2421{
2422        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2423
2424        return dd->rcv_err_status_cnt[30];
2425}
2426
2427static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2428                                             void *context, int vl, int mode,
2429                                             u64 data)
2430{
2431        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2432
2433        return dd->rcv_err_status_cnt[29];
2434}
2435
2436static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2437                                                 void *context, int vl,
2438                                                 int mode, u64 data)
2439{
2440        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2441
2442        return dd->rcv_err_status_cnt[28];
2443}
2444
2445static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2446                                const struct cntr_entry *entry,
2447                                void *context, int vl, int mode, u64 data)
2448{
2449        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2450
2451        return dd->rcv_err_status_cnt[27];
2452}
2453
2454static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2455                                const struct cntr_entry *entry,
2456                                void *context, int vl, int mode, u64 data)
2457{
2458        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2459
2460        return dd->rcv_err_status_cnt[26];
2461}
2462
2463static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2464                                const struct cntr_entry *entry,
2465                                void *context, int vl, int mode, u64 data)
2466{
2467        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2468
2469        return dd->rcv_err_status_cnt[25];
2470}
2471
2472static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2473                                const struct cntr_entry *entry,
2474                                void *context, int vl, int mode, u64 data)
2475{
2476        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2477
2478        return dd->rcv_err_status_cnt[24];
2479}
2480
2481static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2482                                const struct cntr_entry *entry,
2483                                void *context, int vl, int mode, u64 data)
2484{
2485        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2486
2487        return dd->rcv_err_status_cnt[23];
2488}
2489
2490static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2491                                const struct cntr_entry *entry,
2492                                void *context, int vl, int mode, u64 data)
2493{
2494        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2495
2496        return dd->rcv_err_status_cnt[22];
2497}
2498
2499static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2500                                const struct cntr_entry *entry,
2501                                void *context, int vl, int mode, u64 data)
2502{
2503        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2504
2505        return dd->rcv_err_status_cnt[21];
2506}
2507
2508static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2509                                const struct cntr_entry *entry,
2510                                void *context, int vl, int mode, u64 data)
2511{
2512        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2513
2514        return dd->rcv_err_status_cnt[20];
2515}
2516
2517static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2518                                const struct cntr_entry *entry,
2519                                void *context, int vl, int mode, u64 data)
2520{
2521        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2522
2523        return dd->rcv_err_status_cnt[19];
2524}
2525
2526static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2527                                                 void *context, int vl,
2528                                                 int mode, u64 data)
2529{
2530        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2531
2532        return dd->rcv_err_status_cnt[18];
2533}
2534
2535static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2536                                                 void *context, int vl,
2537                                                 int mode, u64 data)
2538{
2539        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2540
2541        return dd->rcv_err_status_cnt[17];
2542}
2543
2544static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2545                                const struct cntr_entry *entry,
2546                                void *context, int vl, int mode, u64 data)
2547{
2548        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2549
2550        return dd->rcv_err_status_cnt[16];
2551}
2552
2553static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2554                                const struct cntr_entry *entry,
2555                                void *context, int vl, int mode, u64 data)
2556{
2557        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2558
2559        return dd->rcv_err_status_cnt[15];
2560}
2561
2562static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2563                                                void *context, int vl,
2564                                                int mode, u64 data)
2565{
2566        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2567
2568        return dd->rcv_err_status_cnt[14];
2569}
2570
2571static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2572                                                void *context, int vl,
2573                                                int mode, u64 data)
2574{
2575        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2576
2577        return dd->rcv_err_status_cnt[13];
2578}
2579
2580static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2581                                              void *context, int vl, int mode,
2582                                              u64 data)
2583{
2584        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2585
2586        return dd->rcv_err_status_cnt[12];
2587}
2588
2589static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2590                                          void *context, int vl, int mode,
2591                                          u64 data)
2592{
2593        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2594
2595        return dd->rcv_err_status_cnt[11];
2596}
2597
2598static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2599                                          void *context, int vl, int mode,
2600                                          u64 data)
2601{
2602        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2603
2604        return dd->rcv_err_status_cnt[10];
2605}
2606
2607static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2608                                               void *context, int vl, int mode,
2609                                               u64 data)
2610{
2611        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2612
2613        return dd->rcv_err_status_cnt[9];
2614}
2615
2616static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2617                                            void *context, int vl, int mode,
2618                                            u64 data)
2619{
2620        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2621
2622        return dd->rcv_err_status_cnt[8];
2623}
2624
2625static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2626                                const struct cntr_entry *entry,
2627                                void *context, int vl, int mode, u64 data)
2628{
2629        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2630
2631        return dd->rcv_err_status_cnt[7];
2632}
2633
2634static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2635                                const struct cntr_entry *entry,
2636                                void *context, int vl, int mode, u64 data)
2637{
2638        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2639
2640        return dd->rcv_err_status_cnt[6];
2641}
2642
2643static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2644                                          void *context, int vl, int mode,
2645                                          u64 data)
2646{
2647        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2648
2649        return dd->rcv_err_status_cnt[5];
2650}
2651
2652static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2653                                          void *context, int vl, int mode,
2654                                          u64 data)
2655{
2656        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2657
2658        return dd->rcv_err_status_cnt[4];
2659}
2660
2661static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2662                                         void *context, int vl, int mode,
2663                                         u64 data)
2664{
2665        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2666
2667        return dd->rcv_err_status_cnt[3];
2668}
2669
2670static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2671                                         void *context, int vl, int mode,
2672                                         u64 data)
2673{
2674        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2675
2676        return dd->rcv_err_status_cnt[2];
2677}
2678
2679static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2680                                            void *context, int vl, int mode,
2681                                            u64 data)
2682{
2683        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2684
2685        return dd->rcv_err_status_cnt[1];
2686}
2687
2688static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2689                                         void *context, int vl, int mode,
2690                                         u64 data)
2691{
2692        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2693
2694        return dd->rcv_err_status_cnt[0];
2695}
2696
2697/*
2698 * Software counters corresponding to each of the
2699 * error status bits within SendPioErrStatus
2700 */
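/*
 * Each accessor below simply reports one element of
 * dd->send_pio_err_status_cnt[]; the array index corresponds to the
 * error's bit position within the SendPioErrStatus CSR.  The accessor
 * groups that follow (SendDmaErrStatus, SendEgressErrStatus,
 * SendErrStatus, SendCtxtErrStatus and SendDmaEngErrStatus) repeat the
 * same pattern against their own software counter arrays.
 */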
2701static u64 access_pio_pec_sop_head_parity_err_cnt(
2702                                const struct cntr_entry *entry,
2703                                void *context, int vl, int mode, u64 data)
2704{
2705        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2706
2707        return dd->send_pio_err_status_cnt[35];
2708}
2709
2710static u64 access_pio_pcc_sop_head_parity_err_cnt(
2711                                const struct cntr_entry *entry,
2712                                void *context, int vl, int mode, u64 data)
2713{
2714        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2715
2716        return dd->send_pio_err_status_cnt[34];
2717}
2718
2719static u64 access_pio_last_returned_cnt_parity_err_cnt(
2720                                const struct cntr_entry *entry,
2721                                void *context, int vl, int mode, u64 data)
2722{
2723        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2724
2725        return dd->send_pio_err_status_cnt[33];
2726}
2727
2728static u64 access_pio_current_free_cnt_parity_err_cnt(
2729                                const struct cntr_entry *entry,
2730                                void *context, int vl, int mode, u64 data)
2731{
2732        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2733
2734        return dd->send_pio_err_status_cnt[32];
2735}
2736
2737static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2738                                          void *context, int vl, int mode,
2739                                          u64 data)
2740{
2741        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2742
2743        return dd->send_pio_err_status_cnt[31];
2744}
2745
2746static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2747                                          void *context, int vl, int mode,
2748                                          u64 data)
2749{
2750        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2751
2752        return dd->send_pio_err_status_cnt[30];
2753}
2754
2755static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2756                                           void *context, int vl, int mode,
2757                                           u64 data)
2758{
2759        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2760
2761        return dd->send_pio_err_status_cnt[29];
2762}
2763
2764static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2765                                const struct cntr_entry *entry,
2766                                void *context, int vl, int mode, u64 data)
2767{
2768        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2769
2770        return dd->send_pio_err_status_cnt[28];
2771}
2772
2773static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2774                                             void *context, int vl, int mode,
2775                                             u64 data)
2776{
2777        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2778
2779        return dd->send_pio_err_status_cnt[27];
2780}
2781
2782static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2783                                             void *context, int vl, int mode,
2784                                             u64 data)
2785{
2786        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2787
2788        return dd->send_pio_err_status_cnt[26];
2789}
2790
2791static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2792                                                void *context, int vl,
2793                                                int mode, u64 data)
2794{
2795        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2796
2797        return dd->send_pio_err_status_cnt[25];
2798}
2799
2800static u64 access_pio_block_qw_count_parity_err_cnt(
2801                                const struct cntr_entry *entry,
2802                                void *context, int vl, int mode, u64 data)
2803{
2804        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2805
2806        return dd->send_pio_err_status_cnt[24];
2807}
2808
2809static u64 access_pio_write_qw_valid_parity_err_cnt(
2810                                const struct cntr_entry *entry,
2811                                void *context, int vl, int mode, u64 data)
2812{
2813        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2814
2815        return dd->send_pio_err_status_cnt[23];
2816}
2817
2818static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2819                                            void *context, int vl, int mode,
2820                                            u64 data)
2821{
2822        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2823
2824        return dd->send_pio_err_status_cnt[22];
2825}
2826
2827static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2828                                                void *context, int vl,
2829                                                int mode, u64 data)
2830{
2831        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2832
2833        return dd->send_pio_err_status_cnt[21];
2834}
2835
2836static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2837                                                void *context, int vl,
2838                                                int mode, u64 data)
2839{
2840        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2841
2842        return dd->send_pio_err_status_cnt[20];
2843}
2844
2845static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2846                                                void *context, int vl,
2847                                                int mode, u64 data)
2848{
2849        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2850
2851        return dd->send_pio_err_status_cnt[19];
2852}
2853
2854static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2855                                const struct cntr_entry *entry,
2856                                void *context, int vl, int mode, u64 data)
2857{
2858        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2859
2860        return dd->send_pio_err_status_cnt[18];
2861}
2862
2863static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2864                                         void *context, int vl, int mode,
2865                                         u64 data)
2866{
2867        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2868
2869        return dd->send_pio_err_status_cnt[17];
2870}
2871
2872static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2873                                            void *context, int vl, int mode,
2874                                            u64 data)
2875{
2876        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2877
2878        return dd->send_pio_err_status_cnt[16];
2879}
2880
2881static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2882                                const struct cntr_entry *entry,
2883                                void *context, int vl, int mode, u64 data)
2884{
2885        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2886
2887        return dd->send_pio_err_status_cnt[15];
2888}
2889
2890static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2891                                const struct cntr_entry *entry,
2892                                void *context, int vl, int mode, u64 data)
2893{
2894        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2895
2896        return dd->send_pio_err_status_cnt[14];
2897}
2898
2899static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2900                                const struct cntr_entry *entry,
2901                                void *context, int vl, int mode, u64 data)
2902{
2903        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2904
2905        return dd->send_pio_err_status_cnt[13];
2906}
2907
2908static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2909                                const struct cntr_entry *entry,
2910                                void *context, int vl, int mode, u64 data)
2911{
2912        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2913
2914        return dd->send_pio_err_status_cnt[12];
2915}
2916
2917static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2918                                const struct cntr_entry *entry,
2919                                void *context, int vl, int mode, u64 data)
2920{
2921        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2922
2923        return dd->send_pio_err_status_cnt[11];
2924}
2925
2926static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2927                                const struct cntr_entry *entry,
2928                                void *context, int vl, int mode, u64 data)
2929{
2930        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2931
2932        return dd->send_pio_err_status_cnt[10];
2933}
2934
2935static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2936                                const struct cntr_entry *entry,
2937                                void *context, int vl, int mode, u64 data)
2938{
2939        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2940
2941        return dd->send_pio_err_status_cnt[9];
2942}
2943
2944static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2945                                const struct cntr_entry *entry,
2946                                void *context, int vl, int mode, u64 data)
2947{
2948        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2949
2950        return dd->send_pio_err_status_cnt[8];
2951}
2952
2953static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2954                                const struct cntr_entry *entry,
2955                                void *context, int vl, int mode, u64 data)
2956{
2957        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2958
2959        return dd->send_pio_err_status_cnt[7];
2960}
2961
2962static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2963                                              void *context, int vl, int mode,
2964                                              u64 data)
2965{
2966        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2967
2968        return dd->send_pio_err_status_cnt[6];
2969}
2970
2971static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2972                                              void *context, int vl, int mode,
2973                                              u64 data)
2974{
2975        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2976
2977        return dd->send_pio_err_status_cnt[5];
2978}
2979
2980static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2981                                           void *context, int vl, int mode,
2982                                           u64 data)
2983{
2984        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2985
2986        return dd->send_pio_err_status_cnt[4];
2987}
2988
2989static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
2990                                           void *context, int vl, int mode,
2991                                           u64 data)
2992{
2993        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2994
2995        return dd->send_pio_err_status_cnt[3];
2996}
2997
2998static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
2999                                         void *context, int vl, int mode,
3000                                         u64 data)
3001{
3002        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3003
3004        return dd->send_pio_err_status_cnt[2];
3005}
3006
3007static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3008                                                void *context, int vl,
3009                                                int mode, u64 data)
3010{
3011        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3012
3013        return dd->send_pio_err_status_cnt[1];
3014}
3015
3016static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3017                                             void *context, int vl, int mode,
3018                                             u64 data)
3019{
3020        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3021
3022        return dd->send_pio_err_status_cnt[0];
3023}
3024
3025/*
3026 * Software counters corresponding to each of the
3027 * error status bits within SendDmaErrStatus
3028 */
3029static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3030                                const struct cntr_entry *entry,
3031                                void *context, int vl, int mode, u64 data)
3032{
3033        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3034
3035        return dd->send_dma_err_status_cnt[3];
3036}
3037
3038static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3039                                const struct cntr_entry *entry,
3040                                void *context, int vl, int mode, u64 data)
3041{
3042        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3043
3044        return dd->send_dma_err_status_cnt[2];
3045}
3046
3047static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3048                                          void *context, int vl, int mode,
3049                                          u64 data)
3050{
3051        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3052
3053        return dd->send_dma_err_status_cnt[1];
3054}
3055
3056static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3057                                       void *context, int vl, int mode,
3058                                       u64 data)
3059{
3060        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3061
3062        return dd->send_dma_err_status_cnt[0];
3063}
3064
3065/*
3066 * Software counters corresponding to each of the
3067 * error status bits within SendEgressErrStatus
3068 */
3069static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3070                                const struct cntr_entry *entry,
3071                                void *context, int vl, int mode, u64 data)
3072{
3073        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3074
3075        return dd->send_egress_err_status_cnt[63];
3076}
3077
3078static u64 access_tx_read_sdma_memory_csr_err_cnt(
3079                                const struct cntr_entry *entry,
3080                                void *context, int vl, int mode, u64 data)
3081{
3082        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3083
3084        return dd->send_egress_err_status_cnt[62];
3085}
3086
3087static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3088                                             void *context, int vl, int mode,
3089                                             u64 data)
3090{
3091        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3092
3093        return dd->send_egress_err_status_cnt[61];
3094}
3095
3096static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3097                                                 void *context, int vl,
3098                                                 int mode, u64 data)
3099{
3100        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3101
3102        return dd->send_egress_err_status_cnt[60];
3103}
3104
3105static u64 access_tx_read_sdma_memory_cor_err_cnt(
3106                                const struct cntr_entry *entry,
3107                                void *context, int vl, int mode, u64 data)
3108{
3109        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3110
3111        return dd->send_egress_err_status_cnt[59];
3112}
3113
3114static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3115                                        void *context, int vl, int mode,
3116                                        u64 data)
3117{
3118        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3119
3120        return dd->send_egress_err_status_cnt[58];
3121}
3122
3123static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3124                                            void *context, int vl, int mode,
3125                                            u64 data)
3126{
3127        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3128
3129        return dd->send_egress_err_status_cnt[57];
3130}
3131
3132static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3133                                              void *context, int vl, int mode,
3134                                              u64 data)
3135{
3136        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3137
3138        return dd->send_egress_err_status_cnt[56];
3139}
3140
3141static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3142                                              void *context, int vl, int mode,
3143                                              u64 data)
3144{
3145        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3146
3147        return dd->send_egress_err_status_cnt[55];
3148}
3149
3150static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3151                                              void *context, int vl, int mode,
3152                                              u64 data)
3153{
3154        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3155
3156        return dd->send_egress_err_status_cnt[54];
3157}
3158
3159static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3160                                              void *context, int vl, int mode,
3161                                              u64 data)
3162{
3163        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3164
3165        return dd->send_egress_err_status_cnt[53];
3166}
3167
3168static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3169                                              void *context, int vl, int mode,
3170                                              u64 data)
3171{
3172        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3173
3174        return dd->send_egress_err_status_cnt[52];
3175}
3176
3177static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3178                                              void *context, int vl, int mode,
3179                                              u64 data)
3180{
3181        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3182
3183        return dd->send_egress_err_status_cnt[51];
3184}
3185
3186static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3187                                              void *context, int vl, int mode,
3188                                              u64 data)
3189{
3190        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3191
3192        return dd->send_egress_err_status_cnt[50];
3193}
3194
3195static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3196                                              void *context, int vl, int mode,
3197                                              u64 data)
3198{
3199        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3200
3201        return dd->send_egress_err_status_cnt[49];
3202}
3203
3204static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3205                                              void *context, int vl, int mode,
3206                                              u64 data)
3207{
3208        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3209
3210        return dd->send_egress_err_status_cnt[48];
3211}
3212
3213static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3214                                              void *context, int vl, int mode,
3215                                              u64 data)
3216{
3217        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3218
3219        return dd->send_egress_err_status_cnt[47];
3220}
3221
3222static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3223                                            void *context, int vl, int mode,
3224                                            u64 data)
3225{
3226        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3227
3228        return dd->send_egress_err_status_cnt[46];
3229}
3230
3231static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3232                                             void *context, int vl, int mode,
3233                                             u64 data)
3234{
3235        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3236
3237        return dd->send_egress_err_status_cnt[45];
3238}
3239
3240static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3241                                                 void *context, int vl,
3242                                                 int mode, u64 data)
3243{
3244        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3245
3246        return dd->send_egress_err_status_cnt[44];
3247}
3248
3249static u64 access_tx_read_sdma_memory_unc_err_cnt(
3250                                const struct cntr_entry *entry,
3251                                void *context, int vl, int mode, u64 data)
3252{
3253        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3254
3255        return dd->send_egress_err_status_cnt[43];
3256}
3257
3258static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3259                                        void *context, int vl, int mode,
3260                                        u64 data)
3261{
3262        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3263
3264        return dd->send_egress_err_status_cnt[42];
3265}
3266
3267static u64 access_tx_credit_return_partiy_err_cnt(
3268                                const struct cntr_entry *entry,
3269                                void *context, int vl, int mode, u64 data)
3270{
3271        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3272
3273        return dd->send_egress_err_status_cnt[41];
3274}
3275
3276static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3277                                const struct cntr_entry *entry,
3278                                void *context, int vl, int mode, u64 data)
3279{
3280        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3281
3282        return dd->send_egress_err_status_cnt[40];
3283}
3284
3285static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3286                                const struct cntr_entry *entry,
3287                                void *context, int vl, int mode, u64 data)
3288{
3289        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3290
3291        return dd->send_egress_err_status_cnt[39];
3292}
3293
3294static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3295                                const struct cntr_entry *entry,
3296                                void *context, int vl, int mode, u64 data)
3297{
3298        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3299
3300        return dd->send_egress_err_status_cnt[38];
3301}
3302
3303static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3304                                const struct cntr_entry *entry,
3305                                void *context, int vl, int mode, u64 data)
3306{
3307        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3308
3309        return dd->send_egress_err_status_cnt[37];
3310}
3311
3312static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3313                                const struct cntr_entry *entry,
3314                                void *context, int vl, int mode, u64 data)
3315{
3316        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3317
3318        return dd->send_egress_err_status_cnt[36];
3319}
3320
3321static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3322                                const struct cntr_entry *entry,
3323                                void *context, int vl, int mode, u64 data)
3324{
3325        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3326
3327        return dd->send_egress_err_status_cnt[35];
3328}
3329
3330static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3331                                const struct cntr_entry *entry,
3332                                void *context, int vl, int mode, u64 data)
3333{
3334        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3335
3336        return dd->send_egress_err_status_cnt[34];
3337}
3338
3339static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3340                                const struct cntr_entry *entry,
3341                                void *context, int vl, int mode, u64 data)
3342{
3343        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3344
3345        return dd->send_egress_err_status_cnt[33];
3346}
3347
3348static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3349                                const struct cntr_entry *entry,
3350                                void *context, int vl, int mode, u64 data)
3351{
3352        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3353
3354        return dd->send_egress_err_status_cnt[32];
3355}
3356
3357static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3358                                const struct cntr_entry *entry,
3359                                void *context, int vl, int mode, u64 data)
3360{
3361        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3362
3363        return dd->send_egress_err_status_cnt[31];
3364}
3365
3366static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3367                                const struct cntr_entry *entry,
3368                                void *context, int vl, int mode, u64 data)
3369{
3370        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3371
3372        return dd->send_egress_err_status_cnt[30];
3373}
3374
3375static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3376                                const struct cntr_entry *entry,
3377                                void *context, int vl, int mode, u64 data)
3378{
3379        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3380
3381        return dd->send_egress_err_status_cnt[29];
3382}
3383
3384static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3385                                const struct cntr_entry *entry,
3386                                void *context, int vl, int mode, u64 data)
3387{
3388        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3389
3390        return dd->send_egress_err_status_cnt[28];
3391}
3392
3393static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3394                                const struct cntr_entry *entry,
3395                                void *context, int vl, int mode, u64 data)
3396{
3397        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3398
3399        return dd->send_egress_err_status_cnt[27];
3400}
3401
3402static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3403                                const struct cntr_entry *entry,
3404                                void *context, int vl, int mode, u64 data)
3405{
3406        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3407
3408        return dd->send_egress_err_status_cnt[26];
3409}
3410
3411static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3412                                const struct cntr_entry *entry,
3413                                void *context, int vl, int mode, u64 data)
3414{
3415        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3416
3417        return dd->send_egress_err_status_cnt[25];
3418}
3419
3420static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3421                                const struct cntr_entry *entry,
3422                                void *context, int vl, int mode, u64 data)
3423{
3424        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3425
3426        return dd->send_egress_err_status_cnt[24];
3427}
3428
3429static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3430                                const struct cntr_entry *entry,
3431                                void *context, int vl, int mode, u64 data)
3432{
3433        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3434
3435        return dd->send_egress_err_status_cnt[23];
3436}
3437
3438static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3439                                const struct cntr_entry *entry,
3440                                void *context, int vl, int mode, u64 data)
3441{
3442        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3443
3444        return dd->send_egress_err_status_cnt[22];
3445}
3446
3447static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3448                                const struct cntr_entry *entry,
3449                                void *context, int vl, int mode, u64 data)
3450{
3451        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3452
3453        return dd->send_egress_err_status_cnt[21];
3454}
3455
3456static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3457                                const struct cntr_entry *entry,
3458                                void *context, int vl, int mode, u64 data)
3459{
3460        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3461
3462        return dd->send_egress_err_status_cnt[20];
3463}
3464
3465static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3466                                const struct cntr_entry *entry,
3467                                void *context, int vl, int mode, u64 data)
3468{
3469        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3470
3471        return dd->send_egress_err_status_cnt[19];
3472}
3473
3474static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3475                                const struct cntr_entry *entry,
3476                                void *context, int vl, int mode, u64 data)
3477{
3478        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3479
3480        return dd->send_egress_err_status_cnt[18];
3481}
3482
3483static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3484                                const struct cntr_entry *entry,
3485                                void *context, int vl, int mode, u64 data)
3486{
3487        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3488
3489        return dd->send_egress_err_status_cnt[17];
3490}
3491
3492static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3493                                const struct cntr_entry *entry,
3494                                void *context, int vl, int mode, u64 data)
3495{
3496        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3497
3498        return dd->send_egress_err_status_cnt[16];
3499}
3500
3501static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3502                                           void *context, int vl, int mode,
3503                                           u64 data)
3504{
3505        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3506
3507        return dd->send_egress_err_status_cnt[15];
3508}
3509
3510static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3511                                                 void *context, int vl,
3512                                                 int mode, u64 data)
3513{
3514        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3515
3516        return dd->send_egress_err_status_cnt[14];
3517}
3518
3519static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3520                                               void *context, int vl, int mode,
3521                                               u64 data)
3522{
3523        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3524
3525        return dd->send_egress_err_status_cnt[13];
3526}
3527
3528static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3529                                        void *context, int vl, int mode,
3530                                        u64 data)
3531{
3532        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3533
3534        return dd->send_egress_err_status_cnt[12];
3535}
3536
3537static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3538                                const struct cntr_entry *entry,
3539                                void *context, int vl, int mode, u64 data)
3540{
3541        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3542
3543        return dd->send_egress_err_status_cnt[11];
3544}
3545
3546static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3547                                             void *context, int vl, int mode,
3548                                             u64 data)
3549{
3550        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3551
3552        return dd->send_egress_err_status_cnt[10];
3553}
3554
3555static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3556                                            void *context, int vl, int mode,
3557                                            u64 data)
3558{
3559        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3560
3561        return dd->send_egress_err_status_cnt[9];
3562}
3563
3564static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3565                                const struct cntr_entry *entry,
3566                                void *context, int vl, int mode, u64 data)
3567{
3568        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3569
3570        return dd->send_egress_err_status_cnt[8];
3571}
3572
3573static u64 access_tx_pio_launch_intf_parity_err_cnt(
3574                                const struct cntr_entry *entry,
3575                                void *context, int vl, int mode, u64 data)
3576{
3577        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3578
3579        return dd->send_egress_err_status_cnt[7];
3580}
3581
3582static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3583                                            void *context, int vl, int mode,
3584                                            u64 data)
3585{
3586        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3587
3588        return dd->send_egress_err_status_cnt[6];
3589}
3590
3591static u64 access_tx_incorrect_link_state_err_cnt(
3592                                const struct cntr_entry *entry,
3593                                void *context, int vl, int mode, u64 data)
3594{
3595        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3596
3597        return dd->send_egress_err_status_cnt[5];
3598}
3599
3600static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3601                                      void *context, int vl, int mode,
3602                                      u64 data)
3603{
3604        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3605
3606        return dd->send_egress_err_status_cnt[4];
3607}
3608
3609static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3610                                const struct cntr_entry *entry,
3611                                void *context, int vl, int mode, u64 data)
3612{
3613        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3614
3615        return dd->send_egress_err_status_cnt[3];
3616}
3617
3618static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3619                                            void *context, int vl, int mode,
3620                                            u64 data)
3621{
3622        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3623
3624        return dd->send_egress_err_status_cnt[2];
3625}
3626
3627static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3628                                const struct cntr_entry *entry,
3629                                void *context, int vl, int mode, u64 data)
3630{
3631        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3632
3633        return dd->send_egress_err_status_cnt[1];
3634}
3635
3636static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3637                                const struct cntr_entry *entry,
3638                                void *context, int vl, int mode, u64 data)
3639{
3640        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3641
3642        return dd->send_egress_err_status_cnt[0];
3643}
3644
3645/*
3646 * Software counters corresponding to each of the
3647 * error status bits within SendErrStatus
3648 */
3649static u64 access_send_csr_write_bad_addr_err_cnt(
3650                                const struct cntr_entry *entry,
3651                                void *context, int vl, int mode, u64 data)
3652{
3653        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3654
3655        return dd->send_err_status_cnt[2];
3656}
3657
3658static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3659                                                 void *context, int vl,
3660                                                 int mode, u64 data)
3661{
3662        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3663
3664        return dd->send_err_status_cnt[1];
3665}
3666
3667static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3668                                      void *context, int vl, int mode,
3669                                      u64 data)
3670{
3671        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3672
3673        return dd->send_err_status_cnt[0];
3674}
3675
3676/*
3677 * Software counters corresponding to each of the
3678 * error status bits within SendCtxtErrStatus
3679 */
3680static u64 access_pio_write_out_of_bounds_err_cnt(
3681                                const struct cntr_entry *entry,
3682                                void *context, int vl, int mode, u64 data)
3683{
3684        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3685
3686        return dd->sw_ctxt_err_status_cnt[4];
3687}
3688
3689static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3690                                             void *context, int vl, int mode,
3691                                             u64 data)
3692{
3693        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3694
3695        return dd->sw_ctxt_err_status_cnt[3];
3696}
3697
3698static u64 access_pio_write_crosses_boundary_err_cnt(
3699                                const struct cntr_entry *entry,
3700                                void *context, int vl, int mode, u64 data)
3701{
3702        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3703
3704        return dd->sw_ctxt_err_status_cnt[2];
3705}
3706
3707static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3708                                                void *context, int vl,
3709                                                int mode, u64 data)
3710{
3711        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3712
3713        return dd->sw_ctxt_err_status_cnt[1];
3714}
3715
3716static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3717                                               void *context, int vl, int mode,
3718                                               u64 data)
3719{
3720        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3721
3722        return dd->sw_ctxt_err_status_cnt[0];
3723}
3724
3725/*
3726 * Software counters corresponding to each of the
3727 * error status bits within SendDmaEngErrStatus
3728 */
3729static u64 access_sdma_header_request_fifo_cor_err_cnt(
3730                                const struct cntr_entry *entry,
3731                                void *context, int vl, int mode, u64 data)
3732{
3733        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3734
3735        return dd->sw_send_dma_eng_err_status_cnt[23];
3736}
3737
3738static u64 access_sdma_header_storage_cor_err_cnt(
3739                                const struct cntr_entry *entry,
3740                                void *context, int vl, int mode, u64 data)
3741{
3742        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3743
3744        return dd->sw_send_dma_eng_err_status_cnt[22];
3745}
3746
3747static u64 access_sdma_packet_tracking_cor_err_cnt(
3748                                const struct cntr_entry *entry,
3749                                void *context, int vl, int mode, u64 data)
3750{
3751        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3752
3753        return dd->sw_send_dma_eng_err_status_cnt[21];
3754}
3755
3756static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3757                                            void *context, int vl, int mode,
3758                                            u64 data)
3759{
3760        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3761
3762        return dd->sw_send_dma_eng_err_status_cnt[20];
3763}
3764
3765static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3766                                              void *context, int vl, int mode,
3767                                              u64 data)
3768{
3769        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3770
3771        return dd->sw_send_dma_eng_err_status_cnt[19];
3772}
3773
3774static u64 access_sdma_header_request_fifo_unc_err_cnt(
3775                                const struct cntr_entry *entry,
3776                                void *context, int vl, int mode, u64 data)
3777{
3778        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3779
3780        return dd->sw_send_dma_eng_err_status_cnt[18];
3781}
3782
3783static u64 access_sdma_header_storage_unc_err_cnt(
3784                                const struct cntr_entry *entry,
3785                                void *context, int vl, int mode, u64 data)
3786{
3787        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3788
3789        return dd->sw_send_dma_eng_err_status_cnt[17];
3790}
3791
3792static u64 access_sdma_packet_tracking_unc_err_cnt(
3793                                const struct cntr_entry *entry,
3794                                void *context, int vl, int mode, u64 data)
3795{
3796        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3797
3798        return dd->sw_send_dma_eng_err_status_cnt[16];
3799}
3800
3801static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3802                                            void *context, int vl, int mode,
3803                                            u64 data)
3804{
3805        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3806
3807        return dd->sw_send_dma_eng_err_status_cnt[15];
3808}
3809
3810static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3811                                              void *context, int vl, int mode,
3812                                              u64 data)
3813{
3814        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3815
3816        return dd->sw_send_dma_eng_err_status_cnt[14];
3817}
3818
3819static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3820                                       void *context, int vl, int mode,
3821                                       u64 data)
3822{
3823        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3824
3825        return dd->sw_send_dma_eng_err_status_cnt[13];
3826}
3827
3828static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3829                                             void *context, int vl, int mode,
3830                                             u64 data)
3831{
3832        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3833
3834        return dd->sw_send_dma_eng_err_status_cnt[12];
3835}
3836
3837static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3838                                              void *context, int vl, int mode,
3839                                              u64 data)
3840{
3841        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3842
3843        return dd->sw_send_dma_eng_err_status_cnt[11];
3844}
3845
3846static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3847                                             void *context, int vl, int mode,
3848                                             u64 data)
3849{
3850        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3851
3852        return dd->sw_send_dma_eng_err_status_cnt[10];
3853}
3854
3855static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3856                                          void *context, int vl, int mode,
3857                                          u64 data)
3858{
3859        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3860
3861        return dd->sw_send_dma_eng_err_status_cnt[9];
3862}
3863
3864static u64 access_sdma_packet_desc_overflow_err_cnt(
3865                                const struct cntr_entry *entry,
3866                                void *context, int vl, int mode, u64 data)
3867{
3868        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3869
3870        return dd->sw_send_dma_eng_err_status_cnt[8];
3871}
3872
3873static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3874                                               void *context, int vl,
3875                                               int mode, u64 data)
3876{
3877        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3878
3879        return dd->sw_send_dma_eng_err_status_cnt[7];
3880}
3881
3882static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3883                                    void *context, int vl, int mode, u64 data)
3884{
3885        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3886
3887        return dd->sw_send_dma_eng_err_status_cnt[6];
3888}
3889
3890static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3891                                        void *context, int vl, int mode,
3892                                        u64 data)
3893{
3894        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3895
3896        return dd->sw_send_dma_eng_err_status_cnt[5];
3897}
3898
3899static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3900                                          void *context, int vl, int mode,
3901                                          u64 data)
3902{
3903        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3904
3905        return dd->sw_send_dma_eng_err_status_cnt[4];
3906}
3907
3908static u64 access_sdma_tail_out_of_bounds_err_cnt(
3909                                const struct cntr_entry *entry,
3910                                void *context, int vl, int mode, u64 data)
3911{
3912        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3913
3914        return dd->sw_send_dma_eng_err_status_cnt[3];
3915}
3916
3917static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3918                                        void *context, int vl, int mode,
3919                                        u64 data)
3920{
3921        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3922
3923        return dd->sw_send_dma_eng_err_status_cnt[2];
3924}
3925
3926static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3927                                            void *context, int vl, int mode,
3928                                            u64 data)
3929{
3930        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3931
3932        return dd->sw_send_dma_eng_err_status_cnt[1];
3933}
3934
3935static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3936                                        void *context, int vl, int mode,
3937                                        u64 data)
3938{
3939        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3940
3941        return dd->sw_send_dma_eng_err_status_cnt[0];
3942}
3943
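/*
 * def_access_sw_cpu(cntr) generates a counter accessor named
 * access_sw_cpu_<cntr>().  The generated function hands the per-CPU
 * counter ppd->ibport_data.rvp.<cntr> and its z_<cntr> baseline to
 * read_write_cpu(), which carries out the read or write indicated by
 * @mode for the given VL.  For example, def_access_sw_cpu(rc_acks)
 * below defines access_sw_cpu_rc_acks().
 */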
3944#define def_access_sw_cpu(cntr) \
3945static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,               \
3946                              void *context, int vl, int mode, u64 data)      \
3947{                                                                             \
3948        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
3949        return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,       \
3950                              ppd->ibport_data.rvp.cntr, vl,                  \
3951                              mode, data);                                    \
3952}
3953
3954def_access_sw_cpu(rc_acks);
3955def_access_sw_cpu(rc_qacks);
3956def_access_sw_cpu(rc_delayed_comp);
3957
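/*
 * def_access_ibp_counter(cntr) generates access_ibp_<cntr>(), which
 * reports the per-port ibport software counter
 * ppd->ibport_data.rvp.n_<cntr> through read_write_sw().  These
 * counters are not kept per virtual lane, so any per-VL query
 * (vl != CNTR_INVALID_VL) simply returns 0.
 */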
3958#define def_access_ibp_counter(cntr) \
3959static u64 access_ibp_##cntr(const struct cntr_entry *entry,                  \
3960                                void *context, int vl, int mode, u64 data)    \
3961{                                                                             \
3962        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
3963                                                                              \
3964        if (vl != CNTR_INVALID_VL)                                            \
3965                return 0;                                                     \
3966                                                                              \
3967        return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,        \
3968                             mode, data);                                     \
3969}
3970
3971def_access_ibp_counter(loop_pkts);
3972def_access_ibp_counter(rc_resends);
3973def_access_ibp_counter(rnr_naks);
3974def_access_ibp_counter(other_naks);
3975def_access_ibp_counter(rc_timeouts);
3976def_access_ibp_counter(pkt_drops);
3977def_access_ibp_counter(dmawait);
3978def_access_ibp_counter(rc_seqnak);
3979def_access_ibp_counter(rc_dupreq);
3980def_access_ibp_counter(rdma_seq);
3981def_access_ibp_counter(unaligned);
3982def_access_ibp_counter(seq_naks);
3983
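/*
 * Device counter table, indexed by the C_* counter IDs.  Each entry is
 * built with one of the *_DEV_CNTR_ELEM/DC_PERF_CNTR helper macros,
 * which tie a counter name to its CSR and to flags (CNTR_NORMAL,
 * CNTR_SYNTH, CNTR_VL) that select how the counter is read and whether
 * it is maintained per virtual lane.
 */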
3984static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3985[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3986[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
3987                        CNTR_NORMAL),
3988[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3989                        CNTR_NORMAL),
3990[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
3991                        RCV_TID_FLOW_GEN_MISMATCH_CNT,
3992                        CNTR_NORMAL),
3993[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
3994                        CNTR_NORMAL),
3995[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
3996                        RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
3997[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
3998                        CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
3999[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4000                        CNTR_NORMAL),
4001[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4002                        CNTR_NORMAL),
4003[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4004                        CNTR_NORMAL),
4005[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4006                        CNTR_NORMAL),
4007[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4008                        CNTR_NORMAL),
4009[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4010                        CNTR_NORMAL),
4011[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4012                        CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4013[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4014                        CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4015[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4016                              CNTR_SYNTH),
4017[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
4018[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4019                                 CNTR_SYNTH),
4020[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4021                                  CNTR_SYNTH),
4022[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4023                                  CNTR_SYNTH),
4024[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4025                                   DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4026[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4027                                  DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4028                                  CNTR_SYNTH),
4029[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4030                                DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4031[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4032                               CNTR_SYNTH),
4033[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4034                              CNTR_SYNTH),
4035[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4036                               CNTR_SYNTH),
4037[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4038                                 CNTR_SYNTH),
4039[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4040                                CNTR_SYNTH),
4041[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4042                                CNTR_SYNTH),
4043[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4044                               CNTR_SYNTH),
4045[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4046                                 CNTR_SYNTH | CNTR_VL),
4047[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4048                                CNTR_SYNTH | CNTR_VL),
4049[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4050[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4051                                 CNTR_SYNTH | CNTR_VL),
4052[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4053[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4054                                 CNTR_SYNTH | CNTR_VL),
4055[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4056                              CNTR_SYNTH),
4057[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4058                                 CNTR_SYNTH | CNTR_VL),
4059[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4060                                CNTR_SYNTH),
4061[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4062                                   CNTR_SYNTH | CNTR_VL),
4063[C_DC_TOTAL_CRC] =
4064        DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4065                         CNTR_SYNTH),
4066[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4067                                  CNTR_SYNTH),
4068[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4069                                  CNTR_SYNTH),
4070[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4071                                  CNTR_SYNTH),
4072[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4073                                  CNTR_SYNTH),
4074[C_DC_CRC_MULT_LN] =
4075        DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4076                         CNTR_SYNTH),
4077[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4078                                    CNTR_SYNTH),
4079[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4080                                    CNTR_SYNTH),
4081[C_DC_SEQ_CRC_CNT] =
4082        DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4083                         CNTR_SYNTH),
4084[C_DC_ESC0_ONLY_CNT] =
4085        DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4086                         CNTR_SYNTH),
4087[C_DC_ESC0_PLUS1_CNT] =
4088        DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4089                         CNTR_SYNTH),
4090[C_DC_ESC0_PLUS2_CNT] =
4091        DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4092                         CNTR_SYNTH),
4093[C_DC_REINIT_FROM_PEER_CNT] =
4094        DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4095                         CNTR_SYNTH),
4096[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4097                                  CNTR_SYNTH),
4098[C_DC_MISC_FLG_CNT] =
4099        DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4100                         CNTR_SYNTH),
4101[C_DC_PRF_GOOD_LTP_CNT] =
4102        DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4103[C_DC_PRF_ACCEPTED_LTP_CNT] =
4104        DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4105                         CNTR_SYNTH),
4106[C_DC_PRF_RX_FLIT_CNT] =
4107        DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4108[C_DC_PRF_TX_FLIT_CNT] =
4109        DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4110[C_DC_PRF_CLK_CNTR] =
4111        DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4112[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4113        DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4114[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4115        DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4116                         CNTR_SYNTH),
4117[C_DC_PG_STS_TX_SBE_CNT] =
4118        DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4119[C_DC_PG_STS_TX_MBE_CNT] =
4120        DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4121                         CNTR_SYNTH),
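/* Software counters and per-SDMA-engine counters */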
4122[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4123                            access_sw_cpu_intr),
4124[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4125                            access_sw_cpu_rcv_limit),
4126[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4127                            access_sw_vtx_wait),
4128[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4129                            access_sw_pio_wait),
4130[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4131                            access_sw_pio_drain),
4132[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4133                            access_sw_kmem_wait),
4134[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4135                            access_sw_send_schedule),
4136[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4137                                      SEND_DMA_DESC_FETCHED_CNT, 0,
4138                                      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4139                                      dev_access_u32_csr),
4140[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4141                             CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4142                             access_sde_int_cnt),
4143[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4144                             CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4145                             access_sde_err_cnt),
4146[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4147                                  CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4148                                  access_sde_idle_int_cnt),
4149[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4150                                      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4151                                      access_sde_progress_int_cnt),
4152/* MISC_ERR_STATUS */
4153[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4154                                CNTR_NORMAL,
4155                                access_misc_pll_lock_fail_err_cnt),
4156[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4157                                CNTR_NORMAL,
4158                                access_misc_mbist_fail_err_cnt),
4159[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4160                                CNTR_NORMAL,
4161                                access_misc_invalid_eep_cmd_err_cnt),
4162[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4163                                CNTR_NORMAL,
4164                                access_misc_efuse_done_parity_err_cnt),
4165[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4166                                CNTR_NORMAL,
4167                                access_misc_efuse_write_err_cnt),
4168[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4169                                0, CNTR_NORMAL,
4170                                access_misc_efuse_read_bad_addr_err_cnt),
4171[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4172                                CNTR_NORMAL,
4173                                access_misc_efuse_csr_parity_err_cnt),
4174[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4175                                CNTR_NORMAL,
4176                                access_misc_fw_auth_failed_err_cnt),
4177[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4178                                CNTR_NORMAL,
4179                                access_misc_key_mismatch_err_cnt),
4180[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4181                                CNTR_NORMAL,
4182                                access_misc_sbus_write_failed_err_cnt),
4183[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4184                                CNTR_NORMAL,
4185                                access_misc_csr_write_bad_addr_err_cnt),
4186[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4187                                CNTR_NORMAL,
4188                                access_misc_csr_read_bad_addr_err_cnt),
4189[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4190                                CNTR_NORMAL,
4191                                access_misc_csr_parity_err_cnt),
4192/* CceErrStatus */
4193[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4194                                CNTR_NORMAL,
4195                                access_sw_cce_err_status_aggregated_cnt),
4196[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4197                                CNTR_NORMAL,
4198                                access_cce_msix_csr_parity_err_cnt),
4199[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4200                                CNTR_NORMAL,
4201                                access_cce_int_map_unc_err_cnt),
4202[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4203                                CNTR_NORMAL,
4204                                access_cce_int_map_cor_err_cnt),
4205[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4206                                CNTR_NORMAL,
4207                                access_cce_msix_table_unc_err_cnt),
4208[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4209                                CNTR_NORMAL,
4210                                access_cce_msix_table_cor_err_cnt),
4211[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4212                                0, CNTR_NORMAL,
4213                                access_cce_rxdma_conv_fifo_parity_err_cnt),
4214[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4215                                0, CNTR_NORMAL,
4216                                access_cce_rcpl_async_fifo_parity_err_cnt),
4217[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4218                                CNTR_NORMAL,
4219                                access_cce_seg_write_bad_addr_err_cnt),
4220[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4221                                CNTR_NORMAL,
4222                                access_cce_seg_read_bad_addr_err_cnt),
4223[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4224                                CNTR_NORMAL,
4225                                access_la_triggered_cnt),
4226[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4227                                CNTR_NORMAL,
4228                                access_cce_trgt_cpl_timeout_err_cnt),
4229[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4230                                CNTR_NORMAL,
4231                                access_pcic_receive_parity_err_cnt),
4232[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4233                                CNTR_NORMAL,
4234                                access_pcic_transmit_back_parity_err_cnt),
4235[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4236                                0, CNTR_NORMAL,
4237                                access_pcic_transmit_front_parity_err_cnt),
4238[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4239                                CNTR_NORMAL,
4240                                access_pcic_cpl_dat_q_unc_err_cnt),
4241[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4242                                CNTR_NORMAL,
4243                                access_pcic_cpl_hd_q_unc_err_cnt),
4244[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4245                                CNTR_NORMAL,
4246                                access_pcic_post_dat_q_unc_err_cnt),
4247[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4248                                CNTR_NORMAL,
4249                                access_pcic_post_hd_q_unc_err_cnt),
4250[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4251                                CNTR_NORMAL,
4252                                access_pcic_retry_sot_mem_unc_err_cnt),
4253[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4254                                CNTR_NORMAL,
4255                                access_pcic_retry_mem_unc_err),
4256[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4257                                CNTR_NORMAL,
4258                                access_pcic_n_post_dat_q_parity_err_cnt),
4259[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4260                                CNTR_NORMAL,
4261                                access_pcic_n_post_h_q_parity_err_cnt),
4262[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4263                                CNTR_NORMAL,
4264                                access_pcic_cpl_dat_q_cor_err_cnt),
4265[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4266                                CNTR_NORMAL,
4267                                access_pcic_cpl_hd_q_cor_err_cnt),
4268[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4269                                CNTR_NORMAL,
4270                                access_pcic_post_dat_q_cor_err_cnt),
4271[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4272                                CNTR_NORMAL,
4273                                access_pcic_post_hd_q_cor_err_cnt),
4274[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4275                                CNTR_NORMAL,
4276                                access_pcic_retry_sot_mem_cor_err_cnt),
4277[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4278                                CNTR_NORMAL,
4279                                access_pcic_retry_mem_cor_err_cnt),
4280[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4281                                "CceCli1AsyncFifoDbgParityError", 0, 0,
4282                                CNTR_NORMAL,
4283                                access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4284[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4285                                "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4286                                CNTR_NORMAL,
4287                                access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4288                                ),
4289[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4290                        "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4291                        CNTR_NORMAL,
4292                        access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4293[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4294                        "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4295                        CNTR_NORMAL,
4296                        access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4297[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4298                        0, CNTR_NORMAL,
4299                        access_cce_cli2_async_fifo_parity_err_cnt),
4300[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4301                        CNTR_NORMAL,
4302                        access_cce_csr_cfg_bus_parity_err_cnt),
4303[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4304                        0, CNTR_NORMAL,
4305                        access_cce_cli0_async_fifo_parity_err_cnt),
4306[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4307                        CNTR_NORMAL,
4308                        access_cce_rspd_data_parity_err_cnt),
4309[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4310                        CNTR_NORMAL,
4311                        access_cce_trgt_access_err_cnt),
4312[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4313                        0, CNTR_NORMAL,
4314                        access_cce_trgt_async_fifo_parity_err_cnt),
4315[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4316                        CNTR_NORMAL,
4317                        access_cce_csr_write_bad_addr_err_cnt),
4318[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4319                        CNTR_NORMAL,
4320                        access_cce_csr_read_bad_addr_err_cnt),
4321[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4322                        CNTR_NORMAL,
4323                        access_ccs_csr_parity_err_cnt),
4324
4325/* RcvErrStatus */
4326[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4327                        CNTR_NORMAL,
4328                        access_rx_csr_parity_err_cnt),
4329[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4330                        CNTR_NORMAL,
4331                        access_rx_csr_write_bad_addr_err_cnt),
4332[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4333                        CNTR_NORMAL,
4334                        access_rx_csr_read_bad_addr_err_cnt),
4335[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4336                        CNTR_NORMAL,
4337                        access_rx_dma_csr_unc_err_cnt),
4338[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4339                        CNTR_NORMAL,
4340                        access_rx_dma_dq_fsm_encoding_err_cnt),
4341[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4342                        CNTR_NORMAL,
4343                        access_rx_dma_eq_fsm_encoding_err_cnt),
4344[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4345                        CNTR_NORMAL,
4346                        access_rx_dma_csr_parity_err_cnt),
4347[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4348                        CNTR_NORMAL,
4349                        access_rx_rbuf_data_cor_err_cnt),
4350[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4351                        CNTR_NORMAL,
4352                        access_rx_rbuf_data_unc_err_cnt),
4353[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4354                        CNTR_NORMAL,
4355                        access_rx_dma_data_fifo_rd_cor_err_cnt),
4356[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4357                        CNTR_NORMAL,
4358                        access_rx_dma_data_fifo_rd_unc_err_cnt),
4359[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4360                        CNTR_NORMAL,
4361                        access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4362[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4363                        CNTR_NORMAL,
4364                        access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4365[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4366                        CNTR_NORMAL,
4367                        access_rx_rbuf_desc_part2_cor_err_cnt),
4368[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4369                        CNTR_NORMAL,
4370                        access_rx_rbuf_desc_part2_unc_err_cnt),
4371[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4372                        CNTR_NORMAL,
4373                        access_rx_rbuf_desc_part1_cor_err_cnt),
4374[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4375                        CNTR_NORMAL,
4376                        access_rx_rbuf_desc_part1_unc_err_cnt),
4377[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4378                        CNTR_NORMAL,
4379                        access_rx_hq_intr_fsm_err_cnt),
4380[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4381                        CNTR_NORMAL,
4382                        access_rx_hq_intr_csr_parity_err_cnt),
4383[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4384                        CNTR_NORMAL,
4385                        access_rx_lookup_csr_parity_err_cnt),
4386[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4387                        CNTR_NORMAL,
4388                        access_rx_lookup_rcv_array_cor_err_cnt),
4389[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4390                        CNTR_NORMAL,
4391                        access_rx_lookup_rcv_array_unc_err_cnt),
4392[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4393                        0, CNTR_NORMAL,
4394                        access_rx_lookup_des_part2_parity_err_cnt),
4395[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4396                        0, CNTR_NORMAL,
4397                        access_rx_lookup_des_part1_unc_cor_err_cnt),
4398[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4399                        CNTR_NORMAL,
4400                        access_rx_lookup_des_part1_unc_err_cnt),
4401[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4402                        CNTR_NORMAL,
4403                        access_rx_rbuf_next_free_buf_cor_err_cnt),
4404[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4405                        CNTR_NORMAL,
4406                        access_rx_rbuf_next_free_buf_unc_err_cnt),
4407[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4408                        "RxRbufFlInitWrAddrParityErr", 0, 0,
4409                        CNTR_NORMAL,
4410                        access_rbuf_fl_init_wr_addr_parity_err_cnt),
4411[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4412                        0, CNTR_NORMAL,
4413                        access_rx_rbuf_fl_initdone_parity_err_cnt),
4414[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4415                        0, CNTR_NORMAL,
4416                        access_rx_rbuf_fl_write_addr_parity_err_cnt),
4417[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4418                        CNTR_NORMAL,
4419                        access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4420[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4421                        CNTR_NORMAL,
4422                        access_rx_rbuf_empty_err_cnt),
4423[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4424                        CNTR_NORMAL,
4425                        access_rx_rbuf_full_err_cnt),
4426[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4427                        CNTR_NORMAL,
4428                        access_rbuf_bad_lookup_err_cnt),
4429[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4430                        CNTR_NORMAL,
4431                        access_rbuf_ctx_id_parity_err_cnt),
4432[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4433                        CNTR_NORMAL,
4434                        access_rbuf_csr_qeopdw_parity_err_cnt),
4435[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4436                        "RxRbufCsrQNumOfPktParityErr", 0, 0,
4437                        CNTR_NORMAL,
4438                        access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4439[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4440                        "RxRbufCsrQTlPtrParityErr", 0, 0,
4441                        CNTR_NORMAL,
4442                        access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4443[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4444                        0, CNTR_NORMAL,
4445                        access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4446[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4447                        0, CNTR_NORMAL,
4448                        access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4449[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4450                        0, 0, CNTR_NORMAL,
4451                        access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4452[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4453                        0, CNTR_NORMAL,
4454                        access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4455[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4456                        "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4457                        CNTR_NORMAL,
4458                        access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4459[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4460                        0, CNTR_NORMAL,
4461                        access_rx_rbuf_block_list_read_cor_err_cnt),
4462[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4463                        0, CNTR_NORMAL,
4464                        access_rx_rbuf_block_list_read_unc_err_cnt),
4465[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4466                        CNTR_NORMAL,
4467                        access_rx_rbuf_lookup_des_cor_err_cnt),
4468[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4469                        CNTR_NORMAL,
4470                        access_rx_rbuf_lookup_des_unc_err_cnt),
4471[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4472                        "RxRbufLookupDesRegUncCorErr", 0, 0,
4473                        CNTR_NORMAL,
4474                        access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4475[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4476                        CNTR_NORMAL,
4477                        access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4478[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4479                        CNTR_NORMAL,
4480                        access_rx_rbuf_free_list_cor_err_cnt),
4481[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4482                        CNTR_NORMAL,
4483                        access_rx_rbuf_free_list_unc_err_cnt),
4484[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4485                        CNTR_NORMAL,
4486                        access_rx_rcv_fsm_encoding_err_cnt),
4487[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4488                        CNTR_NORMAL,
4489                        access_rx_dma_flag_cor_err_cnt),
4490[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4491                        CNTR_NORMAL,
4492                        access_rx_dma_flag_unc_err_cnt),
4493[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4494                        CNTR_NORMAL,
4495                        access_rx_dc_sop_eop_parity_err_cnt),
4496[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4497                        CNTR_NORMAL,
4498                        access_rx_rcv_csr_parity_err_cnt),
4499[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4500                        CNTR_NORMAL,
4501                        access_rx_rcv_qp_map_table_cor_err_cnt),
4502[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4503                        CNTR_NORMAL,
4504                        access_rx_rcv_qp_map_table_unc_err_cnt),
4505[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4506                        CNTR_NORMAL,
4507                        access_rx_rcv_data_cor_err_cnt),
4508[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4509                        CNTR_NORMAL,
4510                        access_rx_rcv_data_unc_err_cnt),
4511[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4512                        CNTR_NORMAL,
4513                        access_rx_rcv_hdr_cor_err_cnt),
4514[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4515                        CNTR_NORMAL,
4516                        access_rx_rcv_hdr_unc_err_cnt),
4517[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4518                        CNTR_NORMAL,
4519                        access_rx_dc_intf_parity_err_cnt),
4520[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4521                        CNTR_NORMAL,
4522                        access_rx_dma_csr_cor_err_cnt),
4523/* SendPioErrStatus */
4524[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4525                        CNTR_NORMAL,
4526                        access_pio_pec_sop_head_parity_err_cnt),
4527[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4528                        CNTR_NORMAL,
4529                        access_pio_pcc_sop_head_parity_err_cnt),
4530[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4531                        0, 0, CNTR_NORMAL,
4532                        access_pio_last_returned_cnt_parity_err_cnt),
4533[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4534                        0, CNTR_NORMAL,
4535                        access_pio_current_free_cnt_parity_err_cnt),
4536[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4537                        CNTR_NORMAL,
4538                        access_pio_reserved_31_err_cnt),
4539[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4540                        CNTR_NORMAL,
4541                        access_pio_reserved_30_err_cnt),
4542[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4543                        CNTR_NORMAL,
4544                        access_pio_ppmc_sop_len_err_cnt),
4545[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4546                        CNTR_NORMAL,
4547                        access_pio_ppmc_bqc_mem_parity_err_cnt),
4548[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4549                        CNTR_NORMAL,
4550                        access_pio_vl_fifo_parity_err_cnt),
4551[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4552                        CNTR_NORMAL,
4553                        access_pio_vlf_sop_parity_err_cnt),
4554[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4555                        CNTR_NORMAL,
4556                        access_pio_vlf_v1_len_parity_err_cnt),
4557[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4558                        CNTR_NORMAL,
4559                        access_pio_block_qw_count_parity_err_cnt),
4560[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4561                        CNTR_NORMAL,
4562                        access_pio_write_qw_valid_parity_err_cnt),
4563[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4564                        CNTR_NORMAL,
4565                        access_pio_state_machine_err_cnt),
4566[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4567                        CNTR_NORMAL,
4568                        access_pio_write_data_parity_err_cnt),
4569[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4570                        CNTR_NORMAL,
4571                        access_pio_host_addr_mem_cor_err_cnt),
4572[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4573                        CNTR_NORMAL,
4574                        access_pio_host_addr_mem_unc_err_cnt),
4575[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4576                        CNTR_NORMAL,
4577                        access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4578[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4579                        CNTR_NORMAL,
4580                        access_pio_init_sm_in_err_cnt),
4581[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4582                        CNTR_NORMAL,
4583                        access_pio_ppmc_pbl_fifo_err_cnt),
4584[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4585                        0, CNTR_NORMAL,
4586                        access_pio_credit_ret_fifo_parity_err_cnt),
4587[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4588                        CNTR_NORMAL,
4589                        access_pio_v1_len_mem_bank1_cor_err_cnt),
4590[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4591                        CNTR_NORMAL,
4592                        access_pio_v1_len_mem_bank0_cor_err_cnt),
4593[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4594                        CNTR_NORMAL,
4595                        access_pio_v1_len_mem_bank1_unc_err_cnt),
4596[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4597                        CNTR_NORMAL,
4598                        access_pio_v1_len_mem_bank0_unc_err_cnt),
4599[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4600                        CNTR_NORMAL,
4601                        access_pio_sm_pkt_reset_parity_err_cnt),
4602[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4603                        CNTR_NORMAL,
4604                        access_pio_pkt_evict_fifo_parity_err_cnt),
4605[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4606                        "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4607                        CNTR_NORMAL,
4608                        access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4609[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4610                        CNTR_NORMAL,
4611                        access_pio_sbrdctl_crrel_parity_err_cnt),
4612[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4613                        CNTR_NORMAL,
4614                        access_pio_pec_fifo_parity_err_cnt),
4615[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4616                        CNTR_NORMAL,
4617                        access_pio_pcc_fifo_parity_err_cnt),
4618[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4619                        CNTR_NORMAL,
4620                        access_pio_sb_mem_fifo1_err_cnt),
4621[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4622                        CNTR_NORMAL,
4623                        access_pio_sb_mem_fifo0_err_cnt),
4624[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4625                        CNTR_NORMAL,
4626                        access_pio_csr_parity_err_cnt),
4627[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4628                        CNTR_NORMAL,
4629                        access_pio_write_addr_parity_err_cnt),
4630[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4631                        CNTR_NORMAL,
4632                        access_pio_write_bad_ctxt_err_cnt),
4633/* SendDmaErrStatus */
4634[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4635                        0, CNTR_NORMAL,
4636                        access_sdma_pcie_req_tracking_cor_err_cnt),
4637[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4638                        0, CNTR_NORMAL,
4639                        access_sdma_pcie_req_tracking_unc_err_cnt),
4640[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4641                        CNTR_NORMAL,
4642                        access_sdma_csr_parity_err_cnt),
4643[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4644                        CNTR_NORMAL,
4645                        access_sdma_rpy_tag_err_cnt),
4646/* SendEgressErrStatus */
4647[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4648                        CNTR_NORMAL,
4649                        access_tx_read_pio_memory_csr_unc_err_cnt),
4650[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4651                        0, CNTR_NORMAL,
4652                        access_tx_read_sdma_memory_csr_err_cnt),
4653[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4654                        CNTR_NORMAL,
4655                        access_tx_egress_fifo_cor_err_cnt),
4656[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4657                        CNTR_NORMAL,
4658                        access_tx_read_pio_memory_cor_err_cnt),
4659[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4660                        CNTR_NORMAL,
4661                        access_tx_read_sdma_memory_cor_err_cnt),
4662[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4663                        CNTR_NORMAL,
4664                        access_tx_sb_hdr_cor_err_cnt),
4665[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4666                        CNTR_NORMAL,
4667                        access_tx_credit_overrun_err_cnt),
4668[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4669                        CNTR_NORMAL,
4670                        access_tx_launch_fifo8_cor_err_cnt),
4671[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4672                        CNTR_NORMAL,
4673                        access_tx_launch_fifo7_cor_err_cnt),
4674[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4675                        CNTR_NORMAL,
4676                        access_tx_launch_fifo6_cor_err_cnt),
4677[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4678                        CNTR_NORMAL,
4679                        access_tx_launch_fifo5_cor_err_cnt),
4680[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4681                        CNTR_NORMAL,
4682                        access_tx_launch_fifo4_cor_err_cnt),
4683[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4684                        CNTR_NORMAL,
4685                        access_tx_launch_fifo3_cor_err_cnt),
4686[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4687                        CNTR_NORMAL,
4688                        access_tx_launch_fifo2_cor_err_cnt),
4689[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4690                        CNTR_NORMAL,
4691                        access_tx_launch_fifo1_cor_err_cnt),
4692[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4693                        CNTR_NORMAL,
4694                        access_tx_launch_fifo0_cor_err_cnt),
4695[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4696                        CNTR_NORMAL,
4697                        access_tx_credit_return_vl_err_cnt),
4698[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4699                        CNTR_NORMAL,
4700                        access_tx_hcrc_insertion_err_cnt),
4701[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4702                        CNTR_NORMAL,
4703                        access_tx_egress_fifo_unc_err_cnt),
4704[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4705                        CNTR_NORMAL,
4706                        access_tx_read_pio_memory_unc_err_cnt),
4707[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4708                        CNTR_NORMAL,
4709                        access_tx_read_sdma_memory_unc_err_cnt),
4710[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4711                        CNTR_NORMAL,
4712                        access_tx_sb_hdr_unc_err_cnt),
4713[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4714                        CNTR_NORMAL,
4715                        access_tx_credit_return_partiy_err_cnt),
4716[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4717                        0, 0, CNTR_NORMAL,
4718                        access_tx_launch_fifo8_unc_or_parity_err_cnt),
4719[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4720                        0, 0, CNTR_NORMAL,
4721                        access_tx_launch_fifo7_unc_or_parity_err_cnt),
4722[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4723                        0, 0, CNTR_NORMAL,
4724                        access_tx_launch_fifo6_unc_or_parity_err_cnt),
4725[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4726                        0, 0, CNTR_NORMAL,
4727                        access_tx_launch_fifo5_unc_or_parity_err_cnt),
4728[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4729                        0, 0, CNTR_NORMAL,
4730                        access_tx_launch_fifo4_unc_or_parity_err_cnt),
4731[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4732                        0, 0, CNTR_NORMAL,
4733                        access_tx_launch_fifo3_unc_or_parity_err_cnt),
4734[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4735                        0, 0, CNTR_NORMAL,
4736                        access_tx_launch_fifo2_unc_or_parity_err_cnt),
4737[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4738                        0, 0, CNTR_NORMAL,
4739                        access_tx_launch_fifo1_unc_or_parity_err_cnt),
4740[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4741                        0, 0, CNTR_NORMAL,
4742                        access_tx_launch_fifo0_unc_or_parity_err_cnt),
4743[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4744                        0, 0, CNTR_NORMAL,
4745                        access_tx_sdma15_disallowed_packet_err_cnt),
4746[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4747                        0, 0, CNTR_NORMAL,
4748                        access_tx_sdma14_disallowed_packet_err_cnt),
4749[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4750                        0, 0, CNTR_NORMAL,
4751                        access_tx_sdma13_disallowed_packet_err_cnt),
4752[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4753                        0, 0, CNTR_NORMAL,
4754                        access_tx_sdma12_disallowed_packet_err_cnt),
4755[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4756                        0, 0, CNTR_NORMAL,
4757                        access_tx_sdma11_disallowed_packet_err_cnt),
4758[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4759                        0, 0, CNTR_NORMAL,
4760                        access_tx_sdma10_disallowed_packet_err_cnt),
4761[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4762                        0, 0, CNTR_NORMAL,
4763                        access_tx_sdma9_disallowed_packet_err_cnt),
4764[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4765                        0, 0, CNTR_NORMAL,
4766                        access_tx_sdma8_disallowed_packet_err_cnt),
4767[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4768                        0, 0, CNTR_NORMAL,
4769                        access_tx_sdma7_disallowed_packet_err_cnt),
4770[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4771                        0, 0, CNTR_NORMAL,
4772                        access_tx_sdma6_disallowed_packet_err_cnt),
4773[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4774                        0, 0, CNTR_NORMAL,
4775                        access_tx_sdma5_disallowed_packet_err_cnt),
4776[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4777                        0, 0, CNTR_NORMAL,
4778                        access_tx_sdma4_disallowed_packet_err_cnt),
4779[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4780                        0, 0, CNTR_NORMAL,
4781                        access_tx_sdma3_disallowed_packet_err_cnt),
4782[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4783                        0, 0, CNTR_NORMAL,
4784                        access_tx_sdma2_disallowed_packet_err_cnt),
4785[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4786                        0, 0, CNTR_NORMAL,
4787                        access_tx_sdma1_disallowed_packet_err_cnt),
4788[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4789                        0, 0, CNTR_NORMAL,
4790                        access_tx_sdma0_disallowed_packet_err_cnt),
4791[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4792                        CNTR_NORMAL,
4793                        access_tx_config_parity_err_cnt),
4794[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4795                        CNTR_NORMAL,
4796                        access_tx_sbrd_ctl_csr_parity_err_cnt),
4797[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4798                        CNTR_NORMAL,
4799                        access_tx_launch_csr_parity_err_cnt),
4800[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4801                        CNTR_NORMAL,
4802                        access_tx_illegal_vl_err_cnt),
4803[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4804                        "TxSbrdCtlStateMachineParityErr", 0, 0,
4805                        CNTR_NORMAL,
4806                        access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4807[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4808                        CNTR_NORMAL,
4809                        access_egress_reserved_10_err_cnt),
4810[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4811                        CNTR_NORMAL,
4812                        access_egress_reserved_9_err_cnt),
4813[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4814                        0, 0, CNTR_NORMAL,
4815                        access_tx_sdma_launch_intf_parity_err_cnt),
4816[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4817                        CNTR_NORMAL,
4818                        access_tx_pio_launch_intf_parity_err_cnt),
4819[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4820                        CNTR_NORMAL,
4821                        access_egress_reserved_6_err_cnt),
4822[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4823                        CNTR_NORMAL,
4824                        access_tx_incorrect_link_state_err_cnt),
4825[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4826                        CNTR_NORMAL,
4827                        access_tx_linkdown_err_cnt),
4828[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4829                        "EgressFifoUnderrunOrParityErr", 0, 0,
4830                        CNTR_NORMAL,
4831                        access_tx_egress_fifi_underrun_or_parity_err_cnt),
4832[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4833                        CNTR_NORMAL,
4834                        access_egress_reserved_2_err_cnt),
4835[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4836                        CNTR_NORMAL,
4837                        access_tx_pkt_integrity_mem_unc_err_cnt),
4838[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4839                        CNTR_NORMAL,
4840                        access_tx_pkt_integrity_mem_cor_err_cnt),
4841/* SendErrStatus */
4842[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4843                        CNTR_NORMAL,
4844                        access_send_csr_write_bad_addr_err_cnt),
4845[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4846                        CNTR_NORMAL,
4847                        access_send_csr_read_bad_addr_err_cnt),
4848[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4849                        CNTR_NORMAL,
4850                        access_send_csr_parity_cnt),
4851/* SendCtxtErrStatus */
4852[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4853                        CNTR_NORMAL,
4854                        access_pio_write_out_of_bounds_err_cnt),
4855[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4856                        CNTR_NORMAL,
4857                        access_pio_write_overflow_err_cnt),
4858[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4859                        0, 0, CNTR_NORMAL,
4860                        access_pio_write_crosses_boundary_err_cnt),
4861[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4862                        CNTR_NORMAL,
4863                        access_pio_disallowed_packet_err_cnt),
4864[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4865                        CNTR_NORMAL,
4866                        access_pio_inconsistent_sop_err_cnt),
4867/* SendDmaEngErrStatus */
4868[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4869                        0, 0, CNTR_NORMAL,
4870                        access_sdma_header_request_fifo_cor_err_cnt),
4871[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4872                        CNTR_NORMAL,
4873                        access_sdma_header_storage_cor_err_cnt),
4874[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4875                        CNTR_NORMAL,
4876                        access_sdma_packet_tracking_cor_err_cnt),
4877[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4878                        CNTR_NORMAL,
4879                        access_sdma_assembly_cor_err_cnt),
4880[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4881                        CNTR_NORMAL,
4882                        access_sdma_desc_table_cor_err_cnt),
4883[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4884                        0, 0, CNTR_NORMAL,
4885                        access_sdma_header_request_fifo_unc_err_cnt),
4886[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4887                        CNTR_NORMAL,
4888                        access_sdma_header_storage_unc_err_cnt),
4889[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4890                        CNTR_NORMAL,
4891                        access_sdma_packet_tracking_unc_err_cnt),
4892[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4893                        CNTR_NORMAL,
4894                        access_sdma_assembly_unc_err_cnt),
4895[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4896                        CNTR_NORMAL,
4897                        access_sdma_desc_table_unc_err_cnt),
4898[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4899                        CNTR_NORMAL,
4900                        access_sdma_timeout_err_cnt),
4901[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4902                        CNTR_NORMAL,
4903                        access_sdma_header_length_err_cnt),
4904[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4905                        CNTR_NORMAL,
4906                        access_sdma_header_address_err_cnt),
4907[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4908                        CNTR_NORMAL,
4909                        access_sdma_header_select_err_cnt),
4910[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4911                        CNTR_NORMAL,
4912                        access_sdma_reserved_9_err_cnt),
4913[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4914                        CNTR_NORMAL,
4915                        access_sdma_packet_desc_overflow_err_cnt),
4916[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4917                        CNTR_NORMAL,
4918                        access_sdma_length_mismatch_err_cnt),
4919[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4920                        CNTR_NORMAL,
4921                        access_sdma_halt_err_cnt),
4922[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4923                        CNTR_NORMAL,
4924                        access_sdma_mem_read_err_cnt),
4925[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4926                        CNTR_NORMAL,
4927                        access_sdma_first_desc_err_cnt),
4928[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4929                        CNTR_NORMAL,
4930                        access_sdma_tail_out_of_bounds_err_cnt),
4931[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4932                        CNTR_NORMAL,
4933                        access_sdma_too_long_err_cnt),
4934[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4935                        CNTR_NORMAL,
4936                        access_sdma_gen_mismatch_err_cnt),
4937[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4938                        CNTR_NORMAL,
4939                        access_sdma_wrong_dw_err_cnt),
4940};
4941
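/*
 * Per-port counters, indexed by the C_* port counter identifiers and sized
 * by PORT_CNTR_LAST.  Entries use the same layout as dev_cntrs[]: counter
 * name, CSR/offset information, flags, and an access routine.
 */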
4942static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4943[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4944                        CNTR_NORMAL),
4945[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4946                        CNTR_NORMAL),
4947[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4948                        CNTR_NORMAL),
4949[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4950                        CNTR_NORMAL),
4951[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4952                        CNTR_NORMAL),
4953[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4954                        CNTR_NORMAL),
4955[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4956                        CNTR_NORMAL),
4957[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4958[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4959[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4960[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
4961                                      CNTR_SYNTH | CNTR_VL),
4962[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
4963                                     CNTR_SYNTH | CNTR_VL),
4964[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
4965                                      CNTR_SYNTH | CNTR_VL),
4966[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
4967[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
4968[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4969                             access_sw_link_dn_cnt),
4970[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4971                           access_sw_link_up_cnt),
4972[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
4973                                 access_sw_unknown_frame_cnt),
4974[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4975                             access_sw_xmit_discards),
4976[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
4977                                CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
4978                                access_sw_xmit_discards),
4979[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
4980                                 access_xmit_constraint_errs),
4981[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
4982                                access_rcv_constraint_errs),
4983[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
4984[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
4985[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
4986[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
4987[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
4988[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
4989[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
4990[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
4991[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
4992[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
4993[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
4994[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
4995[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
4996                               access_sw_cpu_rc_acks),
4997[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
4998                                access_sw_cpu_rc_qacks),
4999[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5000                                       access_sw_cpu_rc_delayed_comp),
5001[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5002[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5003[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5004[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5005[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5006[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5007[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5008[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5009[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5010[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5011[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5012[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5013[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5014[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5015[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5016[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5017[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5018[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5019[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5020[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5021[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5022[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5023[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5024[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5025[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5026[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5027[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5028[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5029[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5030[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5031[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5032[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5033[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5034[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5035[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5036[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5037[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5038[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5039[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5040[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5041[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5042[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5043[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5044[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5045[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5046[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5047[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5048[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5049[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5050[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5051[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5052[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5053[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5054[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5055[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5056[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5057[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5058[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5059[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5060[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5061[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5062[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5063[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5064[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5065[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5066[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5067[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5068[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5069[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5070[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5071[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5072[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5073[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5074[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5075[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5076[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5077[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5078[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5079[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5080[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5081};
5082
5083/* ======================================================================== */
5084
5085/* return true if this is chip revision A */
5086int is_ax(struct hfi1_devdata *dd)
5087{
5088        u8 chip_rev_minor =
5089                dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5090                        & CCE_REVISION_CHIP_REV_MINOR_MASK;
5091        return (chip_rev_minor & 0xf0) == 0;
5092}
5093
5094/* return true if this is chip revision B */
5095int is_bx(struct hfi1_devdata *dd)
5096{
5097        u8 chip_rev_minor =
5098                dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5099                        & CCE_REVISION_CHIP_REV_MINOR_MASK;
5100        return (chip_rev_minor & 0xF0) == 0x10;
5101}
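
/*
 * In both checks above, only the upper nibble of the CCE_REVISION
 * minor-revision field is compared: 0x0X identifies an A-step part and
 * 0x1X a B-step part; the low nibble is masked off and ignored.
 */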
5102
5103/*
5104 * Append string s to buffer buf.  Arguments curp and lenp point to the
5105 * current position and remaining length, respectively.
5106 *
5107 * return 0 on success, 1 on out of room
5108 */
5109static int append_str(char *buf, char **curp, int *lenp, const char *s)
5110{
5111        char *p = *curp;
5112        int len = *lenp;
5113        int result = 0; /* success */
5114        char c;
5115
5116        /* add a comma, if this is not the first string in the buffer */
5117        if (p != buf) {
5118                if (len == 0) {
5119                        result = 1; /* out of room */
5120                        goto done;
5121                }
5122                *p++ = ',';
5123                len--;
5124        }
5125
5126        /* copy the string */
5127        while ((c = *s++) != 0) {
5128                if (len == 0) {
5129                        result = 1; /* out of room */
5130                        goto done;
5131                }
5132                *p++ = c;
5133                len--;
5134        }
5135
5136done:
5137        /* write return values */
5138        *curp = p;
5139        *lenp = len;
5140
5141        return result;
5142}
5143
5144/*
5145 * Using the given flag table, print a comma separated string into
5146 * the buffer.  End in '*' if the buffer is too short.
5147 */
5148static char *flag_string(char *buf, int buf_len, u64 flags,
5149                         struct flag_table *table, int table_size)
5150{
5151        char extra[32];
5152        char *p = buf;
5153        int len = buf_len;
5154        int no_room = 0;
5155        int i;
5156
5157        /* make sure there are at least 2 characters so we can form "*" */
5158        if (len < 2)
5159                return "";
5160
5161        len--;  /* leave room for a nul */
5162        for (i = 0; i < table_size; i++) {
5163                if (flags & table[i].flag) {
5164                        no_room = append_str(buf, &p, &len, table[i].str);
5165                        if (no_room)
5166                                break;
5167                        flags &= ~table[i].flag;
5168                }
5169        }
5170
5171        /* any undocumented bits left? */
5172        if (!no_room && flags) {
5173                snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5174                no_room = append_str(buf, &p, &len, extra);
5175        }
5176
5177        /* add '*' if we ran out of room */
5178        if (no_room) {
5179                /* may need to back up to add space for a '*' */
5180                if (len == 0)
5181                        --p;
5182                *p++ = '*';
5183        }
5184
5185        /* add final nul - space already allocated above */
5186        *p = 0;
5187        return buf;
5188}
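
/*
 * A worked example with a hypothetical two-entry flag table (for
 * illustration only): if the table maps bit 0x1 to "FooErr" and bit 0x4
 * to "BarErr", then flag_string(buf, 64, 0x0d, table, 2) produces
 * "FooErr,BarErr,bits 0x8": known bits are named in table order and the
 * leftover undocumented bit is reported numerically.
 */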
5189
5190/* first 8 CCE error interrupt source names */
5191static const char * const cce_misc_names[] = {
5192        "CceErrInt",            /* 0 */
5193        "RxeErrInt",            /* 1 */
5194        "MiscErrInt",           /* 2 */
5195        "Reserved3",            /* 3 */
5196        "PioErrInt",            /* 4 */
5197        "SDmaErrInt",           /* 5 */
5198        "EgressErrInt",         /* 6 */
5199        "TxeErrInt"             /* 7 */
5200};
5201
5202/*
5203 * Return the miscellaneous error interrupt name.
5204 */
5205static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5206{
5207        if (source < ARRAY_SIZE(cce_misc_names))
5208                strncpy(buf, cce_misc_names[source], bsize);
5209        else
5210                snprintf(buf, bsize, "Reserved%u",
5211                         source + IS_GENERAL_ERR_START);
5212
5213        return buf;
5214}
5215
5216/*
5217 * Return the SDMA engine error interrupt name.
5218 */
5219static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5220{
5221        snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5222        return buf;
5223}
5224
5225/*
5226 * Return the send context error interrupt name.
5227 */
5228static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5229{
5230        snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5231        return buf;
5232}
5233
5234static const char * const various_names[] = {
5235        "PbcInt",
5236        "GpioAssertInt",
5237        "Qsfp1Int",
5238        "Qsfp2Int",
5239        "TCritInt"
5240};
5241
5242/*
5243 * Return the various interrupt name.
5244 */
5245static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5246{
5247        if (source < ARRAY_SIZE(various_names))
5248                strncpy(buf, various_names[source], bsize);
5249        else
5250                snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5251        return buf;
5252}
5253
5254/*
5255 * Return the DC interrupt name.
5256 */
5257static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5258{
5259        static const char * const dc_int_names[] = {
5260                "common",
5261                "lcb",
5262                "8051",
5263                "lbm"   /* local block merge */
5264        };
5265
5266        if (source < ARRAY_SIZE(dc_int_names))
5267                snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5268        else
5269                snprintf(buf, bsize, "DCInt%u", source);
5270        return buf;
5271}
5272
5273static const char * const sdma_int_names[] = {
5274        "SDmaInt",
5275        "SdmaIdleInt",
5276        "SdmaProgressInt",
5277};
5278
5279/*
5280 * Return the SDMA engine interrupt name.
5281 */
5282static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5283{
5284        /* what interrupt */
5285        unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
5286        /* which engine */
5287        unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5288
5289        if (likely(what < 3))
5290                snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5291        else
5292                snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5293        return buf;
5294}
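
/*
 * For example, with the chip's 16 SDMA engines (TXE_NUM_SDMA_ENGINES),
 * source 18 decodes to what = 1 and which = 2 and yields the name
 * "SdmaIdleInt2"; any source of 3 * TXE_NUM_SDMA_ENGINES or above is
 * reported as an invalid SDMA interrupt.
 */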
5295
5296/*
5297 * Return the receive available interrupt name.
5298 */
5299static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5300{
5301        snprintf(buf, bsize, "RcvAvailInt%u", source);
5302        return buf;
5303}
5304
5305/*
5306 * Return the receive urgent interrupt name.
5307 */
5308static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5309{
5310        snprintf(buf, bsize, "RcvUrgentInt%u", source);
5311        return buf;
5312}
5313
5314/*
5315 * Return the send credit interrupt name.
5316 */
5317static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5318{
5319        snprintf(buf, bsize, "SendCreditInt%u", source);
5320        return buf;
5321}
5322
5323/*
5324 * Return the reserved interrupt name.
5325 */
5326static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5327{
5328        snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5329        return buf;
5330}
5331
5332static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5333{
5334        return flag_string(buf, buf_len, flags,
5335                           cce_err_status_flags,
5336                           ARRAY_SIZE(cce_err_status_flags));
5337}
5338
5339static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5340{
5341        return flag_string(buf, buf_len, flags,
5342                           rxe_err_status_flags,
5343                           ARRAY_SIZE(rxe_err_status_flags));
5344}
5345
5346static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5347{
5348        return flag_string(buf, buf_len, flags, misc_err_status_flags,
5349                           ARRAY_SIZE(misc_err_status_flags));
5350}
5351
5352static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5353{
5354        return flag_string(buf, buf_len, flags,
5355                           pio_err_status_flags,
5356                           ARRAY_SIZE(pio_err_status_flags));
5357}
5358
5359static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5360{
5361        return flag_string(buf, buf_len, flags,
5362                           sdma_err_status_flags,
5363                           ARRAY_SIZE(sdma_err_status_flags));
5364}
5365
5366static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5367{
5368        return flag_string(buf, buf_len, flags,
5369                           egress_err_status_flags,
5370                           ARRAY_SIZE(egress_err_status_flags));
5371}
5372
5373static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5374{
5375        return flag_string(buf, buf_len, flags,
5376                           egress_err_info_flags,
5377                           ARRAY_SIZE(egress_err_info_flags));
5378}
5379
5380static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5381{
5382        return flag_string(buf, buf_len, flags,
5383                           send_err_status_flags,
5384                           ARRAY_SIZE(send_err_status_flags));
5385}
5386
5387static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5388{
5389        char buf[96];
5390        int i = 0;
5391
5392        /*
5393         * For most these errors, there is nothing that can be done except
5394         * report or record it.
5395         */
5396        dd_dev_info(dd, "CCE Error: %s\n",
5397                    cce_err_status_string(buf, sizeof(buf), reg));
5398
5399        if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5400            is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5401                /* this error requires a manual drop into SPC freeze mode */
5402                /* then a fix up */
5403                start_freeze_handling(dd->pport, FREEZE_SELF);
5404        }
5405
5406        for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5407                if (reg & (1ull << i)) {
5408                        incr_cntr64(&dd->cce_err_status_cnt[i]);
5409                        /* maintain a counter over all cce_err_status errors */
5410                        incr_cntr64(&dd->sw_cce_err_status_aggregate);
5411                }
5412        }
5413}
5414
5415/*
5416 * Check counters for receive errors that do not have an interrupt
5417 * associated with them.
5418 */
5419#define RCVERR_CHECK_TIME 10
5420static void update_rcverr_timer(unsigned long opaque)
5421{
5422        struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5423        struct hfi1_pportdata *ppd = dd->pport;
5424        u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5425
5426        if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5427            ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5428                dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5429                set_link_down_reason(
5430                        ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5431                        OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5432                queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5433        }
5434        dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5435
5436        mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5437}
5438
5439static int init_rcverr(struct hfi1_devdata *dd)
5440{
5441        setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5442        /* Assume the hardware counter has been reset */
5443        dd->rcv_ovfl_cnt = 0;
5444        return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5445}
5446
5447static void free_rcverr(struct hfi1_devdata *dd)
5448{
5449        if (dd->rcverr_timer.data)
5450                del_timer_sync(&dd->rcverr_timer);
5451        dd->rcverr_timer.data = 0;
5452}
5453
5454static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5455{
5456        char buf[96];
5457        int i = 0;
5458
5459        dd_dev_info(dd, "Receive Error: %s\n",
5460                    rxe_err_status_string(buf, sizeof(buf), reg));
5461
5462        if (reg & ALL_RXE_FREEZE_ERR) {
5463                int flags = 0;
5464
5465                /*
5466                 * Freeze mode recovery is disabled for the errors
5467                 * in RXE_FREEZE_ABORT_MASK
5468                 */
5469                if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5470                        flags = FREEZE_ABORT;
5471
5472                start_freeze_handling(dd->pport, flags);
5473        }
5474
5475        for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5476                if (reg & (1ull << i))
5477                        incr_cntr64(&dd->rcv_err_status_cnt[i]);
5478        }
5479}
5480
5481static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5482{
5483        char buf[96];
5484        int i = 0;
5485
5486        dd_dev_info(dd, "Misc Error: %s",
5487                    misc_err_status_string(buf, sizeof(buf), reg));
5488        for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5489                if (reg & (1ull << i))
5490                        incr_cntr64(&dd->misc_err_status_cnt[i]);
5491        }
5492}
5493
5494static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5495{
5496        char buf[96];
5497        int i = 0;
5498
5499        dd_dev_info(dd, "PIO Error: %s\n",
5500                    pio_err_status_string(buf, sizeof(buf), reg));
5501
5502        if (reg & ALL_PIO_FREEZE_ERR)
5503                start_freeze_handling(dd->pport, 0);
5504
5505        for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5506                if (reg & (1ull << i))
5507                        incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5508        }
5509}
5510
5511static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5512{
5513        char buf[96];
5514        int i = 0;
5515
5516        dd_dev_info(dd, "SDMA Error: %s\n",
5517                    sdma_err_status_string(buf, sizeof(buf), reg));
5518
5519        if (reg & ALL_SDMA_FREEZE_ERR)
5520                start_freeze_handling(dd->pport, 0);
5521
5522        for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5523                if (reg & (1ull << i))
5524                        incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5525        }
5526}
5527
5528static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5529{
5530        incr_cntr64(&ppd->port_xmit_discards);
5531}
5532
5533static void count_port_inactive(struct hfi1_devdata *dd)
5534{
5535        __count_port_discards(dd->pport);
5536}
5537
5538/*
5539 * We have had a "disallowed packet" error during egress. Determine the
5540 * integrity check which failed, and update relevant error counter, etc.
5541 *
5542 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5543 * bit of state per integrity check, and so we can miss the reason for an
5544 * egress error if more than one packet fails the same integrity check
5545 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5546 */
5547static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5548                                        int vl)
5549{
5550        struct hfi1_pportdata *ppd = dd->pport;
5551        u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5552        u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5553        char buf[96];
5554
5555        /* clear down all observed info as quickly as possible after read */
5556        write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5557
5558        dd_dev_info(dd,
5559                    "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5560                    info, egress_err_info_string(buf, sizeof(buf), info), src);
5561
5562        /* Eventually add other counters for each bit */
5563        if (info & PORT_DISCARD_EGRESS_ERRS) {
5564                int weight, i;
5565
5566                /*
5567                 * Count all applicable bits as individual errors and
5568                 * attribute them to the packet that triggered this handler.
5569                 * This may not be completely accurate due to limitations
5570                 * on the available hardware error information.  There is
5571                 * a single information register and any number of error
5572                 * packets may have occurred and contributed to it before
5573                 * this routine is called.  This means that:
5574                 * a) If multiple packets with the same error occur before
5575                 *    this routine is called, earlier packets are missed.
5576                 *    There is only a single bit for each error type.
5577                 * b) Errors may not be attributed to the correct VL.
5578                 *    The driver is attributing all bits in the info register
5579                 *    to the packet that triggered this call, but bits
5580                 *    could be an accumulation of different packets with
5581                 *    different VLs.
5582                 * c) A single error packet may have multiple counts attached
5583                 *    to it.  There is no way for the driver to know if
5584                 *    multiple bits set in the info register are due to a
5585                 *    single packet or multiple packets.  The driver assumes
5586                 *    multiple packets.
5587                 */
5588                weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5589                for (i = 0; i < weight; i++) {
5590                        __count_port_discards(ppd);
5591                        if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5592                                incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5593                        else if (vl == 15)
5594                                incr_cntr64(&ppd->port_xmit_discards_vl
5595                                            [C_VL_15]);
5596                }
5597        }
5598}
5599
5600/*
5601 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5602 * register. Does it represent a 'port inactive' error?
5603 */
5604static inline int port_inactive_err(u64 posn)
5605{
5606        return (posn >= SEES(TX_LINKDOWN) &&
5607                posn <= SEES(TX_INCORRECT_LINK_STATE));
5608}
5609
5610/*
5611 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5612 * register. Does it represent a 'disallowed packet' error?
5613 */
5614static inline int disallowed_pkt_err(int posn)
5615{
5616        return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5617                posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5618}
5619
5620/*
5621 * Input value is a bit position of one of the SDMA engine disallowed
5622 * packet errors.  Return which engine.  Use of this must be guarded by
5623 * disallowed_pkt_err().
5624 */
5625static inline int disallowed_pkt_engine(int posn)
5626{
5627        return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5628}
5629
5630/*
5631 * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
5632 * be done.
5633 */
5634static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5635{
5636        struct sdma_vl_map *m;
5637        int vl;
5638
5639        /* range check */
5640        if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5641                return -1;
5642
5643        rcu_read_lock();
5644        m = rcu_dereference(dd->sdma_map);
5645        vl = m->engine_to_vl[engine];
5646        rcu_read_unlock();
5647
5648        return vl;
5649}
5650
5651/*
5652 * Translate the send context (software index) into a VL.  Return -1 if the
5653 * translation cannot be done.
5654 */
5655static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5656{
5657        struct send_context_info *sci;
5658        struct send_context *sc;
5659        int i;
5660
5661        sci = &dd->send_contexts[sw_index];
5662
5663        /* there is no information for user (PSM) and ack contexts */
5664        if (sci->type != SC_KERNEL)
5665                return -1;
5666
5667        sc = sci->sc;
5668        if (!sc)
5669                return -1;
5670        if (dd->vld[15].sc == sc)
5671                return 15;
5672        for (i = 0; i < num_vls; i++)
5673                if (dd->vld[i].sc == sc)
5674                        return i;
5675
5676        return -1;
5677}
5678
5679static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5680{
5681        u64 reg_copy = reg, handled = 0;
5682        char buf[96];
5683        int i = 0;
5684
5685        if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5686                start_freeze_handling(dd->pport, 0);
5687        else if (is_ax(dd) &&
5688                 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5689                 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5690                start_freeze_handling(dd->pport, 0);
5691
5692        while (reg_copy) {
5693                int posn = fls64(reg_copy);
5694                /* fls64() returns a 1-based offset; we want it zero-based */
5695                int shift = posn - 1;
5696                u64 mask = 1ULL << shift;
5697
5698                if (port_inactive_err(shift)) {
5699                        count_port_inactive(dd);
5700                        handled |= mask;
5701                } else if (disallowed_pkt_err(shift)) {
5702                        int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5703
5704                        handle_send_egress_err_info(dd, vl);
5705                        handled |= mask;
5706                }
5707                reg_copy &= ~mask;
5708        }
5709
5710        reg &= ~handled;
5711
5712        if (reg)
5713                dd_dev_info(dd, "Egress Error: %s\n",
5714                            egress_err_status_string(buf, sizeof(buf), reg));
5715
5716        for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5717                if (reg & (1ull << i))
5718                        incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5719        }
5720}
5721
5722static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5723{
5724        char buf[96];
5725        int i = 0;
5726
5727        dd_dev_info(dd, "Send Error: %s\n",
5728                    send_err_status_string(buf, sizeof(buf), reg));
5729
5730        for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5731                if (reg & (1ull << i))
5732                        incr_cntr64(&dd->send_err_status_cnt[i]);
5733        }
5734}
5735
5736/*
5737 * The maximum number of times the error clear down will loop before
5738 * blocking a repeating error.  This value is arbitrary.
5739 */
5740#define MAX_CLEAR_COUNT 20
5741
5742/*
5743 * Clear and handle an error register.  All error interrupts are funneled
5744 * through here to have a central location to correctly handle single-
5745 * or multi-shot errors.
5746 *
5747 * For non per-context registers, call this routine with a context value
5748 * of 0 so the per-context offset is zero.
5749 *
5750 * If the handler loops too many times, assume that something is wrong
5751 * and can't be fixed, so mask the error bits.
5752 */
5753static void interrupt_clear_down(struct hfi1_devdata *dd,
5754                                 u32 context,
5755                                 const struct err_reg_info *eri)
5756{
5757        u64 reg;
5758        u32 count;
5759
5760        /* read in a loop until no more errors are seen */
5761        count = 0;
5762        while (1) {
5763                reg = read_kctxt_csr(dd, context, eri->status);
5764                if (reg == 0)
5765                        break;
5766                write_kctxt_csr(dd, context, eri->clear, reg);
5767                if (likely(eri->handler))
5768                        eri->handler(dd, context, reg);
5769                count++;
5770                if (count > MAX_CLEAR_COUNT) {
5771                        u64 mask;
5772
5773                        dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5774                                   eri->desc, reg);
5775                        /*
5776                         * Read-modify-write so any other masked bits
5777                         * remain masked.
5778                         */
5779                        mask = read_kctxt_csr(dd, context, eri->mask);
5780                        mask &= ~reg;
5781                        write_kctxt_csr(dd, context, eri->mask, mask);
5782                        break;
5783                }
5784        }
5785}
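
/*
 * The err_reg_info tables referenced by the callers below (for example
 * misc_errs[], sdma_eng_err and various_err[]) supply the status, clear
 * and mask CSR offsets, the desc string and the handler that this loop
 * consumes.
 */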
5786
5787/*
5788 * CCE block "misc" interrupt.  Source is < 16.
5789 */
5790static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5791{
5792        const struct err_reg_info *eri = &misc_errs[source];
5793
5794        if (eri->handler) {
5795                interrupt_clear_down(dd, 0, eri);
5796        } else {
5797                dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5798                           source);
5799        }
5800}
5801
5802static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5803{
5804        return flag_string(buf, buf_len, flags,
5805                           sc_err_status_flags,
5806                           ARRAY_SIZE(sc_err_status_flags));
5807}
5808
5809/*
5810 * Send context error interrupt.  Source (hw_context) is < 160.
5811 *
5812 * All send context errors cause the send context to halt.  The normal
5813 * clear-down mechanism cannot be used because we cannot clear the
5814 * error bits until several other long-running items are done first.
5815 * This is OK because with the context halted, nothing else is going
5816 * to happen on it anyway.
5817 */
5818static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5819                                unsigned int hw_context)
5820{
5821        struct send_context_info *sci;
5822        struct send_context *sc;
5823        char flags[96];
5824        u64 status;
5825        u32 sw_index;
5826        int i = 0;
5827
5828        sw_index = dd->hw_to_sw[hw_context];
5829        if (sw_index >= dd->num_send_contexts) {
5830                dd_dev_err(dd,
5831                           "out of range sw index %u for send context %u\n",
5832                           sw_index, hw_context);
5833                return;
5834        }
5835        sci = &dd->send_contexts[sw_index];
5836        sc = sci->sc;
5837        if (!sc) {
5838                dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5839                           sw_index, hw_context);
5840                return;
5841        }
5842
5843        /* tell the software that a halt has begun */
5844        sc_stop(sc, SCF_HALTED);
5845
5846        status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5847
5848        dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5849                    send_context_err_status_string(flags, sizeof(flags),
5850                                                   status));
5851
5852        if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5853                handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5854
5855        /*
5856         * Automatically restart halted kernel contexts out of interrupt
5857         * context.  User contexts must ask the driver to restart the context.
5858         */
5859        if (sc->type != SC_USER)
5860                queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5861
5862        /*
5863         * Update the counters for the corresponding status bits.
5864         * Note that these particular counters are aggregated over all
5865         * 160 contexts.
5866         */
5867        for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5868                if (status & (1ull << i))
5869                        incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5870        }
5871}
5872
5873static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5874                                unsigned int source, u64 status)
5875{
5876        struct sdma_engine *sde;
5877        int i = 0;
5878
5879        sde = &dd->per_sdma[source];
5880#ifdef CONFIG_SDMA_VERBOSITY
5881        dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5882                   slashstrip(__FILE__), __LINE__, __func__);
5883        dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5884                   sde->this_idx, source, (unsigned long long)status);
5885#endif
5886        sde->err_cnt++;
5887        sdma_engine_error(sde, status);
5888
5889        /*
5890         * Update the counters for the corresponding status bits.
5891         * Note that these particular counters are aggregated over
5892         * all 16 DMA engines.
5893         */
5894        for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5895                if (status & (1ull << i))
5896                        incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5897        }
5898}
5899
5900/*
5901 * CCE block SDMA error interrupt.  Source is < 16.
5902 */
5903static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5904{
5905#ifdef CONFIG_SDMA_VERBOSITY
5906        struct sdma_engine *sde = &dd->per_sdma[source];
5907
5908        dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5909                   slashstrip(__FILE__), __LINE__, __func__);
5910        dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5911                   source);
5912        sdma_dumpstate(sde);
5913#endif
5914        interrupt_clear_down(dd, source, &sdma_eng_err);
5915}
5916
5917/*
5918 * CCE block "various" interrupt.  Source is < 8.
5919 */
5920static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5921{
5922        const struct err_reg_info *eri = &various_err[source];
5923
5924        /*
5925         * TCritInt cannot go through interrupt_clear_down()
5926         * because it is not a second tier interrupt. The handler
5927         * should be called directly.
5928         */
5929        if (source == TCRIT_INT_SOURCE)
5930                handle_temp_err(dd);
5931        else if (eri->handler)
5932                interrupt_clear_down(dd, 0, eri);
5933        else
5934                dd_dev_info(dd,
5935                            "%s: Unimplemented/reserved interrupt %d\n",
5936                            __func__, source);
5937}
5938
5939static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5940{
5941        /* src_ctx is always zero */
5942        struct hfi1_pportdata *ppd = dd->pport;
5943        unsigned long flags;
5944        u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5945
5946        if (reg & QSFP_HFI0_MODPRST_N) {
5947                if (!qsfp_mod_present(ppd)) {
5948                        dd_dev_info(dd, "%s: QSFP module removed\n",
5949                                    __func__);
5950
5951                        ppd->driver_link_ready = 0;
5952                        /*
5953                         * Cable removed, reset all our information about the
5954                         * cache and cable capabilities
5955                         */
5956
5957                        spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5958                        /*
5959                         * We don't set cache_refresh_required here as we expect
5960                         * an interrupt when a cable is inserted
5961                         */
5962                        ppd->qsfp_info.cache_valid = 0;
5963                        ppd->qsfp_info.reset_needed = 0;
5964                        ppd->qsfp_info.limiting_active = 0;
5965                        spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5966                                               flags);
5967                        /* Invert the ModPresent pin now to detect plug-in */
5968                        write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5969                                  ASIC_QSFP1_INVERT, qsfp_int_mgmt);
5970
5971                        if ((ppd->offline_disabled_reason >
5972                          HFI1_ODR_MASK(
5973                          OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
5974                          (ppd->offline_disabled_reason ==
5975                          HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
5976                                ppd->offline_disabled_reason =
5977                                HFI1_ODR_MASK(
5978                                OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
5979
5980                        if (ppd->host_link_state == HLS_DN_POLL) {
5981                                /*
5982                                 * The link is still in POLL. This means
5983                                 * that the normal link down processing
5984                                 * will not happen. We have to do it here
5985                                 * before turning the DC off.
5986                                 */
5987                                queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5988                        }
5989                } else {
5990                        dd_dev_info(dd, "%s: QSFP module inserted\n",
5991                                    __func__);
5992
5993                        spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5994                        ppd->qsfp_info.cache_valid = 0;
5995                        ppd->qsfp_info.cache_refresh_required = 1;
5996                        spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5997                                               flags);
5998
5999                        /*
6000                         * Stop inversion of ModPresent pin to detect
6001                         * removal of the cable
6002                         */
6003                        qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6004                        write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6005                                  ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6006
6007                        ppd->offline_disabled_reason =
6008                                HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6009                }
6010        }
6011
6012        if (reg & QSFP_HFI0_INT_N) {
6013                dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6014                            __func__);
6015                spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6016                ppd->qsfp_info.check_interrupt_flags = 1;
6017                spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6018        }
6019
6020        /* Schedule the QSFP work only if there is a cable attached. */
6021        if (qsfp_mod_present(ppd))
6022                queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6023}
6024
6025static int request_host_lcb_access(struct hfi1_devdata *dd)
6026{
6027        int ret;
6028
6029        ret = do_8051_command(dd, HCMD_MISC,
6030                              (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6031                              LOAD_DATA_FIELD_ID_SHIFT, NULL);
6032        if (ret != HCMD_SUCCESS) {
6033                dd_dev_err(dd, "%s: command failed with error %d\n",
6034                           __func__, ret);
6035        }
6036        return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6037}
6038
6039static int request_8051_lcb_access(struct hfi1_devdata *dd)
6040{
6041        int ret;
6042
6043        ret = do_8051_command(dd, HCMD_MISC,
6044                              (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6045                              LOAD_DATA_FIELD_ID_SHIFT, NULL);
6046        if (ret != HCMD_SUCCESS) {
6047                dd_dev_err(dd, "%s: command failed with error %d\n",
6048                           __func__, ret);
6049        }
6050        return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6051}
6052
6053/*
6054 * Set the LCB selector - allow host access.  The DCC selector always
6055 * points to the host.
6056 */
6057static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6058{
6059        write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6060                  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6061                  DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6062}
6063
6064/*
6065 * Clear the LCB selector - allow 8051 access.  The DCC selector always
6066 * points to the host.
6067 */
6068static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6069{
6070        write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6071                  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6072}
6073
6074/*
6075 * Acquire LCB access from the 8051.  If the host already has access,
6076 * just increment a counter.  Otherwise, inform the 8051 that the
6077 * host is taking access.
6078 *
6079 * Returns:
6080 *      0 on success
6081 *      -EBUSY if the 8051 has control and cannot be disturbed
6082 *      -errno if unable to acquire access from the 8051
6083 */
6084int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6085{
6086        struct hfi1_pportdata *ppd = dd->pport;
6087        int ret = 0;
6088
6089        /*
6090         * Use the host link state lock so the operation of this routine
6091         * { link state check, selector change, count increment } can occur
6092         * as a unit against a link state change.  Otherwise there is a
6093         * race between the state change and the count increment.
6094         */
6095        if (sleep_ok) {
6096                mutex_lock(&ppd->hls_lock);
6097        } else {
6098                while (!mutex_trylock(&ppd->hls_lock))
6099                        udelay(1);
6100        }
6101
6102        /* this access is valid only when the link is up */
6103        if ((ppd->host_link_state & HLS_UP) == 0) {
6104                dd_dev_info(dd, "%s: link state %s not up\n",
6105                            __func__, link_state_name(ppd->host_link_state));
6106                ret = -EBUSY;
6107                goto done;
6108        }
6109
6110        if (dd->lcb_access_count == 0) {
6111                ret = request_host_lcb_access(dd);
6112                if (ret) {
6113                        dd_dev_err(dd,
6114                                   "%s: unable to acquire LCB access, err %d\n",
6115                                   __func__, ret);
6116                        goto done;
6117                }
6118                set_host_lcb_access(dd);
6119        }
6120        dd->lcb_access_count++;
6121done:
6122        mutex_unlock(&ppd->hls_lock);
6123        return ret;
6124}
6125
6126/*
6127 * Release LCB access by decrementing the use count.  If the count is moving
6128 * from 1 to 0, inform 8051 that it has control back.
6129 *
6130 * Returns:
6131 *      0 on success
6132 *      -errno if unable to release access to the 8051
6133 */
6134int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6135{
6136        int ret = 0;
6137
6138        /*
6139         * Use the host link state lock because the acquire needed it.
6140         * Here, we only need to keep { selector change, count decrement }
6141         * as a unit.
6142         */
6143        if (sleep_ok) {
6144                mutex_lock(&dd->pport->hls_lock);
6145        } else {
6146                while (!mutex_trylock(&dd->pport->hls_lock))
6147                        udelay(1);
6148        }
6149
6150        if (dd->lcb_access_count == 0) {
6151                dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
6152                           __func__);
6153                goto done;
6154        }
6155
6156        if (dd->lcb_access_count == 1) {
6157                set_8051_lcb_access(dd);
6158                ret = request_8051_lcb_access(dd);
6159                if (ret) {
6160                        dd_dev_err(dd,
6161                                   "%s: unable to release LCB access, err %d\n",
6162                                   __func__, ret);
6163                        /* restore host access if the grant didn't work */
6164                        set_host_lcb_access(dd);
6165                        goto done;
6166                }
6167        }
6168        dd->lcb_access_count--;
6169done:
6170        mutex_unlock(&dd->pport->hls_lock);
6171        return ret;
6172}
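
/*
 * A minimal usage sketch (illustrative only): a caller in process
 * context that needs a temporary look at an LCB CSR brackets the access
 * as follows:
 *
 *      if (!acquire_lcb_access(dd, 1)) {
 *              reg = read_csr(dd, DC_LCB_CFG_RUN); /* CSR chosen for illustration */
 *              release_lcb_access(dd, 1);
 *      }
 *
 * Callers that cannot sleep pass sleep_ok = 0 and accept the
 * mutex_trylock()/udelay() spin used by both routines.
 */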
6173
6174/*
6175 * Initialize LCB access variables and state.  Called during driver load,
6176 * after most of the initialization is finished.
6177 *
6178 * The DC default is LCB access on for the host.  The driver defaults to
6179 * leaving access to the 8051.  Assign access now - this constrains the call
6180 * to this routine to be after all LCB set-up is done.  In particular, after
6181 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6182 */
6183static void init_lcb_access(struct hfi1_devdata *dd)
6184{
6185        dd->lcb_access_count = 0;
6186}
6187
6188/*
6189 * Write a response back to an 8051 request.
6190 */
6191static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6192{
6193        write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6194                  DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6195                  (u64)return_code <<
6196                  DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6197                  (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6198}
6199
6200/*
6201 * Handle host requests from the 8051.
6202 *
6203 * This is a work-queue function outside of the interrupt.
6204 */
6205void handle_8051_request(struct work_struct *work)
6206{
6207        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6208                                                        dc_host_req_work);
6209        struct hfi1_devdata *dd = ppd->dd;
6210        u64 reg;
6211        u16 data = 0;
6212        u8 type, i, lanes, *cache = ppd->qsfp_info.cache;
6213        u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
6214
6215        reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6216        if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6217                return; /* no request */
6218
6219        /* zero out COMPLETED so the response is seen */
6220        write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6221
6222        /* extract request details */
6223        type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6224                        & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6225        data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6226                        & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6227
6228        switch (type) {
6229        case HREQ_LOAD_CONFIG:
6230        case HREQ_SAVE_CONFIG:
6231        case HREQ_READ_CONFIG:
6232        case HREQ_SET_TX_EQ_ABS:
6233        case HREQ_SET_TX_EQ_REL:
6234                dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6235                            type);
6236                hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6237                break;
6238
6239        case HREQ_ENABLE:
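                /*
                 * Layout of the request data as decoded below: bits 3:0
                 * select lanes, bit 9 selects TX CDR on/off and bit 11
                 * RX CDR on/off for those lanes.  In the QSFP CDR control
                 * byte, bit (i + 4) is the TX CDR enable and bit i the
                 * RX CDR enable for lane i; a bit is only changed when
                 * the cached QSFP power and CDR info bytes permit it.
                 */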
6240                lanes = data & 0xF;
6241                for (i = 0; lanes; lanes >>= 1, i++) {
6242                        if (!(lanes & 1))
6243                                continue;
6244                        if (data & 0x200) {
6245                                /* enable TX CDR */
6246                                if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6247                                    cache[QSFP_CDR_INFO_OFFS] & 0x80)
6248                                        cdr_ctrl_byte |= (1 << (i + 4));
6249                        } else {
6250                                /* disable TX CDR */
6251                                if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6252                                    cache[QSFP_CDR_INFO_OFFS] & 0x80)
6253                                        cdr_ctrl_byte &= ~(1 << (i + 4));
6254                        }
6255
6256                        if (data & 0x800) {
6257                                /* enable RX CDR */
6258                                if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6259                                    cache[QSFP_CDR_INFO_OFFS] & 0x40)
6260                                        cdr_ctrl_byte |= (1 << i);
6261                        } else {
6262                                /* disable RX CDR */
6263                                if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6264                                    cache[QSFP_CDR_INFO_OFFS] & 0x40)
6265                                        cdr_ctrl_byte &= ~(1 << i);
6266                        }
6267                }
6268                one_qsfp_write(ppd, dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
6269                               &cdr_ctrl_byte, 1);
6270                hreq_response(dd, HREQ_SUCCESS, data);
6271                refresh_qsfp_cache(ppd, &ppd->qsfp_info);
6272                break;
6273
6274        case HREQ_CONFIG_DONE:
6275                hreq_response(dd, HREQ_SUCCESS, 0);
6276                break;
6277
6278        case HREQ_INTERFACE_TEST:
6279                hreq_response(dd, HREQ_SUCCESS, data);
6280                break;
6281
6282        default:
6283                dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6284                hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6285                break;
6286        }
6287}
6288
6289static void write_global_credit(struct hfi1_devdata *dd,
6290                                u8 vau, u16 total, u16 shared)
6291{
6292        write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6293                  ((u64)total <<
6294                   SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
6295                  ((u64)shared <<
6296                   SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
6297                  ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6298}
6299
6300/*
6301 * Set up initial VL15 credits of the remote.  Assumes the rest of
6302 * the CM credit registers are zero from a previous global or credit reset.
6303 */
6304void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6305{
6306        /* leave shared count at zero for both global and VL15 */
6307        write_global_credit(dd, vau, vl15buf, 0);
6308
6309        /* We may need some credits for another VL when sending packets
6310         * with the snoop interface. Dividing it down the middle for VL15
6311         * and VL0 should suffice.
6312         */
6313        if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6314                write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6315                    << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6316                write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6317                    << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6318        } else {
6319                write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6320                        << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6321        }
6322}
6323
6324/*
6325 * Zero all credit details from the previous connection and
6326 * reset the CM manager's internal counters.
6327 */
6328void reset_link_credits(struct hfi1_devdata *dd)
6329{
6330        int i;
6331
6332        /* remove all previous VL credit limits */
6333        for (i = 0; i < TXE_NUM_DATA_VL; i++)
6334                write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6335        write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6336        write_global_credit(dd, 0, 0, 0);
6337        /* reset the CM block */
6338        pio_send_control(dd, PSC_CM_RESET);
6339}
6340
6341/* convert a vCU to a CU */
6342static u32 vcu_to_cu(u8 vcu)
6343{
6344        return 1 << vcu;
6345}
6346
6347/* convert a CU to a vCU */
6348static u8 cu_to_vcu(u32 cu)
6349{
6350        return ilog2(cu);
6351}
6352
6353/* convert a vAU to an AU */
6354static u32 vau_to_au(u8 vau)
6355{
6356        return 8 * (1 << vau);
6357}
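
/*
 * These encodings are simple powers of two, e.g. vAU = 2 describes an
 * allocation unit of 8 * (1 << 2) = 32 bytes, and cu_to_vcu() inverts
 * vcu_to_cu(), so cu_to_vcu(vcu_to_cu(v)) == v for any valid v.
 */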
6358
6359static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6360{
6361        ppd->sm_trap_qp = 0x0;
6362        ppd->sa_qp = 0x1;
6363}
6364
6365/*
6366 * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
6367 */
6368static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6369{
6370        u64 reg;
6371
6372        /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6373        write_csr(dd, DC_LCB_CFG_RUN, 0);
6374        /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6375        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6376                  1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6377        /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6378        dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6379        reg = read_csr(dd, DCC_CFG_RESET);
6380        write_csr(dd, DCC_CFG_RESET, reg |
6381                  (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6382                  (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6383        (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6384        if (!abort) {
6385                udelay(1);    /* must hold for the longer of 16cclks or 20ns */
6386                write_csr(dd, DCC_CFG_RESET, reg);
6387                write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6388        }
6389}
6390
6391/*
6392 * This routine should be called after the link has been transitioned to
6393 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6394 * reset).
6395 *
6396 * The expectation is that the caller of this routine would have taken
6397 * care of properly transitioning the link into the correct state.
6398 */
6399static void dc_shutdown(struct hfi1_devdata *dd)
6400{
6401        unsigned long flags;
6402
6403        spin_lock_irqsave(&dd->dc8051_lock, flags);
6404        if (dd->dc_shutdown) {
6405                spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6406                return;
6407        }
6408        dd->dc_shutdown = 1;
6409        spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6410        /* Shutdown the LCB */
6411        lcb_shutdown(dd, 1);
6412        /*
6413         * Going to OFFLINE would have caused the 8051 to put the
6414         * SerDes into reset already. Just need to shut down the 8051
6415         * itself.
6416         */
6417        write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6418}
6419
6420/*
6421 * Calling this after the DC has been brought out of reset should not
6422 * do any damage.
6423 */
6424static void dc_start(struct hfi1_devdata *dd)
6425{
6426        unsigned long flags;
6427        int ret;
6428
6429        spin_lock_irqsave(&dd->dc8051_lock, flags);
6430        if (!dd->dc_shutdown)
6431                goto done;
6432        spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6433        /* Take the 8051 out of reset */
6434        write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6435        /* Wait until 8051 is ready */
6436        ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6437        if (ret) {
6438                dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6439                           __func__);
6440        }
6441        /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6442        write_csr(dd, DCC_CFG_RESET, 0x10);
6443        /* lcb_shutdown() with abort=1 does not restore these */
6444        write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6445        spin_lock_irqsave(&dd->dc8051_lock, flags);
6446        dd->dc_shutdown = 0;
6447done:
6448        spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6449}
6450
6451/*
6452 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6453 */
6454static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6455{
6456        u64 rx_radr, tx_radr;
6457        u32 version;
6458
6459        if (dd->icode != ICODE_FPGA_EMULATION)
6460                return;
6461
6462        /*
6463         * These LCB defaults on emulator _s are good, nothing to do here:
6464         *      LCB_CFG_TX_FIFOS_RADR
6465         *      LCB_CFG_RX_FIFOS_RADR
6466         *      LCB_CFG_LN_DCLK
6467         *      LCB_CFG_IGNORE_LOST_RCLK
6468         */
6469        if (is_emulator_s(dd))
6470                return;
6471        /* else this is _p */
6472
6473        version = emulator_rev(dd);
6474        if (!is_ax(dd))
6475                version = 0x2d; /* all B0 use 0x2d or higher settings */
6476
6477        if (version <= 0x12) {
6478                /* release 0x12 and below */
6479
6480                /*
6481                 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6482                 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6483                 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6484                 */
6485                rx_radr =
6486                      0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6487                    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6488                    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6489                /*
6490                 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6491                 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6492                 */
6493                tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6494        } else if (version <= 0x18) {
6495                /* release 0x13 up to 0x18 */
6496                /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6497                rx_radr =
6498                      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6499                    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6500                    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6501                tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6502        } else if (version == 0x19) {
6503                /* release 0x19 */
6504                /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6505                rx_radr =
6506                      0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6507                    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6508                    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6509                tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6510        } else if (version == 0x1a) {
6511                /* release 0x1a */
6512                /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6513                rx_radr =
6514                      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6515                    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6516                    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6517                tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6518                write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6519        } else {
6520                /* release 0x1b and higher */
6521                /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6522                rx_radr =
6523                      0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6524                    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6525                    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6526                tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6527        }
6528
6529        write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6530        /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6531        write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6532                  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6533        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6534}
6535
6536/*
6537 * Handle a SMA idle message
6538 *
6539 * This is a work-queue function outside of the interrupt.
6540 */
6541void handle_sma_message(struct work_struct *work)
6542{
6543        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6544                                                        sma_message_work);
6545        struct hfi1_devdata *dd = ppd->dd;
6546        u64 msg;
6547        int ret;
6548
6549        /*
6550         * msg is bytes 1-4 of the 40-bit idle message - the command code
6551         * is stripped off
6552         */
6553        ret = read_idle_sma(dd, &msg);
6554        if (ret)
6555                return;
6556        dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6557        /*
6558         * React to the SMA message.  Byte[1] (0 for us) is the command.
6559         */
6560        switch (msg & 0xff) {
6561        case SMA_IDLE_ARM:
6562                /*
6563                 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6564                 * State Transitions
6565                 *
6566                 * Only expected in INIT or ARMED, discard otherwise.
6567                 */
6568                if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6569                        ppd->neighbor_normal = 1;
6570                break;
6571        case SMA_IDLE_ACTIVE:
6572                /*
6573                 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6574                 * State Transitions
6575                 *
6576                 * Can activate the node.  Discard otherwise.
6577                 */
6578                if (ppd->host_link_state == HLS_UP_ARMED &&
6579                    ppd->is_active_optimize_enabled) {
6580                        ppd->neighbor_normal = 1;
6581                        ret = set_link_state(ppd, HLS_UP_ACTIVE);
6582                        if (ret)
6583                                dd_dev_err(
6584                                        dd,
6585                                        "%s: received Active SMA idle message, couldn't set link to Active\n",
6586                                        __func__);
6587                }
6588                break;
6589        default:
6590                dd_dev_err(dd,
6591                           "%s: received unexpected SMA idle message 0x%llx\n",
6592                           __func__, msg);
6593                break;
6594        }
6595}
6596
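    /*
     * Read-modify-write RCV_CTRL under the rcvctrl lock: set the bits in
     * 'add', then clear the bits in 'clear'.
     */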
6597static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6598{
6599        u64 rcvctrl;
6600        unsigned long flags;
6601
6602        spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6603        rcvctrl = read_csr(dd, RCV_CTRL);
6604        rcvctrl |= add;
6605        rcvctrl &= ~clear;
6606        write_csr(dd, RCV_CTRL, rcvctrl);
6607        spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6608}
6609
6610static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6611{
6612        adjust_rcvctrl(dd, add, 0);
6613}
6614
6615static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6616{
6617        adjust_rcvctrl(dd, 0, clear);
6618}
6619
6620/*
6621 * Called from all interrupt handlers to start handling an SPC freeze.
6622 */
6623void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6624{
6625        struct hfi1_devdata *dd = ppd->dd;
6626        struct send_context *sc;
6627        int i;
6628
6629        if (flags & FREEZE_SELF)
6630                write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6631
6632        /* enter frozen mode */
6633        dd->flags |= HFI1_FROZEN;
6634
6635        /* notify all SDMA engines that they are going into a freeze */
6636        sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6637
6638        /* do halt pre-handling on all enabled send contexts */
6639        for (i = 0; i < dd->num_send_contexts; i++) {
6640                sc = dd->send_contexts[i].sc;
6641                if (sc && (sc->flags & SCF_ENABLED))
6642                        sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6643        }
6644
6645        /* Send contexts are frozen. Notify user space */
6646        hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6647
6648        if (flags & FREEZE_ABORT) {
6649                dd_dev_err(dd,
6650                           "Aborted freeze recovery. Please REBOOT system\n");
6651                return;
6652        }
6653        /* queue non-interrupt handler */
6654        queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6655}
6656
6657/*
6658 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6659 * depending on the "freeze" parameter.
6660 *
6661 * No need to return an error if it times out, our only option
6662 * is to proceed anyway.
6663 */
6664static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6665{
6666        unsigned long timeout;
6667        u64 reg;
6668
6669        timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6670        while (1) {
6671                reg = read_csr(dd, CCE_STATUS);
6672                if (freeze) {
6673                        /* waiting until all indicators are set */
6674                        if ((reg & ALL_FROZE) == ALL_FROZE)
6675                                return; /* all done */
6676                } else {
6677                        /* waiting until all indicators are clear */
6678                        if ((reg & ALL_FROZE) == 0)
6679                                return; /* all done */
6680                }
6681
6682                if (time_after(jiffies, timeout)) {
6683                        dd_dev_err(dd,
6684                                   "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6685                                   freeze ? "" : "un", reg & ALL_FROZE,
6686                                   freeze ? ALL_FROZE : 0ull);
6687                        return;
6688                }
6689                usleep_range(80, 120);
6690        }
6691}
6692
6693/*
6694 * Do all freeze handling for the RXE block.
6695 */
6696static void rxe_freeze(struct hfi1_devdata *dd)
6697{
6698        int i;
6699
6700        /* disable port */
6701        clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6702
6703        /* disable all receive contexts */
6704        for (i = 0; i < dd->num_rcv_contexts; i++)
6705                hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6706}
6707
6708/*
6709 * Unfreeze handling for the RXE block - kernel contexts only.
6710 * This will also enable the port.  User contexts will do unfreeze
6711 * handling on a per-context basis as they call into the driver.
6712 */
6714static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6715{
6716        u32 rcvmask;
6717        int i;
6718
6719        /* enable all kernel contexts */
6720        for (i = 0; i < dd->n_krcv_queues; i++) {
6721                rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6722                /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6723                rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6724                        HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6725                hfi1_rcvctrl(dd, rcvmask, i);
6726        }
6727
6728        /* enable port */
6729        add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6730}
6731
6732/*
6733 * Non-interrupt SPC freeze handling.
6734 *
6735 * This is a work-queue function outside of the triggering interrupt.
6736 */
6737void handle_freeze(struct work_struct *work)
6738{
6739        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6740                                                                freeze_work);
6741        struct hfi1_devdata *dd = ppd->dd;
6742
6743        /* wait for freeze indicators on all affected blocks */
6744        wait_for_freeze_status(dd, 1);
6745
6746        /* SPC is now frozen */
6747
6748        /* do send PIO freeze steps */
6749        pio_freeze(dd);
6750
6751        /* do send DMA freeze steps */
6752        sdma_freeze(dd);
6753
6754        /* do send egress freeze steps - nothing to do */
6755
6756        /* do receive freeze steps */
6757        rxe_freeze(dd);
6758
6759        /*
6760         * Unfreeze the hardware - clear the freeze, wait for each
6761         * block's frozen bit to clear, then clear the frozen flag.
6762         */
6763        write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6764        wait_for_freeze_status(dd, 0);
6765
6766        if (is_ax(dd)) {
6767                write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6768                wait_for_freeze_status(dd, 1);
6769                write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6770                wait_for_freeze_status(dd, 0);
6771        }
6772
6773        /* do send PIO unfreeze steps for kernel contexts */
6774        pio_kernel_unfreeze(dd);
6775
6776        /* do send DMA unfreeze steps */
6777        sdma_unfreeze(dd);
6778
6779        /* do send egress unfreeze steps - nothing to do */
6780
6781        /* do receive unfreeze steps for kernel contexts */
6782        rxe_kernel_unfreeze(dd);
6783
6784        /*
6785         * The unfreeze procedure touches global device registers when
6786         * it disables and re-enables RXE. Mark the device unfrozen
6787         * after all that is done so other parts of the driver waiting
6788         * for the device to unfreeze don't do things out of order.
6789         *
6790         * The above implies that the meaning of HFI1_FROZEN flag is
6791         * "Device has gone into freeze mode and freeze mode handling
6792         * is still in progress."
6793         *
6794         * The flag will be removed when freeze mode processing has
6795         * completed.
6796         */
6797        dd->flags &= ~HFI1_FROZEN;
6798        wake_up(&dd->event_queue);
6799
6800        /* no longer frozen */
6801}
6802
6803/*
6804 * Handle a link up interrupt from the 8051.
6805 *
6806 * This is a work-queue function outside of the interrupt.
6807 */
6808void handle_link_up(struct work_struct *work)
6809{
6810        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6811                                                  link_up_work);
6812        set_link_state(ppd, HLS_UP_INIT);
6813
6814        /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6815        read_ltp_rtt(ppd->dd);
6816        /*
6817         * OPA specifies that certain counters are cleared on a transition
6818         * to link up, so do that.
6819         */
6820        clear_linkup_counters(ppd->dd);
6821        /*
6822         * And (re)set link up default values.
6823         */
6824        set_linkup_defaults(ppd);
6825
6826        /* enforce link speed enabled */
6827        if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6828                /* oops - current speed is not enabled, bounce */
6829                dd_dev_err(ppd->dd,
6830                           "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6831                           ppd->link_speed_active, ppd->link_speed_enabled);
6832                set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6833                                     OPA_LINKDOWN_REASON_SPEED_POLICY);
6834                set_link_state(ppd, HLS_DN_OFFLINE);
6835                tune_serdes(ppd);
6836                start_link(ppd);
6837        }
6838}
6839
6840/*
6841 * Several pieces of LNI information were cached for SMA in ppd.
6842 * Reset these on link down.
6843 */
6844static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6845{
6846        ppd->neighbor_guid = 0;
6847        ppd->neighbor_port_number = 0;
6848        ppd->neighbor_type = 0;
6849        ppd->neighbor_fm_security = 0;
6850}
6851
6852/*
6853 * Handle a link down interrupt from the 8051.
6854 *
6855 * This is a work-queue function outside of the interrupt.
6856 */
6857void handle_link_down(struct work_struct *work)
6858{
6859        u8 lcl_reason, neigh_reason = 0;
6860        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6861                                                                link_down_work);
6862
6863        if ((ppd->host_link_state &
6864             (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6865             ppd->port_type == PORT_TYPE_FIXED)
6866                ppd->offline_disabled_reason =
6867                        HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6868
6869        /* Go offline first, then deal with reading/writing through 8051 */
6870        set_link_state(ppd, HLS_DN_OFFLINE);
6871
6872        lcl_reason = 0;
6873        read_planned_down_reason_code(ppd->dd, &neigh_reason);
6874
6875        /*
6876         * If no reason, assume peer-initiated but missed
6877         * LinkGoingDown idle flits.
6878         */
6879        if (neigh_reason == 0)
6880                lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6881
6882        set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6883
6884        reset_neighbor_info(ppd);
6885
6886        /* disable the port */
6887        clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6888
6889        /*
6890         * If there is no cable attached, turn the DC off. Otherwise,
6891         * start the link bring up.
6892         */
6893        if (!qsfp_mod_present(ppd)) {
6894                dc_shutdown(ppd->dd);
6895        } else {
6896                tune_serdes(ppd);
6897                start_link(ppd);
6898        }
6899}
6900
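    /*
     * Bounce the link: if it is currently up, take it offline, retune the
     * SerDes, and restart the link bring-up.
     *
     * This is a work-queue function outside of the interrupt.
     */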
6901void handle_link_bounce(struct work_struct *work)
6902{
6903        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6904                                                        link_bounce_work);
6905
6906        /*
6907         * Only do something if the link is currently up.
6908         */
6909        if (ppd->host_link_state & HLS_UP) {
6910                set_link_state(ppd, HLS_DN_OFFLINE);
6911                tune_serdes(ppd);
6912                start_link(ppd);
6913        } else {
6914                dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
6915                            __func__, link_state_name(ppd->host_link_state));
6916        }
6917}
6918
6919/*
6920 * Mask conversion: Capability exchange to Port LTP.  The capability
6921 * exchange has an implicit 16b CRC that is mandatory.
6922 */
6923static int cap_to_port_ltp(int cap)
6924{
6925        int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
6926
6927        if (cap & CAP_CRC_14B)
6928                port_ltp |= PORT_LTP_CRC_MODE_14;
6929        if (cap & CAP_CRC_48B)
6930                port_ltp |= PORT_LTP_CRC_MODE_48;
6931        if (cap & CAP_CRC_12B_16B_PER_LANE)
6932                port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
6933
6934        return port_ltp;
6935}
6936
6937/*
6938 * Convert an OPA Port LTP mask to capability mask
6939 */
6940int port_ltp_to_cap(int port_ltp)
6941{
6942        int cap_mask = 0;
6943
6944        if (port_ltp & PORT_LTP_CRC_MODE_14)
6945                cap_mask |= CAP_CRC_14B;
6946        if (port_ltp & PORT_LTP_CRC_MODE_48)
6947                cap_mask |= CAP_CRC_48B;
6948        if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
6949                cap_mask |= CAP_CRC_12B_16B_PER_LANE;
6950
6951        return cap_mask;
6952}
6953
6954/*
6955 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
6956 */
6957static int lcb_to_port_ltp(int lcb_crc)
6958{
6959        int port_ltp = 0;
6960
6961        if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
6962                port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
6963        else if (lcb_crc == LCB_CRC_48B)
6964                port_ltp = PORT_LTP_CRC_MODE_48;
6965        else if (lcb_crc == LCB_CRC_14B)
6966                port_ltp = PORT_LTP_CRC_MODE_14;
6967        else
6968                port_ltp = PORT_LTP_CRC_MODE_16;
6969
6970        return port_ltp;
6971}
6972
6973/*
6974 * Our neighbor has indicated that we are allowed to act as a fabric
6975 * manager, so place the full management partition key in the second
6976 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
6977 * that we should already have the limited management partition key in
6978 * array element 1, and also that the port is not yet up when
6979 * add_full_mgmt_pkey() is invoked.
6980 */
6981static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
6982{
6983        struct hfi1_devdata *dd = ppd->dd;
6984
6985        /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
6986        if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
6987                dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
6988                            __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
6989        ppd->pkeys[2] = FULL_MGMT_P_KEY;
6990        (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
6991}
6992
6993/*
6994 * Convert the given link width to the OPA link width bitmask.
6995 */
6996static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
6997{
6998        switch (width) {
6999        case 0:
7000                /*
7001                 * Simulator and quick linkup do not set the width.
7002                 * Just set it to 4x without complaint.
7003                 */
7004                if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7005                        return OPA_LINK_WIDTH_4X;
7006                return 0; /* no lanes up */
7007        case 1: return OPA_LINK_WIDTH_1X;
7008        case 2: return OPA_LINK_WIDTH_2X;
7009        case 3: return OPA_LINK_WIDTH_3X;
7010        default:
7011                dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7012                            __func__, width);
7013                /* fall through */
7014        case 4: return OPA_LINK_WIDTH_4X;
7015        }
7016}
7017
7018/*
7019 * Do a population count on the bottom nibble.
7020 */
7021static const u8 bit_counts[16] = {
7022        0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7023};
7024
7025static inline u8 nibble_to_count(u8 nibble)
7026{
7027        return bit_counts[nibble & 0xf];
7028}
7029
7030/*
7031 * Read the active lane information from the 8051 registers and return
7032 * their widths.
7033 *
7034 * Active lane information is found in these 8051 registers:
7035 *      enable_lane_tx
7036 *      enable_lane_rx
7037 */
7038static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7039                            u16 *rx_width)
7040{
7041        u16 tx, rx;
7042        u8 enable_lane_rx;
7043        u8 enable_lane_tx;
7044        u8 tx_polarity_inversion;
7045        u8 rx_polarity_inversion;
7046        u8 max_rate;
7047
7048        /* read the active lanes */
7049        read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7050                         &rx_polarity_inversion, &max_rate);
7051        read_local_lni(dd, &enable_lane_rx);
7052
7053        /* convert to counts */
7054        tx = nibble_to_count(enable_lane_tx);
7055        rx = nibble_to_count(enable_lane_rx);
7056
7057        /*
7058         * Set link_speed_active here, overriding what was set in
7059         * handle_verify_cap().  The ASIC 8051 firmware does not correctly
7060         * set the max_rate field in handle_verify_cap until v0.19.
7061         */
7062        if ((dd->icode == ICODE_RTL_SILICON) &&
7063            (dd->dc8051_ver < dc8051_ver(0, 19))) {
7064                /* max_rate: 0 = 12.5G, 1 = 25G */
7065                switch (max_rate) {
7066                case 0:
7067                        dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7068                        break;
7069                default:
7070                        dd_dev_err(dd,
7071                                   "%s: unexpected max rate %d, using 25Gb\n",
7072                                   __func__, (int)max_rate);
7073                        /* fall through */
7074                case 1:
7075                        dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7076                        break;
7077                }
7078        }
7079
7080        dd_dev_info(dd,
7081                    "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7082                    enable_lane_tx, tx, enable_lane_rx, rx);
7083        *tx_width = link_width_to_bits(dd, tx);
7084        *rx_width = link_width_to_bits(dd, rx);
7085}
7086
7087/*
7088 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7089 * Valid after the end of VerifyCap and during LinkUp.  Does not change
7090 * after link up.  I.e. look elsewhere for downgrade information.
7091 *
7092 * Bits are:
7093 *      + bits [7:4] contain the number of active transmitters
7094 *      + bits [3:0] contain the number of active receivers
7095 * These are numbers 1 through 4 and can be different values if the
7096 * link is asymmetric.
7097 *
7098 * verify_cap_local_fm_link_width[0] retains its original value.
7099 */
7100static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7101                              u16 *rx_width)
7102{
7103        u16 widths, tx, rx;
7104        u8 misc_bits, local_flags;
7105        u16 active_tx, active_rx;
7106
7107        read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7108        tx = widths >> 12;
7109        rx = (widths >> 8) & 0xf;
7110
7111        *tx_width = link_width_to_bits(dd, tx);
7112        *rx_width = link_width_to_bits(dd, rx);
7113
7114        /* print the active widths */
7115        get_link_widths(dd, &active_tx, &active_rx);
7116}
7117
7118/*
7119 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7120 * hardware information when the link first comes up.
7121 *
7122 * The link width is not available until after VerifyCap.AllFramesReceived
7123 * (the trigger for handle_verify_cap), so this is outside that routine
7124 * and should be called when the 8051 signals linkup.
7125 */
7126void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7127{
7128        u16 tx_width, rx_width;
7129
7130        /* get end-of-LNI link widths */
7131        get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7132
7133        /* use tx_width as the link is supposed to be symmetric on link up */
7134        ppd->link_width_active = tx_width;
7135        /* link width downgrade active (LWD.A) starts out matching LW.A */
7136        ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7137        ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7138        /* per OPA spec, on link up LWD.E resets to LWD.S */
7139        ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7140        /* cache the active egress rate (units [10^6 bits/sec]) */
7141        ppd->current_egress_rate = active_egress_rate(ppd);
7142}
7143
7144/*
7145 * Handle a verify capabilities interrupt from the 8051.
7146 *
7147 * This is a work-queue function outside of the interrupt.
7148 */
7149void handle_verify_cap(struct work_struct *work)
7150{
7151        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7152                                                                link_vc_work);
7153        struct hfi1_devdata *dd = ppd->dd;
7154        u64 reg;
7155        u8 power_management;
7156        u8 continuous;
7157        u8 vcu;
7158        u8 vau;
7159        u8 z;
7160        u16 vl15buf;
7161        u16 link_widths;
7162        u16 crc_mask;
7163        u16 crc_val;
7164        u16 device_id;
7165        u16 active_tx, active_rx;
7166        u8 partner_supported_crc;
7167        u8 remote_tx_rate;
7168        u8 device_rev;
7169
7170        set_link_state(ppd, HLS_VERIFY_CAP);
7171
7172        lcb_shutdown(dd, 0);
7173        adjust_lcb_for_fpga_serdes(dd);
7174
7175        /*
7176         * These are now valid:
7177         *      remote VerifyCap fields in the general LNI config
7178         *      CSR DC8051_STS_REMOTE_GUID
7179         *      CSR DC8051_STS_REMOTE_NODE_TYPE
7180         *      CSR DC8051_STS_REMOTE_FM_SECURITY
7181         *      CSR DC8051_STS_REMOTE_PORT_NO
7182         */
7183
7184        read_vc_remote_phy(dd, &power_management, &continuous);
7185        read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7186                              &partner_supported_crc);
7187        read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7188        read_remote_device_id(dd, &device_id, &device_rev);
7189        /*
7190         * And the 'MgmtAllowed' information, which is exchanged during
7191         * LNI, is also available at this point.
7192         */
7193        read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7194        /* print the active widths */
7195        get_link_widths(dd, &active_tx, &active_rx);
7196        dd_dev_info(dd,
7197                    "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7198                    (int)power_management, (int)continuous);
7199        dd_dev_info(dd,
7200                    "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7201                    (int)vau, (int)z, (int)vcu, (int)vl15buf,
7202                    (int)partner_supported_crc);
7203        dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7204                    (u32)remote_tx_rate, (u32)link_widths);
7205        dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7206                    (u32)device_id, (u32)device_rev);
7207        /*
7208         * The peer vAU value just read is the peer receiver value.  HFI does
7209         * not support a transmit vAU of 0 (AU == 8).  We advertised that
7210         * with Z=1 in the fabric capabilities sent to the peer.  The peer
7211         * will see our Z=1, and, if it advertised a vAU of 0, will move its
7212         * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
7213         * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7214         * subject to the Z value exception.
7215         */
7216        if (vau == 0)
7217                vau = 1;
7218        set_up_vl15(dd, vau, vl15buf);
7219
7220        /* set up the LCB CRC mode */
7221        crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7222
7223        /* order is important: use the lowest bit in common */
7224        if (crc_mask & CAP_CRC_14B)
7225                crc_val = LCB_CRC_14B;
7226        else if (crc_mask & CAP_CRC_48B)
7227                crc_val = LCB_CRC_48B;
7228        else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7229                crc_val = LCB_CRC_12B_16B_PER_LANE;
7230        else
7231                crc_val = LCB_CRC_16B;
7232
7233        dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7234        write_csr(dd, DC_LCB_CFG_CRC_MODE,
7235                  (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7236
7237        /* set (14b only) or clear sideband credit */
7238        reg = read_csr(dd, SEND_CM_CTRL);
7239        if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7240                write_csr(dd, SEND_CM_CTRL,
7241                          reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7242        } else {
7243                write_csr(dd, SEND_CM_CTRL,
7244                          reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7245        }
7246
7247        ppd->link_speed_active = 0;     /* invalid value */
7248        if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7249                /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7250                switch (remote_tx_rate) {
7251                case 0:
7252                        ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7253                        break;
7254                case 1:
7255                        ppd->link_speed_active = OPA_LINK_SPEED_25G;
7256                        break;
7257                }
7258        } else {
7259                /* actual rate is highest bit of the ANDed rates */
7260                u8 rate = remote_tx_rate & ppd->local_tx_rate;
7261
7262                if (rate & 2)
7263                        ppd->link_speed_active = OPA_LINK_SPEED_25G;
7264                else if (rate & 1)
7265                        ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7266        }
7267        if (ppd->link_speed_active == 0) {
7268                dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7269                           __func__, (int)remote_tx_rate);
7270                ppd->link_speed_active = OPA_LINK_SPEED_25G;
7271        }
7272
7273        /*
7274         * Cache the values of the supported, enabled, and active
7275         * LTP CRC modes to return in 'portinfo' queries. But the bit
7276         * flags that are returned in the portinfo query differ from
7277         * what's in the link_crc_mask, crc_sizes, and crc_val
7278         * variables. Convert these here.
7279         */
7280        ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7281                /* supported crc modes */
7282        ppd->port_ltp_crc_mode |=
7283                cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7284                /* enabled crc modes */
7285        ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7286                /* active crc mode */
7287
7288        /* set up the remote credit return table */
7289        assign_remote_cm_au_table(dd, vcu);
7290
7291        /*
7292         * The LCB is reset on entry to handle_verify_cap(), so this must
7293         * be applied on every link up.
7294         *
7295         * Adjust LCB error kill enable to kill the link if
7296         * these RBUF errors are seen:
7297         *      REPLAY_BUF_MBE_SMASK
7298         *      FLIT_INPUT_BUF_MBE_SMASK
7299         */
7300        if (is_ax(dd)) {                        /* fixed in B0 */
7301                reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7302                reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7303                        | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7304                write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7305        }
7306
7307        /* pull LCB fifos out of reset - all fifo clocks must be stable */
7308        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7309
7310        /* give 8051 access to the LCB CSRs */
7311        write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7312        set_8051_lcb_access(dd);
7313
7314        ppd->neighbor_guid =
7315                read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7316        ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7317                                        DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7318        ppd->neighbor_type =
7319                read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7320                DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7321        ppd->neighbor_fm_security =
7322                read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7323                DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7324        dd_dev_info(dd,
7325                    "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7326                    ppd->neighbor_guid, ppd->neighbor_type,
7327                    ppd->mgmt_allowed, ppd->neighbor_fm_security);
7328        if (ppd->mgmt_allowed)
7329                add_full_mgmt_pkey(ppd);
7330
7331        /* tell the 8051 to go to LinkUp */
7332        set_link_state(ppd, HLS_GOING_UP);
7333}
7334
7335/*
7336 * Apply the link width downgrade enabled policy against the current active
7337 * link widths.
7338 *
7339 * Called when the enabled policy changes or the active link widths change.
7340 */
7341void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7342{
7343        int do_bounce = 0;
7344        int tries;
7345        u16 lwde;
7346        u16 tx, rx;
7347
7348        /* use the hls lock to avoid a race with actual link up */
7349        tries = 0;
7350retry:
7351        mutex_lock(&ppd->hls_lock);
7352        /* only apply if the link is up */
7353        if (!(ppd->host_link_state & HLS_UP)) {
7354                /* still going up..wait and retry */
7355                if (ppd->host_link_state & HLS_GOING_UP) {
7356                        if (++tries < 1000) {
7357                                mutex_unlock(&ppd->hls_lock);
7358                                usleep_range(100, 120); /* arbitrary */
7359                                goto retry;
7360                        }
7361                        dd_dev_err(ppd->dd,
7362                                   "%s: giving up waiting for link state change\n",
7363                                   __func__);
7364                }
7365                goto done;
7366        }
7367
7368        lwde = ppd->link_width_downgrade_enabled;
7369
7370        if (refresh_widths) {
7371                get_link_widths(ppd->dd, &tx, &rx);
7372                ppd->link_width_downgrade_tx_active = tx;
7373                ppd->link_width_downgrade_rx_active = rx;
7374        }
7375
7376        if (lwde == 0) {
7377                /* downgrade is disabled */
7378
7379                /* bounce if not at starting active width */
7380                if ((ppd->link_width_active !=
7381                     ppd->link_width_downgrade_tx_active) ||
7382                    (ppd->link_width_active !=
7383                     ppd->link_width_downgrade_rx_active)) {
7384                        dd_dev_err(ppd->dd,
7385                                   "Link downgrade is disabled and link has downgraded, downing link\n");
7386                        dd_dev_err(ppd->dd,
7387                                   "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
7388                                   ppd->link_width_active,
7389                                   ppd->link_width_downgrade_tx_active,
7390                                   ppd->link_width_downgrade_rx_active);
7391                        do_bounce = 1;
7392                }
7393        } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7394                   (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7395                /* Tx or Rx is outside the enabled policy */
7396                dd_dev_err(ppd->dd,
7397                           "Link is outside of downgrade allowed, downing link\n");
7398                dd_dev_err(ppd->dd,
7399                           "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7400                           lwde, ppd->link_width_downgrade_tx_active,
7401                           ppd->link_width_downgrade_rx_active);
7402                do_bounce = 1;
7403        }
7404
7405done:
7406        mutex_unlock(&ppd->hls_lock);
7407
7408        if (do_bounce) {
7409                set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7410                                     OPA_LINKDOWN_REASON_WIDTH_POLICY);
7411                set_link_state(ppd, HLS_DN_OFFLINE);
7412                tune_serdes(ppd);
7413                start_link(ppd);
7414        }
7415}
7416
7417/*
7418 * Handle a link downgrade interrupt from the 8051.
7419 *
7420 * This is a work-queue function outside of the interrupt.
7421 */
7422void handle_link_downgrade(struct work_struct *work)
7423{
7424        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7425                                                        link_downgrade_work);
7426
7427        dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7428        apply_link_downgrade_policy(ppd, 1);
7429}
7430
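    /*
     * Helpers that render a raw error-flag register into a readable string
     * of flag names for the log messages below.
     */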
7431static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7432{
7433        return flag_string(buf, buf_len, flags, dcc_err_flags,
7434                ARRAY_SIZE(dcc_err_flags));
7435}
7436
7437static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7438{
7439        return flag_string(buf, buf_len, flags, lcb_err_flags,
7440                ARRAY_SIZE(lcb_err_flags));
7441}
7442
7443static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7444{
7445        return flag_string(buf, buf_len, flags, dc8051_err_flags,
7446                ARRAY_SIZE(dc8051_err_flags));
7447}
7448
7449static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7450{
7451        return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7452                ARRAY_SIZE(dc8051_info_err_flags));
7453}
7454
7455static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7456{
7457        return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7458                ARRAY_SIZE(dc8051_info_host_msg_flags));
7459}
7460
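    /*
     * Handle a DC 8051 error interrupt: decode the firmware-set info word
     * into error and host-message bits, act on each recognized bit, then
     * report anything that remains.
     */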
7461static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7462{
7463        struct hfi1_pportdata *ppd = dd->pport;
7464        u64 info, err, host_msg;
7465        int queue_link_down = 0;
7466        char buf[96];
7467
7468        /* look at the flags */
7469        if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7470                /* 8051 information set by firmware */
7471                /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7472                info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7473                err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7474                        & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7475                host_msg = (info >>
7476                        DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7477                        & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7478
7479                /*
7480                 * Handle error flags.
7481                 */
7482                if (err & FAILED_LNI) {
7483                        /*
7484                         * LNI error indications are cleared by the 8051
7485                         * only when starting polling.  Only pay attention
7486                         * to them when in the states that occur during
7487                         * LNI.
7488                         */
7489                        if (ppd->host_link_state
7490                            & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7491                                queue_link_down = 1;
7492                                dd_dev_info(dd, "Link error: %s\n",
7493                                            dc8051_info_err_string(buf,
7494                                                                   sizeof(buf),
7495                                                                   err &
7496                                                                   FAILED_LNI));
7497                        }
7498                        err &= ~(u64)FAILED_LNI;
7499                }
7500                /* unknown frames can happen during LNI, just count */
7501                if (err & UNKNOWN_FRAME) {
7502                        ppd->unknown_frame_count++;
7503                        err &= ~(u64)UNKNOWN_FRAME;
7504                }
7505                if (err) {
7506                        /* report remaining errors, but do not do anything */
7507                        dd_dev_err(dd, "8051 info error: %s\n",
7508                                   dc8051_info_err_string(buf, sizeof(buf),
7509                                                          err));
7510                }
7511
7512                /*
7513                 * Handle host message flags.
7514                 */
7515                if (host_msg & HOST_REQ_DONE) {
7516                        /*
7517                         * Presently, the driver does a busy wait for
7518                         * host requests to complete.  This is only an
7519                         * informational message.
7520                         * NOTE: The 8051 clears the host message
7521                         * information *on the next 8051 command*.
7522                         * Therefore, when linkup is achieved,
7523                         * this flag will still be set.
7524                         */
7525                        host_msg &= ~(u64)HOST_REQ_DONE;
7526                }
7527                if (host_msg & BC_SMA_MSG) {
7528                        queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7529                        host_msg &= ~(u64)BC_SMA_MSG;
7530                }
7531                if (host_msg & LINKUP_ACHIEVED) {
7532                        dd_dev_info(dd, "8051: Link up\n");
7533                        queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7534                        host_msg &= ~(u64)LINKUP_ACHIEVED;
7535                }
7536                if (host_msg & EXT_DEVICE_CFG_REQ) {
7537                        queue_work(ppd->hfi1_wq, &ppd->dc_host_req_work);
7538                        host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7539                }
7540                if (host_msg & VERIFY_CAP_FRAME) {
7541                        queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7542                        host_msg &= ~(u64)VERIFY_CAP_FRAME;
7543                }
7544                if (host_msg & LINK_GOING_DOWN) {
7545                        const char *extra = "";
7546                        /* no downgrade action needed if going down */
7547                        if (host_msg & LINK_WIDTH_DOWNGRADED) {
7548                                host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7549                                extra = " (ignoring downgrade)";
7550                        }
7551                        dd_dev_info(dd, "8051: Link down%s\n", extra);
7552                        queue_link_down = 1;
7553                        host_msg &= ~(u64)LINK_GOING_DOWN;
7554                }
7555                if (host_msg & LINK_WIDTH_DOWNGRADED) {
7556                        queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7557                        host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7558                }
7559                if (host_msg) {
7560                        /* report remaining messages, but do not do anything */
7561                        dd_dev_info(dd, "8051 info host message: %s\n",
7562                                    dc8051_info_host_msg_string(buf,
7563                                                                sizeof(buf),
7564                                                                host_msg));
7565                }
7566
7567                reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7568        }
7569        if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7570                /*
7571                 * Lost the 8051 heartbeat.  If this happens, we
7572                 * receive constant interrupts about it.  Disable
7573                 * the interrupt after the first.
7574                 */
7575                dd_dev_err(dd, "Lost 8051 heartbeat\n");
7576                write_csr(dd, DC_DC8051_ERR_EN,
7577                          read_csr(dd, DC_DC8051_ERR_EN) &
7578                          ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7579
7580                reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7581        }
7582        if (reg) {
7583                /* report the error, but do not do anything */
7584                dd_dev_err(dd, "8051 error: %s\n",
7585                           dc8051_err_string(buf, sizeof(buf), reg));
7586        }
7587
7588        if (queue_link_down) {
7589                /*
7590                 * if the link is already going down or disabled, do not
7591                 * queue another
7592                 */
7593                if ((ppd->host_link_state &
7594                    (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7595                    ppd->link_enabled == 0) {
7596                        dd_dev_info(dd, "%s: not queuing link down\n",
7597                                    __func__);
7598                } else {
7599                        queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7600                }
7601        }
7602}
7603
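    /*
     * Text for the DCC error info codes reported by handle_dcc_err():
     * fm_config_txt is indexed by the FMConfig error code, port_rcv_txt
     * by the PortRcv error code.
     */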
7604static const char * const fm_config_txt[] = {
7605[0] =
7606        "BadHeadDist: Distance violation between two head flits",
7607[1] =
7608        "BadTailDist: Distance violation between two tail flits",
7609[2] =
7610        "BadCtrlDist: Distance violation between two credit control flits",
7611[3] =
7612        "BadCrdAck: Credits return for unsupported VL",
7613[4] =
7614        "UnsupportedVLMarker: Received VL Marker",
7615[5] =
7616        "BadPreempt: Exceeded the preemption nesting level",
7617[6] =
7618        "BadControlFlit: Received unsupported control flit",
7619/* no 7 */
7620[8] =
7621        "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7622};
7623
7624static const char * const port_rcv_txt[] = {
7625[1] =
7626        "BadPktLen: Illegal PktLen",
7627[2] =
7628        "PktLenTooLong: Packet longer than PktLen",
7629[3] =
7630        "PktLenTooShort: Packet shorter than PktLen",
7631[4] =
7632        "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7633[5] =
7634        "BadDLID: Illegal DLID (0, doesn't match HFI)",
7635[6] =
7636        "BadL2: Illegal L2 opcode",
7637[7] =
7638        "BadSC: Unsupported SC",
7639[9] =
7640        "BadRC: Illegal RC",
7641[11] =
7642        "PreemptError: Preempting with same VL",
7643[12] =
7644        "PreemptVL15: Preempting a VL15 packet",
7645};
7646
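    /*
     * Bit offsets of the FMConfig and PortRcv error-code groups within
     * ppd->port_error_action.
     */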
7647#define OPA_LDR_FMCONFIG_OFFSET 16
7648#define OPA_LDR_PORTRCV_OFFSET 0
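    /*
     * DCC block error interrupt handler: record the error info CSRs, log
     * each recognized error, and bounce the link if the FM's PortErrorAction
     * policy requests it.
     */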
7649static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7650{
7651        u64 info, hdr0, hdr1;
7652        const char *extra;
7653        char buf[96];
7654        struct hfi1_pportdata *ppd = dd->pport;
7655        u8 lcl_reason = 0;
7656        int do_bounce = 0;
7657
7658        if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7659                if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7660                        info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7661                        dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7662                        /* set status bit */
7663                        dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7664                }
7665                reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7666        }
7667
7668        if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7669                struct hfi1_pportdata *ppd = dd->pport;
7670                /* this counter saturates at (2^32) - 1 */
7671                if (ppd->link_downed < (u32)UINT_MAX)
7672                        ppd->link_downed++;
7673                reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7674        }
7675
7676        if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7677                u8 reason_valid = 1;
7678
7679                info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7680                if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7681                        dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7682                        /* set status bit */
7683                        dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7684                }
7685                switch (info) {
7686                case 0:
7687                case 1:
7688                case 2:
7689                case 3:
7690                case 4:
7691                case 5:
7692                case 6:
7693                        extra = fm_config_txt[info];
7694                        break;
7695                case 8:
7696                        extra = fm_config_txt[info];
7697                        if (ppd->port_error_action &
7698                            OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7699                                do_bounce = 1;
7700                                /*
7701                                 * lcl_reason cannot be derived from info
7702                                 * for this error
7703                                 */
7704                                lcl_reason =
7705                                  OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7706                        }
7707                        break;
7708                default:
7709                        reason_valid = 0;
7710                        snprintf(buf, sizeof(buf), "reserved%lld", info);
7711                        extra = buf;
7712                        break;
7713                }
7714
7715                if (reason_valid && !do_bounce) {
7716                        do_bounce = ppd->port_error_action &
7717                                        (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7718                        lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7719                }
7720
7721                /* just report this */
7722                dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7723                reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7724        }
7725
7726        if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7727                u8 reason_valid = 1;
7728
7729                info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7730                hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7731                hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7732                if (!(dd->err_info_rcvport.status_and_code &
7733                      OPA_EI_STATUS_SMASK)) {
7734                        dd->err_info_rcvport.status_and_code =
7735                                info & OPA_EI_CODE_SMASK;
7736                        /* set status bit */
7737                        dd->err_info_rcvport.status_and_code |=
7738                                OPA_EI_STATUS_SMASK;
7739                        /*
7740                         * save first 2 flits in the packet that caused
7741                         * the error
7742                         */
7743                        dd->err_info_rcvport.packet_flit1 = hdr0;
7744                        dd->err_info_rcvport.packet_flit2 = hdr1;
7745                }
7746                switch (info) {
7747                case 1:
7748                case 2:
7749                case 3:
7750                case 4:
7751                case 5:
7752                case 6:
7753                case 7:
7754                case 9:
7755                case 11:
7756                case 12:
7757                        extra = port_rcv_txt[info];
7758                        break;
7759                default:
7760                        reason_valid = 0;
7761                        snprintf(buf, sizeof(buf), "reserved%lld", info);
7762                        extra = buf;
7763                        break;
7764                }
7765
7766                if (reason_valid && !do_bounce) {
7767                        do_bounce = ppd->port_error_action &
7768                                        (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7769                        lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7770                }
7771
7772                /* just report this */
7773                dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7774                dd_dev_info(dd, "           hdr0 0x%llx, hdr1 0x%llx\n",
7775                            hdr0, hdr1);
7776
7777                reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7778        }
7779
7780        if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7781                /* informative only */
7782                dd_dev_info(dd, "8051 access to LCB blocked\n");
7783                reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7784        }
7785        if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7786                /* informative only */
7787                dd_dev_info(dd, "host access to LCB blocked\n");
7788                reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7789        }
7790
7791        /* report any remaining errors */
7792        if (reg)
7793                dd_dev_info(dd, "DCC Error: %s\n",
7794                            dcc_err_string(buf, sizeof(buf), reg));
7795
7796        if (lcl_reason == 0)
7797                lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7798
7799        if (do_bounce) {
7800                dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7801                set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7802                queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7803        }
7804}
7805
7806static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7807{
7808        char buf[96];
7809
7810        dd_dev_info(dd, "LCB Error: %s\n",
7811                    lcb_err_string(buf, sizeof(buf), reg));
7812}
7813
7814/*
7815 * CCE block DC interrupt.  Source is < 8.
7816 */
7817static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7818{
7819        const struct err_reg_info *eri = &dc_errs[source];
7820
7821        if (eri->handler) {
7822                interrupt_clear_down(dd, 0, eri);
7823        } else if (source == 3 /* dc_lbm_int */) {
7824                /*
7825                 * This indicates that a parity error has occurred on the
7826                 * address/control lines presented to the LBM.  The error
7827                 * is a single pulse, there is no associated error flag,
7828                 * and it is non-maskable.  This is because if a parity
7829                 * error occurs on the request, the request is dropped.
7830                 * This should never occur, but it is nice to know if it
7831                 * ever does.
7832                 */
7833                dd_dev_err(dd, "Parity error in DC LBM block\n");
7834        } else {
7835                dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7836        }
7837}
7838
7839/*
7840 * TX block send credit interrupt.  Source is < 160.
7841 */
7842static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7843{
7844        sc_group_release_update(dd, source);
7845}
7846
7847/*
7848 * TX block SDMA interrupt.  Source is < 48.
7849 *
7850 * SDMA interrupts are grouped by type:
7851 *
7852 *       0 -  N-1 = SDma
7853 *       N - 2N-1 = SDmaProgress
7854 *      2N - 3N-1 = SDmaIdle
7855 */
7856static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7857{
7858        /* what interrupt */
7859        unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
7860        /* which engine */
7861        unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7862
7863#ifdef CONFIG_SDMA_VERBOSITY
7864        dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7865                   slashstrip(__FILE__), __LINE__, __func__);
7866        sdma_dumpstate(&dd->per_sdma[which]);
7867#endif
7868
7869        if (likely(what < 3 && which < dd->num_sdma)) {
7870                sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7871        } else {
7872                /* should not happen */
7873                dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7874        }
7875}
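
/*
 * Worked example (standalone sketch, not driver code) of the source
 * decomposition above.  TXE_NUM_SDMA_ENGINES is assumed to be 16 here
 * purely for illustration: source 35 then maps to interrupt type 2
 * (SDmaIdle) on engine 3.
 */
#if 0	/* illustrative only */
#include <stdio.h>

int main(void)
{
	const unsigned int num_engines = 16;	/* assumed engine count */
	unsigned int source = 35;
	unsigned int what  = source / num_engines;	/* 2 -> SDmaIdle */
	unsigned int which = source % num_engines;	/* 3 -> engine 3 */

	printf("source %u -> type %u, engine %u\n", source, what, which);
	return 0;
}
#endif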
7876
7877/*
7878 * RX block receive available interrupt.  Source is < 160.
7879 */
7880static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7881{
7882        struct hfi1_ctxtdata *rcd;
7883        char *err_detail;
7884
7885        if (likely(source < dd->num_rcv_contexts)) {
7886                rcd = dd->rcd[source];
7887                if (rcd) {
7888                        if (source < dd->first_user_ctxt)
7889                                rcd->do_interrupt(rcd, 0);
7890                        else
7891                                handle_user_interrupt(rcd);
7892                        return; /* OK */
7893                }
7894                /* received an interrupt, but no rcd */
7895                err_detail = "dataless";
7896        } else {
7897                /* received an interrupt, but are not using that context */
7898                err_detail = "out of range";
7899        }
7900        dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
7901                   err_detail, source);
7902}
7903
7904/*
7905 * RX block receive urgent interrupt.  Source is < 160.
7906 */
7907static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
7908{
7909        struct hfi1_ctxtdata *rcd;
7910        char *err_detail;
7911
7912        if (likely(source < dd->num_rcv_contexts)) {
7913                rcd = dd->rcd[source];
7914                if (rcd) {
7915                        /* only pay attention to user urgent interrupts */
7916                        if (source >= dd->first_user_ctxt)
7917                                handle_user_interrupt(rcd);
7918                        return; /* OK */
7919                }
7920                /* received an interrupt, but no rcd */
7921                err_detail = "dataless";
7922        } else {
7923                /* received an interrupt, but are not using that context */
7924                err_detail = "out of range";
7925        }
7926        dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
7927                   err_detail, source);
7928}
7929
7930/*
7931 * Reserved range interrupt.  Should not be called in normal operation.
7932 */
7933static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
7934{
7935        char name[64];
7936
7937        dd_dev_err(dd, "unexpected %s interrupt\n",
7938                   is_reserved_name(name, sizeof(name), source));
7939}
7940
7941static const struct is_table is_table[] = {
7942/*
7943 * start                 end
7944 *                              name func               interrupt func
7945 */
7946{ IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
7947                                is_misc_err_name,       is_misc_err_int },
7948{ IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
7949                                is_sdma_eng_err_name,   is_sdma_eng_err_int },
7950{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
7951                                is_sendctxt_err_name,   is_sendctxt_err_int },
7952{ IS_SDMA_START,             IS_SDMA_END,
7953                                is_sdma_eng_name,       is_sdma_eng_int },
7954{ IS_VARIOUS_START,          IS_VARIOUS_END,
7955                                is_various_name,        is_various_int },
7956{ IS_DC_START,       IS_DC_END,
7957                                is_dc_name,             is_dc_int },
7958{ IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
7959                                is_rcv_avail_name,      is_rcv_avail_int },
7960{ IS_RCVURGENT_START,    IS_RCVURGENT_END,
7961                                is_rcv_urgent_name,     is_rcv_urgent_int },
7962{ IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
7963                                is_send_credit_name,    is_send_credit_int},
7964{ IS_RESERVED_START,     IS_RESERVED_END,
7965                                is_reserved_name,       is_reserved_int},
7966};
7967
7968/*
7969 * Interrupt source interrupt - called when the given source has an interrupt.
7970 * Source is a bit index into an array of 64-bit integers.
7971 */
7972static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
7973{
7974        const struct is_table *entry;
7975
7976        /* avoids a double compare by walking the table in-order */
7977        for (entry = &is_table[0]; entry->is_name; entry++) {
7978                if (source < entry->end) {
7979                        trace_hfi1_interrupt(dd, entry, source);
7980                        entry->is_int(dd, source - entry->start);
7981                        return;
7982                }
7983        }
7984        /* fell off the end */
7985        dd_dev_err(dd, "invalid interrupt source %u\n", source);
7986}
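
/*
 * Standalone sketch (not driver code) of the range walk above: with
 * cumulative end values, the first entry whose end exceeds the source
 * wins, and the handler is given a source relative to that entry's
 * start.  The ranges below are invented for illustration.
 */
#if 0	/* illustrative only */
#include <stdio.h>

struct range { unsigned int start, end; const char *name; };

int main(void)
{
	struct range table[] = {
		{  0, 16, "errors" },
		{ 16, 64, "sdma"   },
		{ 64, 96, "rcv"    },
		{  0,  0, NULL     },
	};
	unsigned int source = 70;
	struct range *e;

	for (e = table; e->name; e++)
		if (source < e->end) {
			printf("%s, relative source %u\n",
			       e->name, source - e->start);
			break;
		}
	return 0;
}
#endif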
7987
7988/*
7989 * General interrupt handler.  This is able to correctly handle
7990 * all interrupts in case INTx is used.
7991 */
7992static irqreturn_t general_interrupt(int irq, void *data)
7993{
7994        struct hfi1_devdata *dd = data;
7995        u64 regs[CCE_NUM_INT_CSRS];
7996        u32 bit;
7997        int i;
7998
7999        this_cpu_inc(*dd->int_counter);
8000
8001        /* phase 1: scan and clear all handled interrupts */
8002        for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8003                if (dd->gi_mask[i] == 0) {
8004                        regs[i] = 0;    /* used later */
8005                        continue;
8006                }
8007                regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8008                                dd->gi_mask[i];
8009                /* only clear if anything is set */
8010                if (regs[i])
8011                        write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8012        }
8013
8014        /* phase 2: call the appropriate handler */
8015        for_each_set_bit(bit, (unsigned long *)&regs[0],
8016                         CCE_NUM_INT_CSRS * 64) {
8017                is_interrupt(dd, bit);
8018        }
8019
8020        return IRQ_HANDLED;
8021}
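
/*
 * Standalone sketch (not driver code) of phase 2 above: walking the set
 * bits of the gathered status words yields global interrupt source
 * numbers, which is exactly what is_interrupt() consumes.  Two status
 * words with arbitrary contents are used here for illustration.
 */
#if 0	/* illustrative only */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t regs[2] = { 0x5, 0x1 };	/* bits 0, 2 and bit 64 */
	unsigned int i, bit;

	for (i = 0; i < 2; i++)
		for (bit = 0; bit < 64; bit++)
			if (regs[i] & ((uint64_t)1 << bit))
				printf("interrupt source %u\n", i * 64 + bit);
	return 0;
}
#endif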
8022
8023static irqreturn_t sdma_interrupt(int irq, void *data)
8024{
8025        struct sdma_engine *sde = data;
8026        struct hfi1_devdata *dd = sde->dd;
8027        u64 status;
8028
8029#ifdef CONFIG_SDMA_VERBOSITY
8030        dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8031                   slashstrip(__FILE__), __LINE__, __func__);
8032        sdma_dumpstate(sde);
8033#endif
8034
8035        this_cpu_inc(*dd->int_counter);
8036
8037        /* This read_csr is really bad in the hot path */
8038        status = read_csr(dd,
8039                          CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8040                          & sde->imask;
8041        if (likely(status)) {
8042                /* clear the interrupt(s) */
8043                write_csr(dd,
8044                          CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8045                          status);
8046
8047                /* handle the interrupt(s) */
8048                sdma_engine_interrupt(sde, status);
8049        } else
8050                dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
8051                           sde->this_idx);
8052
8053        return IRQ_HANDLED;
8054}
8055
8056/*
8057 * Clear the receive interrupt.  Use a read of the interrupt clear CSR
8058 * to ensure that the write completed.  This does NOT guarantee that
8059 * queued DMA writes to memory from the chip are pushed.
8060 */
8061static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8062{
8063        struct hfi1_devdata *dd = rcd->dd;
8064        u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8065
8066        mmiowb();       /* make sure everything before is written */
8067        write_csr(dd, addr, rcd->imask);
8068        /* force the above write on the chip and get a value back */
8069        (void)read_csr(dd, addr);
8070}
8071
8072/* force the receive interrupt */
8073void force_recv_intr(struct hfi1_ctxtdata *rcd)
8074{
8075        write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8076}
8077
8078/*
8079 * Return non-zero if a packet is present.
8080 *
8081 * This routine is called when rechecking for packets after the RcvAvail
8082 * interrupt has been cleared down.  First, do a quick check of memory for
8083 * a packet present.  If not found, use an expensive CSR read of the context
8084 * tail to determine the actual tail.  The CSR read is necessary because there
8085 * is no method to push pending DMAs to memory other than an interrupt and we
8086 * are trying to determine if we need to force an interrupt.
8087 */
8088static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8089{
8090        u32 tail;
8091        int present;
8092
8093        if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8094                present = (rcd->seq_cnt ==
8095                                rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8096        else /* is RDMA rtail */
8097                present = (rcd->head != get_rcvhdrtail(rcd));
8098
8099        if (present)
8100                return 1;
8101
8102        /* fall back to a CSR read, correct independent of DMA_RTAIL */
8103        tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8104        return rcd->head != tail;
8105}
8106
8107/*
8108 * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
8109 * This routine will try to handle packets immediately (latency), but if
8110 * it finds too many, it will invoke the thread handler (bandwidth).  The
8111 * chip receive interrupt is *not* cleared down until this or the thread (if
8112 * invoked) is finished.  The intent is to avoid extra interrupts while we
8113 * are processing packets anyway.
8114 */
8115static irqreturn_t receive_context_interrupt(int irq, void *data)
8116{
8117        struct hfi1_ctxtdata *rcd = data;
8118        struct hfi1_devdata *dd = rcd->dd;
8119        int disposition;
8120        int present;
8121
8122        trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8123        this_cpu_inc(*dd->int_counter);
8124        aspm_ctx_disable(rcd);
8125
8126        /* receive interrupt remains blocked while processing packets */
8127        disposition = rcd->do_interrupt(rcd, 0);
8128
8129        /*
8130         * Too many packets were seen while processing packets in this
8131         * IRQ handler.  Invoke the handler thread.  The receive interrupt
8132         * remains blocked.
8133         */
8134        if (disposition == RCV_PKT_LIMIT)
8135                return IRQ_WAKE_THREAD;
8136
8137        /*
8138         * The packet processor detected no more packets.  Clear the receive
8139         * interrupt and recheck for a packet that may have arrived
8140         * after the previous check and interrupt clear.  If a packet arrived,
8141         * force another interrupt.
8142         */
8143        clear_recv_intr(rcd);
8144        present = check_packet_present(rcd);
8145        if (present)
8146                force_recv_intr(rcd);
8147
8148        return IRQ_HANDLED;
8149}
8150
8151/*
8152 * Receive packet thread handler.  This expects to be invoked with the
8153 * receive interrupt still blocked.
8154 */
8155static irqreturn_t receive_context_thread(int irq, void *data)
8156{
8157        struct hfi1_ctxtdata *rcd = data;
8158        int present;
8159
8160        /* receive interrupt is still blocked from the IRQ handler */
8161        (void)rcd->do_interrupt(rcd, 1);
8162
8163        /*
8164         * The packet processor will only return if it detected no more
8165         * packets.  Hold IRQs here so we can safely clear the interrupt and
8166         * recheck for a packet that may have arrived after the previous
8167         * check and the interrupt clear.  If a packet arrived, force another
8168         * interrupt.
8169         */
8170        local_irq_disable();
8171        clear_recv_intr(rcd);
8172        present = check_packet_present(rcd);
8173        if (present)
8174                force_recv_intr(rcd);
8175        local_irq_enable();
8176
8177        return IRQ_HANDLED;
8178}
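
/*
 * The IRQ/thread pair above follows the usual threaded-interrupt split:
 * the hard handler returns IRQ_WAKE_THREAD when it hits the packet
 * limit, and the thread finishes the work with the chip interrupt still
 * blocked.  A minimal registration sketch of that pattern follows; the
 * function and name string are illustrative only, the driver's real
 * MSI-X setup lives elsewhere in this file.
 */
#if 0	/* illustrative only */
static int example_request_rcv_irq(struct hfi1_devdata *dd,
				   struct hfi1_ctxtdata *rcd,
				   unsigned int irq)
{
	/* hard handler first, threaded handler second */
	return request_threaded_irq(irq, receive_context_interrupt,
				    receive_context_thread, 0,
				    "hfi1_example_rcvctxt", rcd);
}
#endif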
8179
8180/* ========================================================================= */
8181
8182u32 read_physical_state(struct hfi1_devdata *dd)
8183{
8184        u64 reg;
8185
8186        reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8187        return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8188                                & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8189}
8190
8191u32 read_logical_state(struct hfi1_devdata *dd)
8192{
8193        u64 reg;
8194
8195        reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8196        return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8197                                & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8198}
8199
8200static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8201{
8202        u64 reg;
8203
8204        reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8205        /* clear current state, set new state */
8206        reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8207        reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8208        write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8209}
8210
8211/*
8212 * Use the 8051 to read an LCB CSR.
8213 */
8214static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8215{
8216        u32 regno;
8217        int ret;
8218
8219        if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8220                if (acquire_lcb_access(dd, 0) == 0) {
8221                        *data = read_csr(dd, addr);
8222                        release_lcb_access(dd, 0);
8223                        return 0;
8224                }
8225                return -EBUSY;
8226        }
8227
8228        /* register is an index of LCB registers: (offset - base) / 8 */
8229        regno = (addr - DC_LCB_CFG_RUN) >> 3;
8230        ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8231        if (ret != HCMD_SUCCESS)
8232                return -EBUSY;
8233        return 0;
8234}
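
/*
 * Worked example (standalone sketch, not driver code) of the register
 * index computation above: LCB CSRs are 8 bytes apart, so the index is
 * simply (offset - base) / 8.  The base and offset values used here are
 * made up for illustration, not real chip addresses.
 */
#if 0	/* illustrative only */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t base = 0x300000;		/* hypothetical LCB base CSR */
	uint32_t addr = 0x300028;		/* hypothetical LCB CSR */
	uint32_t regno = (addr - base) >> 3;	/* 0x28 / 8 = 5 */

	printf("LCB register index %u\n", regno);
	return 0;
}
#endif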
8235
8236/*
8237 * Read an LCB CSR.  Access may not be in host control, so check.
8238 * Return 0 on success, -EBUSY on failure.
8239 */
8240int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8241{
8242        struct hfi1_pportdata *ppd = dd->pport;
8243
8244        /* if up, go through the 8051 for the value */
8245        if (ppd->host_link_state & HLS_UP)
8246                return read_lcb_via_8051(dd, addr, data);
8247        /* if going up or down, no access */
8248        if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8249                return -EBUSY;
8250        /* otherwise, host has access */
8251        *data = read_csr(dd, addr);
8252        return 0;
8253}
8254
8255/*
8256 * Use the 8051 to write an LCB CSR.
8257 */
8258static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8259{
8260        u32 regno;
8261        int ret;
8262
8263        if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8264            (dd->dc8051_ver < dc8051_ver(0, 20))) {
8265                if (acquire_lcb_access(dd, 0) == 0) {
8266                        write_csr(dd, addr, data);
8267                        release_lcb_access(dd, 0);
8268                        return 0;
8269                }
8270                return -EBUSY;
8271        }
8272
8273        /* register is an index of LCB registers: (offset - base) / 8 */
8274        regno = (addr - DC_LCB_CFG_RUN) >> 3;
8275        ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8276        if (ret != HCMD_SUCCESS)
8277                return -EBUSY;
8278        return 0;
8279}
8280
8281/*
8282 * Write an LCB CSR.  Access may not be in host control, so check.
8283 * Return 0 on success, -EBUSY on failure.
8284 */
8285int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8286{
8287        struct hfi1_pportdata *ppd = dd->pport;
8288
8289        /* if up, go through the 8051 for the value */
8290        if (ppd->host_link_state & HLS_UP)
8291                return write_lcb_via_8051(dd, addr, data);
8292        /* if going up or down, no access */
8293        if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8294                return -EBUSY;
8295        /* otherwise, host has access */
8296        write_csr(dd, addr, data);
8297        return 0;
8298}
8299
8300/*
8301 * Returns:
8302 *      < 0 = Linux error, not able to get access
8303 *      > 0 = 8051 command RETURN_CODE
8304 */
8305static int do_8051_command(
8306        struct hfi1_devdata *dd,
8307        u32 type,
8308        u64 in_data,
8309        u64 *out_data)
8310{
8311        u64 reg, completed;
8312        int return_code;
8313        unsigned long flags;
8314        unsigned long timeout;
8315
8316        hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8317
8318        /*
8319         * Alternative to holding the lock for a long time:
8320         * - keep busy wait - have other users bounce off
8321         */
8322        spin_lock_irqsave(&dd->dc8051_lock, flags);
8323
8324        /* We can't send any commands to the 8051 if it's in reset */
8325        if (dd->dc_shutdown) {
8326                return_code = -ENODEV;
8327                goto fail;
8328        }
8329
8330        /*
8331         * If an 8051 host command timed out previously, then the 8051 is
8332         * stuck.
8333         *
8334         * On first timeout, attempt to reset and restart the entire DC
8335         * block (including 8051). (Is this too big of a hammer?)
8336         *
8337         * If the 8051 times out a second time, the reset did not bring it
8338         * back to healthy life. In that case, fail any subsequent commands.
8339         */
8340        if (dd->dc8051_timed_out) {
8341                if (dd->dc8051_timed_out > 1) {
8342                        dd_dev_err(dd,
8343                                   "Previous 8051 host command timed out, skipping command %u\n",
8344                                   type);
8345                        return_code = -ENXIO;
8346                        goto fail;
8347                }
8348                spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8349                dc_shutdown(dd);
8350                dc_start(dd);
8351                spin_lock_irqsave(&dd->dc8051_lock, flags);
8352        }
8353
8354        /*
8355         * If there is no timeout, then the 8051 command interface is
8356         * waiting for a command.
8357         */
8358
8359        /*
8360         * When writing an LCB CSR, out_data contains the full value
8361         * to be written, while in_data contains the relative LCB
8362         * address in 7:0.  Do the work here, rather than the caller,
8363         * of distributing the write data to where it needs to go:
8364         *
8365         * Write data
8366         *   39:00 -> in_data[47:8]
8367         *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8368         *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8369         */
8370        if (type == HCMD_WRITE_LCB_CSR) {
8371                in_data |= ((*out_data) & 0xffffffffffull) << 8;
8372                reg = ((((*out_data) >> 40) & 0xff) <<
8373                                DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8374                      | ((((*out_data) >> 48) & 0xffff) <<
8375                                DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8376                write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8377        }
8378
8379        /*
8380         * Do two writes: the first to stabilize the type and req_data, the
8381         * second to activate.
8382         */
8383        reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8384                        << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8385                | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8386                        << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8387        write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8388        reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8389        write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8390
8391        /* wait for completion, alternate: interrupt */
8392        timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8393        while (1) {
8394                reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8395                completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8396                if (completed)
8397                        break;
8398                if (time_after(jiffies, timeout)) {
8399                        dd->dc8051_timed_out++;
8400                        dd_dev_err(dd, "8051 host command %u timeout\n", type);
8401                        if (out_data)
8402                                *out_data = 0;
8403                        return_code = -ETIMEDOUT;
8404                        goto fail;
8405                }
8406                udelay(2);
8407        }
8408
8409        if (out_data) {
8410                *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8411                                & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8412                if (type == HCMD_READ_LCB_CSR) {
8413                        /* top 16 bits are in a different register */
8414                        *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8415                                & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8416                                << (48
8417                                    - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8418                }
8419        }
8420        return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8421                                & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8422        dd->dc8051_timed_out = 0;
8423        /*
8424         * Clear command for next user.
8425         */
8426        write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8427
8428fail:
8429        spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8430
8431        return return_code;
8432}
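
/*
 * Standalone sketch (not driver code) of the HCMD_WRITE_LCB_CSR data
 * distribution described above: bits 39:0 of the write value ride in
 * in_data[47:8], bits 47:40 land in the RETURN_CODE field and bits
 * 63:48 in the RSP_DATA field of DC8051_CFG_EXT_DEV_0.  The two shift
 * amounts below are placeholders, not the real CSR field positions.
 */
#if 0	/* illustrative only */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t write_val = 0x1122334455667788ULL;	/* value to write */
	uint64_t in_data = 0x2a;		/* relative LCB address in 7:0 */
	unsigned int rc_shift = 0, rsp_shift = 16;	/* placeholder shifts */
	uint64_t ext_dev_0;

	in_data |= (write_val & 0xffffffffffULL) << 8;		/* bits 39:0 */
	ext_dev_0 = (((write_val >> 40) & 0xff) << rc_shift) |	/* bits 47:40 */
		    (((write_val >> 48) & 0xffff) << rsp_shift);/* bits 63:48 */

	printf("in_data 0x%016llx ext_dev_0 0x%016llx\n",
	       (unsigned long long)in_data, (unsigned long long)ext_dev_0);
	return 0;
}
#endif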
8433
8434static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8435{
8436        return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8437}
8438
8439int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8440                     u8 lane_id, u32 config_data)
8441{
8442        u64 data;
8443        int ret;
8444
8445        data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8446                | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8447                | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8448        ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8449        if (ret != HCMD_SUCCESS) {
8450                dd_dev_err(dd,
8451                           "load 8051 config: field id %d, lane %d, err %d\n",
8452                           (int)field_id, (int)lane_id, ret);
8453        }
8454        return ret;
8455}
8456
8457/*
8458 * Read the 8051 firmware "registers".  Use the RAM directly.  Always
8459 * set the result, even on error.
8460 * Return 0 on success, -errno on failure
8461 */
8462int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8463                     u32 *result)
8464{
8465        u64 big_data;
8466        u32 addr;
8467        int ret;
8468
8469        /* address start depends on the lane_id */
8470        if (lane_id < 4)
8471                addr = (4 * NUM_GENERAL_FIELDS)
8472                        + (lane_id * 4 * NUM_LANE_FIELDS);
8473        else
8474                addr = 0;
8475        addr += field_id * 4;
8476
8477        /* read is in 8-byte chunks, hardware will truncate the address down */
8478        ret = read_8051_data(dd, addr, 8, &big_data);
8479
8480        if (ret == 0) {
8481                /* extract the 4 bytes we want */
8482                if (addr & 0x4)
8483                        *result = (u32)(big_data >> 32);
8484                else
8485                        *result = (u32)big_data;
8486        } else {
8487                *result = 0;
8488                dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8489                           __func__, lane_id, field_id);
8490        }
8491
8492        return ret;
8493}
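
/*
 * Worked example (standalone sketch, not driver code) of the 8051
 * config address computation above.  NUM_GENERAL_FIELDS and
 * NUM_LANE_FIELDS are assumed to be 16 and 8 here purely for
 * illustration; the point is the layout: general fields first, then a
 * block of per-lane fields, all 4 bytes wide, read back in 8-byte
 * chunks with the wanted half selected by addr & 0x4.
 */
#if 0	/* illustrative only */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t num_general = 16, num_lane = 8;	/* assumed counts */
	uint32_t lane_id = 2, field_id = 3;
	uint32_t addr, result;
	uint64_t big_data = 0x1111111122222222ULL;	/* fake 8-byte read */

	if (lane_id < 4)
		addr = 4 * num_general + lane_id * 4 * num_lane;
	else
		addr = 0;			/* general (non-lane) fields */
	addr += field_id * 4;

	/* hardware truncates the read address down to an 8-byte boundary */
	result = (addr & 0x4) ? (uint32_t)(big_data >> 32) : (uint32_t)big_data;
	printf("addr %u result 0x%08x\n", addr, result);
	return 0;
}
#endif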
8494
8495static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8496                              u8 continuous)
8497{
8498        u32 frame;
8499
8500        frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8501                | power_management << POWER_MANAGEMENT_SHIFT;
8502        return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8503                                GENERAL_CONFIG, frame);
8504}
8505
8506static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8507                                 u16 vl15buf, u8 crc_sizes)
8508{
8509        u32 frame;
8510
8511        frame = (u32)vau << VAU_SHIFT
8512                | (u32)z << Z_SHIFT
8513                | (u32)vcu << VCU_SHIFT
8514                | (u32)vl15buf << VL15BUF_SHIFT
8515                | (u32)crc_sizes << CRC_SIZES_SHIFT;
8516        return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8517                                GENERAL_CONFIG, frame);
8518}
8519
8520static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8521                                     u8 *flag_bits, u16 *link_widths)
8522{
8523        u32 frame;
8524
8525        read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8526                         &frame);
8527        *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8528        *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8529        *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8530}
8531
8532static int write_vc_local_link_width(struct hfi1_devdata *dd,
8533                                     u8 misc_bits,
8534                                     u8 flag_bits,
8535                                     u16 link_widths)
8536{
8537        u32 frame;
8538
8539        frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8540                | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8541                | (u32)link_widths << LINK_WIDTH_SHIFT;
8542        return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8543                     frame);
8544}
8545
8546static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8547                                 u8 device_rev)
8548{
8549        u32 frame;
8550
8551        frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8552                | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8553        return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8554}
8555
8556static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8557                                  u8 *device_rev)
8558{
8559        u32 frame;
8560
8561        read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8562        *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8563        *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8564                        & REMOTE_DEVICE_REV_MASK;
8565}
8566
8567void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8568{
8569        u32 frame;
8570
8571        read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8572        *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8573        *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8574}
8575
8576static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8577                               u8 *continuous)
8578{
8579        u32 frame;
8580
8581        read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8582        *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8583                                        & POWER_MANAGEMENT_MASK;
8584        *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8585                                        & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8586}
8587
8588static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8589                                  u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8590{
8591        u32 frame;
8592
8593        read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8594        *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8595        *z = (frame >> Z_SHIFT) & Z_MASK;
8596        *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8597        *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8598        *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8599}
8600
8601static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8602                                      u8 *remote_tx_rate,
8603                                      u16 *link_widths)
8604{
8605        u32 frame;
8606
8607        read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8608                         &frame);
8609        *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8610                                & REMOTE_TX_RATE_MASK;
8611        *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8612}
8613
8614static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8615{
8616        u32 frame;
8617
8618        read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8619        *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8620}
8621
8622static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8623{
8624        u32 frame;
8625
8626        read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8627        *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8628}
8629
8630static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8631{
8632        read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8633}
8634
8635static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8636{
8637        read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8638}
8639
8640void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8641{
8642        u32 frame;
8643        int ret;
8644
8645        *link_quality = 0;
8646        if (dd->pport->host_link_state & HLS_UP) {
8647                ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8648                                       &frame);
8649                if (ret == 0)
8650                        *link_quality = (frame >> LINK_QUALITY_SHIFT)
8651                                                & LINK_QUALITY_MASK;
8652        }
8653}
8654
8655static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8656{
8657        u32 frame;
8658
8659        read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8660        *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8661}
8662
8663static int read_tx_settings(struct hfi1_devdata *dd,
8664                            u8 *enable_lane_tx,
8665                            u8 *tx_polarity_inversion,
8666                            u8 *rx_polarity_inversion,
8667                            u8 *max_rate)
8668{
8669        u32 frame;
8670        int ret;
8671
8672        ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8673        *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8674                                & ENABLE_LANE_TX_MASK;
8675        *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8676                                & TX_POLARITY_INVERSION_MASK;
8677        *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8678                                & RX_POLARITY_INVERSION_MASK;
8679        *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8680        return ret;
8681}
8682
8683static int write_tx_settings(struct hfi1_devdata *dd,
8684                             u8 enable_lane_tx,
8685                             u8 tx_polarity_inversion,
8686                             u8 rx_polarity_inversion,
8687                             u8 max_rate)
8688{
8689        u32 frame;
8690
8691        /* no need to mask, all variable sizes match field widths */
8692        frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8693                | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8694                | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8695                | max_rate << MAX_RATE_SHIFT;
8696        return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8697}
8698
8699static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8700{
8701        u32 frame, version, prod_id;
8702        int ret, lane;
8703
8704        /* 4 lanes */
8705        for (lane = 0; lane < 4; lane++) {
8706                ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8707                if (ret) {
8708                        dd_dev_err(dd,
8709                                   "Unable to read lane %d firmware details\n",
8710                                   lane);
8711                        continue;
8712                }
8713                version = (frame >> SPICO_ROM_VERSION_SHIFT)
8714                                        & SPICO_ROM_VERSION_MASK;
8715                prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8716                                        & SPICO_ROM_PROD_ID_MASK;
8717                dd_dev_info(dd,
8718                            "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8719                            lane, version, prod_id);
8720        }
8721}
8722
8723/*
8724 * Read an idle LCB message.
8725 *
8726 * Returns 0 on success, -EINVAL on error
8727 */
8728static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8729{
8730        int ret;
8731
8732        ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
8733        if (ret != HCMD_SUCCESS) {
8734                dd_dev_err(dd, "read idle message: type %d, err %d\n",
8735                           (u32)type, ret);
8736                return -EINVAL;
8737        }
8738        dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8739        /* return only the payload as we already know the type */
8740        *data_out >>= IDLE_PAYLOAD_SHIFT;
8741        return 0;
8742}
8743
8744/*
8745 * Read an idle SMA message.  To be done in response to a notification from
8746 * the 8051.
8747 *
8748 * Returns 0 on success, -EINVAL on error
8749 */
8750static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8751{
8752        return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8753                                 data);
8754}
8755
8756/*
8757 * Send an idle LCB message.
8758 *
8759 * Returns 0 on success, -EINVAL on error
8760 */
8761static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8762{
8763        int ret;
8764
8765        dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8766        ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8767        if (ret != HCMD_SUCCESS) {
8768                dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8769                           data, ret);
8770                return -EINVAL;
8771        }
8772        return 0;
8773}
8774
8775/*
8776 * Send an idle SMA message.
8777 *
8778 * Returns 0 on success, -EINVAL on error
8779 */
8780int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8781{
8782        u64 data;
8783
8784        data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8785                ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8786        return send_idle_message(dd, data);
8787}
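
/*
 * Standalone sketch (not driver code) of the idle message layout used
 * by send_idle_sma()/read_idle_sma() above: the payload sits above the
 * payload shift with the message type in its own field below it, and
 * the reader keeps only the payload since the type is already known.
 * The shift and mask values here are placeholders, not the chip's
 * definitions.
 */
#if 0	/* illustrative only */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned int payload_shift = 8, type_shift = 0; /* placeholders */
	const uint64_t payload_mask = 0xffffffffffffffULL;    /* placeholder */
	const uint64_t type_sma = 1;			       /* placeholder */
	uint64_t message = 0xabcd, wire, decoded;

	/* encode: payload above the shift, type in its own field */
	wire = ((message & payload_mask) << payload_shift) |
	       (type_sma << type_shift);
	/* decode: the type is already known, keep only the payload */
	decoded = wire >> payload_shift;

	printf("wire 0x%llx payload 0x%llx\n",
	       (unsigned long long)wire, (unsigned long long)decoded);
	return 0;
}
#endif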
8788
8789/*
8790 * Initialize the LCB then do a quick link up.  This may or may not be
8791 * in loopback.
8792 *
8793 * return 0 on success, -errno on error
8794 */
8795static int do_quick_linkup(struct hfi1_devdata *dd)
8796{
8797        u64 reg;
8798        unsigned long timeout;
8799        int ret;
8800
8801        lcb_shutdown(dd, 0);
8802
8803        if (loopback) {
8804                /* LCB_CFG_LOOPBACK.VAL = 2 */
8805                /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8806                write_csr(dd, DC_LCB_CFG_LOOPBACK,
8807                          IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8808                write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8809        }
8810
8811        /* start the LCBs */
8812        /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8813        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8814
8815        /* simulator only loopback steps */
8816        if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8817                /* LCB_CFG_RUN.EN = 1 */
8818                write_csr(dd, DC_LCB_CFG_RUN,
8819                          1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8820
8821                /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8822                timeout = jiffies + msecs_to_jiffies(10);
8823                while (1) {
8824                        reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8825                        if (reg)
8826                                break;
8827                        if (time_after(jiffies, timeout)) {
8828                                dd_dev_err(dd,
8829                                           "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8830                                return -ETIMEDOUT;
8831                        }
8832                        udelay(2);
8833                }
8834
8835                write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8836                          1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8837        }
8838
8839        if (!loopback) {
8840                /*
8841                 * When doing quick linkup and not in loopback, both
8842                 * sides must be done with LCB set-up before either
8843                 * starts the quick linkup.  Put a delay here so that
8844                 * both sides can be started and have a chance to be
8845                 * done with LCB set up before resuming.
8846                 */
8847                dd_dev_err(dd,
8848                           "Pausing for peer to be finished with LCB set up\n");
8849                msleep(5000);
8850                dd_dev_err(dd, "Continuing with quick linkup\n");
8851        }
8852
8853        write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8854        set_8051_lcb_access(dd);
8855
8856        /*
8857         * State "quick" LinkUp request sets the physical link state to
8858         * LinkUp without a verify capability sequence.
8859         * This state is in simulator v37 and later.
8860         */
8861        ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8862        if (ret != HCMD_SUCCESS) {
8863                dd_dev_err(dd,
8864                           "%s: set physical link state to quick LinkUp failed with return %d\n",
8865                           __func__, ret);
8866
8867                set_host_lcb_access(dd);
8868                write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8869
8870                if (ret >= 0)
8871                        ret = -EINVAL;
8872                return ret;
8873        }
8874
8875        return 0; /* success */
8876}
8877
8878/*
8879 * Set the SerDes to internal loopback mode.
8880 * Returns 0 on success, -errno on error.
8881 */
8882static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8883{
8884        int ret;
8885
8886        ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8887        if (ret == HCMD_SUCCESS)
8888                return 0;
8889        dd_dev_err(dd,
8890                   "Set physical link state to SerDes Loopback failed with return %d\n",
8891                   ret);
8892        if (ret >= 0)
8893                ret = -EINVAL;
8894        return ret;
8895}
8896
8897/*
8898 * Do all special steps to set up loopback.
8899 */
8900static int init_loopback(struct hfi1_devdata *dd)
8901{
8902        dd_dev_info(dd, "Entering loopback mode\n");
8903
8904        /* all loopbacks should disable self GUID check */
8905        write_csr(dd, DC_DC8051_CFG_MODE,
8906                  (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
8907
8908        /*
8909         * The simulator has only one loopback option - LCB.  Switch
8910         * to that option, which includes quick link up.
8911         *
8912         * Accept all valid loopback values.
8913         */
8914        if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
8915            (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
8916             loopback == LOOPBACK_CABLE)) {
8917                loopback = LOOPBACK_LCB;
8918                quick_linkup = 1;
8919                return 0;
8920        }
8921
8922        /* handle serdes loopback */
8923        if (loopback == LOOPBACK_SERDES) {
8924                /* internal serdes loopback needs quick linkup on RTL */
8925                if (dd->icode == ICODE_RTL_SILICON)
8926                        quick_linkup = 1;
8927                return set_serdes_loopback_mode(dd);
8928        }
8929
8930        /* LCB loopback - handled at poll time */
8931        if (loopback == LOOPBACK_LCB) {
8932                quick_linkup = 1; /* LCB is always quick linkup */
8933
8934                /* not supported in emulation due to emulation RTL changes */
8935                if (dd->icode == ICODE_FPGA_EMULATION) {
8936                        dd_dev_err(dd,
8937                                   "LCB loopback not supported in emulation\n");
8938                        return -EINVAL;
8939                }
8940                return 0;
8941        }
8942
8943        /* external cable loopback requires no extra steps */
8944        if (loopback == LOOPBACK_CABLE)
8945                return 0;
8946
8947        dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
8948        return -EINVAL;
8949}
8950
8951/*
8952 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
8953 * used in the Verify Capability link width attribute.
8954 */
8955static u16 opa_to_vc_link_widths(u16 opa_widths)
8956{
8957        int i;
8958        u16 result = 0;
8959
8960        static const struct link_bits {
8961                u16 from;
8962                u16 to;
8963        } opa_link_xlate[] = {
8964                { OPA_LINK_WIDTH_1X, 1 << (1 - 1)  },
8965                { OPA_LINK_WIDTH_2X, 1 << (2 - 1)  },
8966                { OPA_LINK_WIDTH_3X, 1 << (3 - 1)  },
8967                { OPA_LINK_WIDTH_4X, 1 << (4 - 1)  },
8968        };
8969
8970        for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
8971                if (opa_widths & opa_link_xlate[i].from)
8972                        result |= opa_link_xlate[i].to;
8973        }
8974        return result;
8975}
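
/*
 * Worked example (standalone sketch, not driver code) of the width
 * translation above: each OPA_LINK_WIDTH_nX flag maps to bit (n - 1) of
 * the Verify Capability width field, so advertising 1X and 4X yields
 * 0b1001.  The OPA flag values below are assumed to be the usual
 * one-hot encoding purely for illustration.
 */
#if 0	/* illustrative only */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* assumed one-hot OPA width flags: 1X=0x1, 2X=0x2, 3X=0x4, 4X=0x8 */
	const uint16_t opa_1x = 0x1, opa_4x = 0x8;
	uint16_t opa_widths = opa_1x | opa_4x;
	uint16_t vc = 0;

	if (opa_widths & opa_1x)
		vc |= 1 << (1 - 1);
	if (opa_widths & opa_4x)
		vc |= 1 << (4 - 1);

	printf("OPA 0x%x -> VC 0x%x\n", opa_widths, vc);	/* VC 0x9 */
	return 0;
}
#endif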
8976
8977/*
8978 * Set link attributes before moving to polling.
8979 */
8980static int set_local_link_attributes(struct hfi1_pportdata *ppd)
8981{
8982        struct hfi1_devdata *dd = ppd->dd;
8983        u8 enable_lane_tx;
8984        u8 tx_polarity_inversion;
8985        u8 rx_polarity_inversion;
8986        int ret;
8987
8988        /* reset our fabric serdes to clear any lingering problems */
8989        fabric_serdes_reset(dd);
8990
8991        /* set the local tx rate - need to read-modify-write */
8992        ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
8993                               &rx_polarity_inversion, &ppd->local_tx_rate);
8994        if (ret)
8995                goto set_local_link_attributes_fail;
8996
8997        if (dd->dc8051_ver < dc8051_ver(0, 20)) {
8998                /* set the tx rate to the fastest enabled */
8999                if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9000                        ppd->local_tx_rate = 1;
9001                else
9002                        ppd->local_tx_rate = 0;
9003        } else {
9004                /* set the tx rate to all enabled */
9005                ppd->local_tx_rate = 0;
9006                if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9007                        ppd->local_tx_rate |= 2;
9008                if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9009                        ppd->local_tx_rate |= 1;
9010        }
9011
9012        enable_lane_tx = 0xF; /* enable all four lanes */
9013        ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9014                                rx_polarity_inversion, ppd->local_tx_rate);
9015        if (ret != HCMD_SUCCESS)
9016                goto set_local_link_attributes_fail;
9017
9018        /*
9019         * DC supports continuous updates.
9020         */
9021        ret = write_vc_local_phy(dd,
9022                                 0 /* no power management */,
9023                                 1 /* continuous updates */);
9024        if (ret != HCMD_SUCCESS)
9025                goto set_local_link_attributes_fail;
9026
9027        /* z=1 in the next call: AU of 0 is not supported by the hardware */
9028        ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9029                                    ppd->port_crc_mode_enabled);
9030        if (ret != HCMD_SUCCESS)
9031                goto set_local_link_attributes_fail;
9032
9033        ret = write_vc_local_link_width(dd, 0, 0,
9034                                        opa_to_vc_link_widths(
9035                                                ppd->link_width_enabled));
9036        if (ret != HCMD_SUCCESS)
9037                goto set_local_link_attributes_fail;
9038
9039        /* let peer know who we are */
9040        ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9041        if (ret == HCMD_SUCCESS)
9042                return 0;
9043
9044set_local_link_attributes_fail:
9045        dd_dev_err(dd,
9046                   "Failed to set local link attributes, return 0x%x\n",
9047                   ret);
9048        return ret;
9049}
9050
9051/*
9052 * Call this to start the link.  Schedule a retry if the cable is not
9053 * present or if unable to start polling.  Do not do anything if the
9054 * link is disabled.  Returns 0 if link is disabled or moved to polling
9055 */
9056int start_link(struct hfi1_pportdata *ppd)
9057{
9058        if (!ppd->link_enabled) {
9059                dd_dev_info(ppd->dd,
9060                            "%s: stopping link start because link is disabled\n",
9061                            __func__);
9062                return 0;
9063        }
9064        if (!ppd->driver_link_ready) {
9065                dd_dev_info(ppd->dd,
9066                            "%s: stopping link start because driver is not ready\n",
9067                            __func__);
9068                return 0;
9069        }
9070
9071        if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
9072            loopback == LOOPBACK_LCB ||
9073            ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9074                return set_link_state(ppd, HLS_DN_POLL);
9075
9076        dd_dev_info(ppd->dd,
9077                    "%s: stopping link start because no cable is present\n",
9078                    __func__);
9079        return -EAGAIN;
9080}
9081
9082static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9083{
9084        struct hfi1_devdata *dd = ppd->dd;
9085        u64 mask;
9086        unsigned long timeout;
9087
9088        /*
9089         * Check for QSFP interrupt for t_init (SFF 8679)
9090         */
9091        timeout = jiffies + msecs_to_jiffies(2000);
9092        while (1) {
9093                mask = read_csr(dd, dd->hfi1_id ?
9094                                ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9095                if (!(mask & QSFP_HFI0_INT_N)) {
9096                        write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
9097                                  ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
9098                        break;
9099                }
9100                if (time_after(jiffies, timeout)) {
9101                        dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9102                                    __func__);
9103                        break;
9104                }
9105                udelay(2);
9106        }
9107}
9108
9109static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9110{
9111        struct hfi1_devdata *dd = ppd->dd;
9112        u64 mask;
9113
9114        mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9115        if (enable)
9116                mask |= (u64)QSFP_HFI0_INT_N;
9117        else
9118                mask &= ~(u64)QSFP_HFI0_INT_N;
9119        write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9120}
9121
9122void reset_qsfp(struct hfi1_pportdata *ppd)
9123{
9124        struct hfi1_devdata *dd = ppd->dd;
9125        u64 mask, qsfp_mask;
9126
9127        /* Disable INT_N from triggering QSFP interrupts */
9128        set_qsfp_int_n(ppd, 0);
9129
9130        /* Reset the QSFP */
9131        mask = (u64)QSFP_HFI0_RESET_N;
9132        qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
9133        qsfp_mask |= mask;
9134        write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
9135
9136        qsfp_mask = read_csr(dd,
9137                             dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9138        qsfp_mask &= ~mask;
9139        write_csr(dd,
9140                  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9141
9142        udelay(10);
9143
9144        qsfp_mask |= mask;
9145        write_csr(dd,
9146                  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9147
9148        wait_for_qsfp_init(ppd);
9149
9150        /*
9151         * Allow INT_N to trigger the QSFP interrupt to watch
9152         * for alarms and warnings
9153         */
9154        set_qsfp_int_n(ppd, 1);
9155}
9156
9157static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9158                                        u8 *qsfp_interrupt_status)
9159{
9160        struct hfi1_devdata *dd = ppd->dd;
9161
9162        if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9163            (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9164                dd_dev_info(dd, "%s: QSFP cable on fire\n",
9165                            __func__);
9166
9167        if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9168            (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9169                dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9170                            __func__);
9171
9172        if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9173            (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9174                dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9175                            __func__);
9176
9177        if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9178            (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9179                dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9180                            __func__);
9181
9182        /* Byte 2 is vendor specific */
9183
9184        if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9185            (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9186                dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9187                            __func__);
9188
9189        if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9190            (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9191                dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9192                            __func__);
9193
9194        if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9195            (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9196                dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9197                            __func__);
9198
9199        if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9200            (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9201                dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9202                            __func__);
9203
9204        if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9205            (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9206                dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9207                            __func__);
9208
9209        if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9210            (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9211                dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9212                            __func__);
9213
9214        if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9215            (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9216                dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9217                            __func__);
9218
9219        if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9220            (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9221                dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9222                            __func__);
9223
9224        if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9225            (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9226                dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9227                            __func__);
9228
9229        if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9230            (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9231                dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9232                            __func__);
9233
9234        if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9235            (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9236                dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9237                            __func__);
9238
9239        if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9240            (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9241                dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9242                            __func__);
9243
9244        /* Bytes 9-10 and 11-12 are reserved */
9245        /* Bytes 13-15 are vendor specific */
9246
9247        return 0;
9248}
9249
9250/* This routine will only be scheduled if the QSFP module is present */
9251void qsfp_event(struct work_struct *work)
9252{
9253        struct qsfp_data *qd;
9254        struct hfi1_pportdata *ppd;
9255        struct hfi1_devdata *dd;
9256
9257        qd = container_of(work, struct qsfp_data, qsfp_work);
9258        ppd = qd->ppd;
9259        dd = ppd->dd;
9260
9261        /* Sanity check */
9262        if (!qsfp_mod_present(ppd))
9263                return;
9264
9265        /*
9266         * Turn the DC back on after the cable has been re-inserted.
9267         * Up until now, the DC has been held in reset to save
9268         * power.
9269         */
9270        dc_start(dd);
9271
9272        if (qd->cache_refresh_required) {
9273                set_qsfp_int_n(ppd, 0);
9274
9275                wait_for_qsfp_init(ppd);
9276
9277                /*
9278                 * Allow INT_N to trigger the QSFP interrupt to watch
9279                 * for alarms and warnings
9280                 */
9281                set_qsfp_int_n(ppd, 1);
9282
9283                tune_serdes(ppd);
9284
9285                start_link(ppd);
9286        }
9287
9288        if (qd->check_interrupt_flags) {
9289                u8 qsfp_interrupt_status[16] = {0,};
9290
9291                if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9292                                  &qsfp_interrupt_status[0], 16) != 16) {
9293                        dd_dev_info(dd,
9294                                    "%s: Failed to read status of QSFP module\n",
9295                                    __func__);
9296                } else {
9297                        unsigned long flags;
9298
9299                        handle_qsfp_error_conditions(
9300                                        ppd, qsfp_interrupt_status);
9301                        spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9302                        ppd->qsfp_info.check_interrupt_flags = 0;
9303                        spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9304                                               flags);
9305                }
9306        }
9307}
9308
9309static void init_qsfp_int(struct hfi1_devdata *dd)
9310{
9311        struct hfi1_pportdata *ppd = dd->pport;
9312        u64 qsfp_mask, cce_int_mask;
9313        const int qsfp1_int_smask = QSFP1_INT % 64;
9314        const int qsfp2_int_smask = QSFP2_INT % 64;
9315
9316        /*
9317         * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9318         * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9319         * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9320         * the index of the appropriate CSR in the CCEIntMask CSR array
9321         */
9322        cce_int_mask = read_csr(dd, CCE_INT_MASK +
9323                                (8 * (QSFP1_INT / 64)));
9324        if (dd->hfi1_id) {
9325                cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9326                write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9327                          cce_int_mask);
9328        } else {
9329                cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9330                write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9331                          cce_int_mask);
9332        }
9333
9334        qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9335        /* Clear current status to avoid spurious interrupts */
9336        write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9337                  qsfp_mask);
9338        write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9339                  qsfp_mask);
9340
9341        set_qsfp_int_n(ppd, 0);
9342
9343        /* Handle active low nature of INT_N and MODPRST_N pins */
9344        if (qsfp_mod_present(ppd))
9345                qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9346        write_csr(dd,
9347                  dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9348                  qsfp_mask);
9349}
9350
9351/*
9352 * Do a one-time initialize of the LCB block.
9353 */
9354static void init_lcb(struct hfi1_devdata *dd)
9355{
9356        /* simulator does not correctly handle LCB cclk loopback, skip */
9357        if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9358                return;
9359
9360        /* the DC has been reset earlier in the driver load */
9361
9362        /* set LCB for cclk loopback on the port */
9363        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9364        write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9365        write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9366        write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9367        write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9368        write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9369        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9370}
9371
9372int bringup_serdes(struct hfi1_pportdata *ppd)
9373{
9374        struct hfi1_devdata *dd = ppd->dd;
9375        u64 guid;
9376        int ret;
9377
9378        if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9379                add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9380
9381        guid = ppd->guid;
9382        if (!guid) {
9383                if (dd->base_guid)
9384                        guid = dd->base_guid + ppd->port - 1;
9385                ppd->guid = guid;
9386        }
9387
9388        /* Set linkinit_reason on power up per OPA spec */
9389        ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9390
9391        /* one-time init of the LCB */
9392        init_lcb(dd);
9393
9394        if (loopback) {
9395                ret = init_loopback(dd);
9396                if (ret < 0)
9397                        return ret;
9398        }
9399
9400        /*
9401         * Tune the SerDes to a ballpark setting for optimal signal and
9402         * bit error rate.  This must be done before starting the link.
9403         */
9404        tune_serdes(ppd);
9405
9406        return start_link(ppd);
9407}
9408
9409void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9410{
9411        struct hfi1_devdata *dd = ppd->dd;
9412
9413        /*
9414         * Shut down the link and keep it down.  First clear the flag that
9415         * says the driver wants to allow the link to come up
9416         * (driver_link_ready).  Then make sure the link is not
9417         * automatically restarted (link_enabled).  Cancel any pending
9418         * restart, and finally go offline.
9419         */
9420        ppd->driver_link_ready = 0;
9421        ppd->link_enabled = 0;
9422
9423        ppd->offline_disabled_reason =
9424                        HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
9425        set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9426                             OPA_LINKDOWN_REASON_SMA_DISABLED);
9427        set_link_state(ppd, HLS_DN_OFFLINE);
9428
9429        /* disable the port */
9430        clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9431}
9432
9433static inline int init_cpu_counters(struct hfi1_devdata *dd)
9434{
9435        struct hfi1_pportdata *ppd;
9436        int i;
9437
9438        ppd = (struct hfi1_pportdata *)(dd + 1);
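        /* the per-port data is laid out immediately after the device data */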
9439        for (i = 0; i < dd->num_pports; i++, ppd++) {
9440                ppd->ibport_data.rvp.rc_acks = NULL;
9441                ppd->ibport_data.rvp.rc_qacks = NULL;
9442                ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9443                ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9444                ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9445                if (!ppd->ibport_data.rvp.rc_acks ||
9446                    !ppd->ibport_data.rvp.rc_delayed_comp ||
9447                    !ppd->ibport_data.rvp.rc_qacks)
9448                        return -ENOMEM;
9449        }
9450
9451        return 0;
9452}
9453
9454static const char * const pt_names[] = {
9455        "expected",
9456        "eager",
9457        "invalid"
9458};
9459
9460static const char *pt_name(u32 type)
9461{
9462        return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9463}
9464
9465/*
9466 * index is the index into the receive array
9467 */
9468void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9469                  u32 type, unsigned long pa, u16 order)
9470{
9471        u64 reg;
9472        void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9473                              (dd->kregbase + RCV_ARRAY));
9474
9475        if (!(dd->flags & HFI1_PRESENT))
9476                goto done;
9477
9478        if (type == PT_INVALID) {
9479                pa = 0;
9480        } else if (type > PT_INVALID) {
9481                dd_dev_err(dd,
9482                           "unexpected receive array type %u for index %u, not handled\n",
9483                           type, index);
9484                goto done;
9485        }
9486
9487        hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9488                  pt_name(type), index, pa, (unsigned long)order);
9489
9490#define RT_ADDR_SHIFT 12        /* 4KB kernel address boundary */
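        /*
         * Build the RcvArray entry: write enable, buffer size (order), and
         * the 4KB-aligned physical address placed in the address field.
         */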
9491        reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9492                | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9493                | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9494                                        << RCV_ARRAY_RT_ADDR_SHIFT;
9495        writeq(reg, base + (index * 8));
9496
9497        if (type == PT_EAGER)
9498                /*
9499                 * Eager entries are written one-by-one so we have to push them
9500                 * after we write the entry.
9501                 */
9502                flush_wc();
9503done:
9504        return;
9505}
9506
9507void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9508{
9509        struct hfi1_devdata *dd = rcd->dd;
9510        u32 i;
9511
9512        /* this could be optimized */
9513        for (i = rcd->eager_base; i < rcd->eager_base +
9514                     rcd->egrbufs.alloced; i++)
9515                hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9516
9517        for (i = rcd->expected_base;
9518                        i < rcd->expected_base + rcd->expected_count; i++)
9519                hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9520}
9521
9522int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9523                        struct hfi1_ctxt_info *kinfo)
9524{
9525        kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9526                HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9527        return 0;
9528}
9529
9530struct hfi1_message_header *hfi1_get_msgheader(
9531                                struct hfi1_devdata *dd, __le32 *rhf_addr)
9532{
9533        u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9534
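        /* step back to the start of the entry, then forward to the header */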
9535        return (struct hfi1_message_header *)
9536                (rhf_addr - dd->rhf_offset + offset);
9537}
9538
9539static const char * const ib_cfg_name_strings[] = {
9540        "HFI1_IB_CFG_LIDLMC",
9541        "HFI1_IB_CFG_LWID_DG_ENB",
9542        "HFI1_IB_CFG_LWID_ENB",
9543        "HFI1_IB_CFG_LWID",
9544        "HFI1_IB_CFG_SPD_ENB",
9545        "HFI1_IB_CFG_SPD",
9546        "HFI1_IB_CFG_RXPOL_ENB",
9547        "HFI1_IB_CFG_LREV_ENB",
9548        "HFI1_IB_CFG_LINKLATENCY",
9549        "HFI1_IB_CFG_HRTBT",
9550        "HFI1_IB_CFG_OP_VLS",
9551        "HFI1_IB_CFG_VL_HIGH_CAP",
9552        "HFI1_IB_CFG_VL_LOW_CAP",
9553        "HFI1_IB_CFG_OVERRUN_THRESH",
9554        "HFI1_IB_CFG_PHYERR_THRESH",
9555        "HFI1_IB_CFG_LINKDEFAULT",
9556        "HFI1_IB_CFG_PKEYS",
9557        "HFI1_IB_CFG_MTU",
9558        "HFI1_IB_CFG_LSTATE",
9559        "HFI1_IB_CFG_VL_HIGH_LIMIT",
9560        "HFI1_IB_CFG_PMA_TICKS",
9561        "HFI1_IB_CFG_PORT"
9562};
9563
9564static const char *ib_cfg_name(int which)
9565{
9566        if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9567                return "invalid";
9568        return ib_cfg_name_strings[which];
9569}
9570
9571int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9572{
9573        struct hfi1_devdata *dd = ppd->dd;
9574        int val = 0;
9575
9576        switch (which) {
9577        case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9578                val = ppd->link_width_enabled;
9579                break;
9580        case HFI1_IB_CFG_LWID: /* currently active Link-width */
9581                val = ppd->link_width_active;
9582                break;
9583        case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9584                val = ppd->link_speed_enabled;
9585                break;
9586        case HFI1_IB_CFG_SPD: /* current Link speed */
9587                val = ppd->link_speed_active;
9588                break;
9589
9590        case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9591        case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9592        case HFI1_IB_CFG_LINKLATENCY:
9593                goto unimplemented;
9594
9595        case HFI1_IB_CFG_OP_VLS:
9596                val = ppd->vls_operational;
9597                break;
9598        case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9599                val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9600                break;
9601        case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9602                val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9603                break;
9604        case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9605                val = ppd->overrun_threshold;
9606                break;
9607        case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9608                val = ppd->phy_error_threshold;
9609                break;
9610        case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9611                val = dd->link_default;
9612                break;
9613
9614        case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9615        case HFI1_IB_CFG_PMA_TICKS:
9616        default:
9617unimplemented:
9618                if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9619                        dd_dev_info(
9620                                dd,
9621                                "%s: which %s: not implemented\n",
9622                                __func__,
9623                                ib_cfg_name(which));
9624                break;
9625        }
9626
9627        return val;
9628}
9629
9630/*
9631 * The largest MAD packet size.
9632 */
9633#define MAX_MAD_PACKET 2048
9634
9635/*
9636 * Return the maximum header bytes that can go on the _wire_
9637 * for this device. This count includes the ICRC which is
9638 * not part of the packet held in memory but is appended
9639 * by the HW.
9640 * This is dependent on the device's receive header entry size.
9641 * HFI allows this to be set per-receive context, but the
9642 * driver presently enforces a global value.
9643 */
9644u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9645{
9646        /*
9647         * The maximum non-payload (MTU) bytes in LRH.PktLen are
9648         * the Receive Header Entry Size minus the PBC (or RHF) size
9649         * plus one DW for the ICRC appended by HW.
9650         *
9651         * dd->rcd[0].rcvhdrqentsize is in DW.
9652         * We use rcd[0] as all contexts will have the same value. Also,
9653         * the first kernel context would have been allocated by now so
9654         * we are guaranteed a valid value.
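         *
         * E.g., a 32 DW receive header entry size gives
         * (32 - 2 + 1) * 4 = 124 bytes of wire header space.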
9655         */
9656        return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9657}
9658
9659/*
9660 * set_send_length - Set Send Length
9661 * @ppd: per port data
9662 *
9663 * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
9664 * registers compare against LRH.PktLen, so use the max bytes included
9665 * in the LRH.
9666 *
9667 * This routine changes all VL values except VL15, which it maintains at
9668 * the same value.
9669 */
9670static void set_send_length(struct hfi1_pportdata *ppd)
9671{
9672        struct hfi1_devdata *dd = ppd->dd;
9673        u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9674        u32 maxvlmtu = dd->vld[15].mtu;
9675        u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9676                              & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9677                SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9678        int i;
9679
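        /*
         * Pack the per-VL limits: VL0-3 into SEND_LEN_CHECK0 (len1), VL4-7
         * into SEND_LEN_CHECK1 (len2, which already holds VL15).  Each
         * limit is (MTU + max header bytes) expressed in DWs.
         */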
9680        for (i = 0; i < ppd->vls_supported; i++) {
9681                if (dd->vld[i].mtu > maxvlmtu)
9682                        maxvlmtu = dd->vld[i].mtu;
9683                if (i <= 3)
9684                        len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9685                                 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9686                                ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9687                else
9688                        len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9689                                 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9690                                ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9691        }
9692        write_csr(dd, SEND_LEN_CHECK0, len1);
9693        write_csr(dd, SEND_LEN_CHECK1, len2);
9694        /* adjust kernel credit return thresholds based on new MTUs */
9695        /* all kernel receive contexts have the same hdrqentsize */
9696        for (i = 0; i < ppd->vls_supported; i++) {
9697                sc_set_cr_threshold(dd->vld[i].sc,
9698                                    sc_mtu_to_threshold(dd->vld[i].sc,
9699                                                        dd->vld[i].mtu,
9700                                                        dd->rcd[0]->
9701                                                        rcvhdrqentsize));
9702        }
9703        sc_set_cr_threshold(dd->vld[15].sc,
9704                            sc_mtu_to_threshold(dd->vld[15].sc,
9705                                                dd->vld[15].mtu,
9706                                                dd->rcd[0]->rcvhdrqentsize));
9707
9708        /* Adjust maximum MTU for the port in DC */
9709        dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9710                (ilog2(maxvlmtu >> 8) + 1);
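        /* e.g., a 2048 byte maximum MTU encodes as ilog2(2048 >> 8) + 1 = 4 */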
9711        len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9712        len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9713        len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9714                DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9715        write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9716}
9717
9718static void set_lidlmc(struct hfi1_pportdata *ppd)
9719{
9720        int i;
9721        u64 sreg = 0;
9722        struct hfi1_devdata *dd = ppd->dd;
9723        u32 mask = ~((1U << ppd->lmc) - 1);
9724        u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9725
9726        if (dd->hfi1_snoop.mode_flag)
9727                dd_dev_info(dd, "Set lid/lmc while snooping\n");
9728
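        /*
         * mask clears the low LMC bits of the LID; e.g., LMC = 2 gives
         * mask = 0xfffffffc, so the bottom two LID bits are ignored by the
         * DLID and SLID checks programmed below.
         */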
9729        c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9730                | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9731        c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9732                        << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
9733              ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9734                        << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9735        write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9736
9737        /*
9738         * Iterate over all the send contexts and set their SLID check
9739         */
9740        sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9741                        SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9742               (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9743                        SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9744
9745        for (i = 0; i < dd->chip_send_contexts; i++) {
9746                hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9747                          i, (u32)sreg);
9748                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9749        }
9750
9751        /* Now we have to do the same thing for the sdma engines */
9752        sdma_update_lmc(dd, mask, ppd->lid);
9753}
9754
9755static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9756{
9757        unsigned long timeout;
9758        u32 curr_state;
9759
9760        timeout = jiffies + msecs_to_jiffies(msecs);
9761        while (1) {
9762                curr_state = read_physical_state(dd);
9763                if (curr_state == state)
9764                        break;
9765                if (time_after(jiffies, timeout)) {
9766                        dd_dev_err(dd,
9767                                   "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9768                                   state, curr_state);
9769                        return -ETIMEDOUT;
9770                }
9771                usleep_range(1950, 2050); /* sleep 2ms-ish */
9772        }
9773
9774        return 0;
9775}
9776
9777/*
9778 * Helper for set_link_state().  Do not call except from that routine.
9779 * Expects ppd->hls_mutex to be held.
9780 *
9781 * @rem_reason: value to be sent to the neighbor
9782 *
9783 * LinkDownReasons are only set if the transition succeeds.
9784 */
9785static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9786{
9787        struct hfi1_devdata *dd = ppd->dd;
9788        u32 pstate, previous_state;
9789        u32 last_local_state;
9790        u32 last_remote_state;
9791        int ret;
9792        int do_transition;
9793        int do_wait;
9794
9795        previous_state = ppd->host_link_state;
9796        ppd->host_link_state = HLS_GOING_OFFLINE;
9797        pstate = read_physical_state(dd);
9798        if (pstate == PLS_OFFLINE) {
9799                do_transition = 0;      /* in right state */
9800                do_wait = 0;            /* ...no need to wait */
9801        } else if ((pstate & 0xff) == PLS_OFFLINE) {
9802                do_transition = 0;      /* in an offline transient state */
9803                do_wait = 1;            /* ...wait for it to settle */
9804        } else {
9805                do_transition = 1;      /* need to move to offline */
9806                do_wait = 1;            /* ...will need to wait */
9807        }
9808
9809        if (do_transition) {
9810                ret = set_physical_link_state(dd,
9811                                              (rem_reason << 8) | PLS_OFFLINE);
9812
9813                if (ret != HCMD_SUCCESS) {
9814                        dd_dev_err(dd,
9815                                   "Failed to transition to Offline link state, return %d\n",
9816                                   ret);
9817                        return -EINVAL;
9818                }
9819                if (ppd->offline_disabled_reason ==
9820                                HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
9821                        ppd->offline_disabled_reason =
9822                        HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
9823        }
9824
9825        if (do_wait) {
9826                /* it can take a while for the link to go down */
9827                ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
9828                if (ret < 0)
9829                        return ret;
9830        }
9831
9832        /* make sure the logical state is also down */
9833        wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9834
9835        /*
9836         * Now in charge of LCB - must be after the physical state is
9837         * offline.quiet and before host_link_state is changed.
9838         */
9839        set_host_lcb_access(dd);
9840        write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9841        ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9842
9843        if (ppd->port_type == PORT_TYPE_QSFP &&
9844            ppd->qsfp_info.limiting_active &&
9845            qsfp_mod_present(ppd)) {
9846                int ret;
9847
9848                ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
9849                if (ret == 0) {
9850                        set_qsfp_tx(ppd, 0);
9851                        release_chip_resource(dd, qsfp_resource(dd));
9852                } else {
9853                        /* not fatal, but should warn */
9854                        dd_dev_err(dd,
9855                                   "Unable to acquire lock to turn off QSFP TX\n");
9856                }
9857        }
9858
9859        /*
9860         * The LNI has a mandatory wait time after the physical state
9861         * moves to Offline.Quiet.  The wait time may be different
9862         * depending on how the link went down.  The 8051 firmware
9863         * will observe the needed wait time and only move to ready
9864         * when that is completed.  The largest of the quiet timeouts
9865         * is 6s, so wait that long and then at least 0.5s more for
9866         * other transitions, and another 0.5s for a buffer.
9867         */
9868        ret = wait_fm_ready(dd, 7000);
9869        if (ret) {
9870                dd_dev_err(dd,
9871                           "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
9872                /* state is really offline, so make it so */
9873                ppd->host_link_state = HLS_DN_OFFLINE;
9874                return ret;
9875        }
9876
9877        /*
9878         * The state is now offline and the 8051 is ready to accept host
9879         * requests.
9880         *      - change our state
9881         *      - notify others if we were previously in a linkup state
9882         */
9883        ppd->host_link_state = HLS_DN_OFFLINE;
9884        if (previous_state & HLS_UP) {
9885                /* went down while link was up */
9886                handle_linkup_change(dd, 0);
9887        } else if (previous_state &
9888                   (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9889                /* went down while attempting link up */
9890                /* byte 1 of last_*_state is the failure reason */
9891                read_last_local_state(dd, &last_local_state);
9892                read_last_remote_state(dd, &last_remote_state);
9893                dd_dev_err(dd,
9894                           "LNI failure last states: local 0x%08x, remote 0x%08x\n",
9895                           last_local_state, last_remote_state);
9896        }
9897
9898        /* the active link width (downgrade) is 0 on link down */
9899        ppd->link_width_active = 0;
9900        ppd->link_width_downgrade_tx_active = 0;
9901        ppd->link_width_downgrade_rx_active = 0;
9902        ppd->current_egress_rate = 0;
9903        return 0;
9904}
9905
9906/* return the link state name */
9907static const char *link_state_name(u32 state)
9908{
9909        const char *name;
9910        int n = ilog2(state);
9911        static const char * const names[] = {
9912                [__HLS_UP_INIT_BP]       = "INIT",
9913                [__HLS_UP_ARMED_BP]      = "ARMED",
9914                [__HLS_UP_ACTIVE_BP]     = "ACTIVE",
9915                [__HLS_DN_DOWNDEF_BP]    = "DOWNDEF",
9916                [__HLS_DN_POLL_BP]       = "POLL",
9917                [__HLS_DN_DISABLE_BP]    = "DISABLE",
9918                [__HLS_DN_OFFLINE_BP]    = "OFFLINE",
9919                [__HLS_VERIFY_CAP_BP]    = "VERIFY_CAP",
9920                [__HLS_GOING_UP_BP]      = "GOING_UP",
9921                [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
9922                [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
9923        };
9924
9925        name = n < ARRAY_SIZE(names) ? names[n] : NULL;
9926        return name ? name : "unknown";
9927}
9928
9929/* return the link state reason name */
9930static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
9931{
9932        if (state == HLS_UP_INIT) {
9933                switch (ppd->linkinit_reason) {
9934                case OPA_LINKINIT_REASON_LINKUP:
9935                        return "(LINKUP)";
9936                case OPA_LINKINIT_REASON_FLAPPING:
9937                        return "(FLAPPING)";
9938                case OPA_LINKINIT_OUTSIDE_POLICY:
9939                        return "(OUTSIDE_POLICY)";
9940                case OPA_LINKINIT_QUARANTINED:
9941                        return "(QUARANTINED)";
9942                case OPA_LINKINIT_INSUFIC_CAPABILITY:
9943                        return "(INSUFIC_CAPABILITY)";
9944                default:
9945                        break;
9946                }
9947        }
9948        return "";
9949}
9950
9951/*
9952 * driver_physical_state - convert the driver's notion of a port's
9953 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
9954 * Return -1 (converted to a u32) to indicate error.
9955 */
9956u32 driver_physical_state(struct hfi1_pportdata *ppd)
9957{
9958        switch (ppd->host_link_state) {
9959        case HLS_UP_INIT:
9960        case HLS_UP_ARMED:
9961        case HLS_UP_ACTIVE:
9962                return IB_PORTPHYSSTATE_LINKUP;
9963        case HLS_DN_POLL:
9964                return IB_PORTPHYSSTATE_POLLING;
9965        case HLS_DN_DISABLE:
9966                return IB_PORTPHYSSTATE_DISABLED;
9967        case HLS_DN_OFFLINE:
9968                return OPA_PORTPHYSSTATE_OFFLINE;
9969        case HLS_VERIFY_CAP:
9970                return IB_PORTPHYSSTATE_POLLING;
9971        case HLS_GOING_UP:
9972                return IB_PORTPHYSSTATE_POLLING;
9973        case HLS_GOING_OFFLINE:
9974                return OPA_PORTPHYSSTATE_OFFLINE;
9975        case HLS_LINK_COOLDOWN:
9976                return OPA_PORTPHYSSTATE_OFFLINE;
9977        case HLS_DN_DOWNDEF:
9978        default:
9979                dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9980                           ppd->host_link_state);
9981                return  -1;
9982        }
9983}
9984
9985/*
9986 * driver_logical_state - convert the driver's notion of a port's
9987 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
9988 * (converted to a u32) to indicate error.
9989 */
9990u32 driver_logical_state(struct hfi1_pportdata *ppd)
9991{
9992        if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
9993                return IB_PORT_DOWN;
9994
9995        switch (ppd->host_link_state & HLS_UP) {
9996        case HLS_UP_INIT:
9997                return IB_PORT_INIT;
9998        case HLS_UP_ARMED:
9999                return IB_PORT_ARMED;
10000        case HLS_UP_ACTIVE:
10001                return IB_PORT_ACTIVE;
10002        default:
10003                dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10004                           ppd->host_link_state);
10005                return -1;
10006        }
10007}
10008
10009void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10010                          u8 neigh_reason, u8 rem_reason)
10011{
10012        if (ppd->local_link_down_reason.latest == 0 &&
10013            ppd->neigh_link_down_reason.latest == 0) {
10014                ppd->local_link_down_reason.latest = lcl_reason;
10015                ppd->neigh_link_down_reason.latest = neigh_reason;
10016                ppd->remote_link_down_reason = rem_reason;
10017        }
10018}
10019
10020/*
10021 * Change the physical and/or logical link state.
10022 *
10023 * Do not call this routine while inside an interrupt.  It contains
10024 * calls to routines that can take multiple seconds to finish.
10025 *
10026 * Returns 0 on success, -errno on failure.
10027 */
10028int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10029{
10030        struct hfi1_devdata *dd = ppd->dd;
10031        struct ib_event event = {.device = NULL};
10032        int ret1, ret = 0;
10033        int was_up, is_down;
10034        int orig_new_state, poll_bounce;
10035
10036        mutex_lock(&ppd->hls_lock);
10037
10038        orig_new_state = state;
10039        if (state == HLS_DN_DOWNDEF)
10040                state = dd->link_default;
10041
10042        /* interpret poll -> poll as a link bounce */
10043        poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10044                      state == HLS_DN_POLL;
10045
10046        dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10047                    link_state_name(ppd->host_link_state),
10048                    link_state_name(orig_new_state),
10049                    poll_bounce ? "(bounce) " : "",
10050                    link_state_reason_name(ppd, state));
10051
10052        was_up = !!(ppd->host_link_state & HLS_UP);
10053
10054        /*
10055         * If we're going to a (HLS_*) link state that implies the logical
10056         * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10057         * reset is_sm_config_started to 0.
10058         */
10059        if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10060                ppd->is_sm_config_started = 0;
10061
10062        /*
10063         * Do nothing if the states match.  Let a poll to poll link bounce
10064         * go through.
10065         */
10066        if (ppd->host_link_state == state && !poll_bounce)
10067                goto done;
10068
10069        switch (state) {
10070        case HLS_UP_INIT:
10071                if (ppd->host_link_state == HLS_DN_POLL &&
10072                    (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10073                        /*
10074                         * Quick link up jumps from polling to here.
10075                         *
10076                         * Whether in normal or loopback mode, the
10077                         * simulator jumps from polling to link up.
10078                         * Accept that here.
10079                         */
10080                        /* OK */
10081                } else if (ppd->host_link_state != HLS_GOING_UP) {
10082                        goto unexpected;
10083                }
10084
10085                ppd->host_link_state = HLS_UP_INIT;
10086                ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10087                if (ret) {
10088                        /* logical state didn't change, stay at going_up */
10089                        ppd->host_link_state = HLS_GOING_UP;
10090                        dd_dev_err(dd,
10091                                   "%s: logical state did not change to INIT\n",
10092                                   __func__);
10093                } else {
10094                        /* clear old transient LINKINIT_REASON code */
10095                        if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10096                                ppd->linkinit_reason =
10097                                        OPA_LINKINIT_REASON_LINKUP;
10098
10099                        /* enable the port */
10100                        add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10101
10102                        handle_linkup_change(dd, 1);
10103                }
10104                break;
10105        case HLS_UP_ARMED:
10106                if (ppd->host_link_state != HLS_UP_INIT)
10107                        goto unexpected;
10108
10109                ppd->host_link_state = HLS_UP_ARMED;
10110                set_logical_state(dd, LSTATE_ARMED);
10111                ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10112                if (ret) {
10113                        /* logical state didn't change, stay at init */
10114                        ppd->host_link_state = HLS_UP_INIT;
10115                        dd_dev_err(dd,
10116                                   "%s: logical state did not change to ARMED\n",
10117                                   __func__);
10118                }
10119                /*
10120                 * The simulator does not currently implement SMA messages,
10121                 * so neighbor_normal is not set.  Set it here when we first
10122                 * move to Armed.
10123                 */
10124                if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10125                        ppd->neighbor_normal = 1;
10126                break;
10127        case HLS_UP_ACTIVE:
10128                if (ppd->host_link_state != HLS_UP_ARMED)
10129                        goto unexpected;
10130
10131                ppd->host_link_state = HLS_UP_ACTIVE;
10132                set_logical_state(dd, LSTATE_ACTIVE);
10133                ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10134                if (ret) {
10135                        /* logical state didn't change, stay at armed */
10136                        ppd->host_link_state = HLS_UP_ARMED;
10137                        dd_dev_err(dd,
10138                                   "%s: logical state did not change to ACTIVE\n",
10139                                   __func__);
10140                } else {
10141                        /* tell all engines to go running */
10142                        sdma_all_running(dd);
10143
10144                        /* Signal the IB layer that the port has gone active */
10145                        event.device = &dd->verbs_dev.rdi.ibdev;
10146                        event.element.port_num = ppd->port;
10147                        event.event = IB_EVENT_PORT_ACTIVE;
10148                }
10149                break;
10150        case HLS_DN_POLL:
10151                if ((ppd->host_link_state == HLS_DN_DISABLE ||
10152                     ppd->host_link_state == HLS_DN_OFFLINE) &&
10153                    dd->dc_shutdown)
10154                        dc_start(dd);
10155                /* Hand LED control to the DC */
10156                write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10157
10158                if (ppd->host_link_state != HLS_DN_OFFLINE) {
10159                        u8 tmp = ppd->link_enabled;
10160
10161                        ret = goto_offline(ppd, ppd->remote_link_down_reason);
10162                        if (ret) {
10163                                ppd->link_enabled = tmp;
10164                                break;
10165                        }
10166                        ppd->remote_link_down_reason = 0;
10167
10168                        if (ppd->driver_link_ready)
10169                                ppd->link_enabled = 1;
10170                }
10171
10172                set_all_slowpath(ppd->dd);
10173                ret = set_local_link_attributes(ppd);
10174                if (ret)
10175                        break;
10176
10177                ppd->port_error_action = 0;
10178                ppd->host_link_state = HLS_DN_POLL;
10179
10180                if (quick_linkup) {
10181                        /* quick linkup does not go into polling */
10182                        ret = do_quick_linkup(dd);
10183                } else {
10184                        ret1 = set_physical_link_state(dd, PLS_POLLING);
10185                        if (ret1 != HCMD_SUCCESS) {
10186                                dd_dev_err(dd,
10187                                           "Failed to transition to Polling link state, return 0x%x\n",
10188                                           ret1);
10189                                ret = -EINVAL;
10190                        }
10191                }
10192                ppd->offline_disabled_reason =
10193                        HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10194                /*
10195                 * If an error occurred above, go back to offline.  The
10196                 * caller may reschedule another attempt.
10197                 */
10198                if (ret)
10199                        goto_offline(ppd, 0);
10200                break;
10201        case HLS_DN_DISABLE:
10202                /* link is disabled */
10203                ppd->link_enabled = 0;
10204
10205                /* allow any state to transition to disabled */
10206
10207                /* must transition to offline first */
10208                if (ppd->host_link_state != HLS_DN_OFFLINE) {
10209                        ret = goto_offline(ppd, ppd->remote_link_down_reason);
10210                        if (ret)
10211                                break;
10212                        ppd->remote_link_down_reason = 0;
10213                }
10214
10215                ret1 = set_physical_link_state(dd, PLS_DISABLED);
10216                if (ret1 != HCMD_SUCCESS) {
10217                        dd_dev_err(dd,
10218                                   "Failed to transition to Disabled link state, return 0x%x\n",
10219                                   ret1);
10220                        ret = -EINVAL;
10221                        break;
10222                }
10223                ppd->host_link_state = HLS_DN_DISABLE;
10224                dc_shutdown(dd);
10225                break;
10226        case HLS_DN_OFFLINE:
10227                if (ppd->host_link_state == HLS_DN_DISABLE)
10228                        dc_start(dd);
10229
10230                /* allow any state to transition to offline */
10231                ret = goto_offline(ppd, ppd->remote_link_down_reason);
10232                if (!ret)
10233                        ppd->remote_link_down_reason = 0;
10234                break;
10235        case HLS_VERIFY_CAP:
10236                if (ppd->host_link_state != HLS_DN_POLL)
10237                        goto unexpected;
10238                ppd->host_link_state = HLS_VERIFY_CAP;
10239                break;
10240        case HLS_GOING_UP:
10241                if (ppd->host_link_state != HLS_VERIFY_CAP)
10242                        goto unexpected;
10243
10244                ret1 = set_physical_link_state(dd, PLS_LINKUP);
10245                if (ret1 != HCMD_SUCCESS) {
10246                        dd_dev_err(dd,
10247                                   "Failed to transition to link up state, return 0x%x\n",
10248                                   ret1);
10249                        ret = -EINVAL;
10250                        break;
10251                }
10252                ppd->host_link_state = HLS_GOING_UP;
10253                break;
10254
10255        case HLS_GOING_OFFLINE:         /* transient within goto_offline() */
10256        case HLS_LINK_COOLDOWN:         /* transient within goto_offline() */
10257        default:
10258                dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10259                            __func__, state);
10260                ret = -EINVAL;
10261                break;
10262        }
10263
10264        is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
10265                        HLS_DN_DISABLE | HLS_DN_OFFLINE));
10266
10267        if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
10268            ppd->neigh_link_down_reason.sma == 0) {
10269                ppd->local_link_down_reason.sma =
10270                  ppd->local_link_down_reason.latest;
10271                ppd->neigh_link_down_reason.sma =
10272                  ppd->neigh_link_down_reason.latest;
10273        }
10274
10275        goto done;
10276
10277unexpected:
10278        dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10279                   __func__, link_state_name(ppd->host_link_state),
10280                   link_state_name(state));
10281        ret = -EINVAL;
10282
10283done:
10284        mutex_unlock(&ppd->hls_lock);
10285
10286        if (event.device)
10287                ib_dispatch_event(&event);
10288
10289        return ret;
10290}
10291
10292int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10293{
10294        u64 reg;
10295        int ret = 0;
10296
10297        switch (which) {
10298        case HFI1_IB_CFG_LIDLMC:
10299                set_lidlmc(ppd);
10300                break;
10301        case HFI1_IB_CFG_VL_HIGH_LIMIT:
10302                /*
10303                 * The VL Arbitrator high limit is sent in units of 4k
10304                 * bytes, while HFI stores it in units of 64 bytes.
10305                 */
10306                val *= 4096 / 64;
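                /* e.g., an incoming limit of 2 (8 KB) becomes 128 64-byte units */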
10307                reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10308                        << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10309                write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10310                break;
10311        case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10312                /* HFI only supports POLL as the default link down state */
10313                if (val != HLS_DN_POLL)
10314                        ret = -EINVAL;
10315                break;
10316        case HFI1_IB_CFG_OP_VLS:
10317                if (ppd->vls_operational != val) {
10318                        ppd->vls_operational = val;
10319                        if (!ppd->port)
10320                                ret = -EINVAL;
10321                }
10322                break;
10323        /*
10324         * For link width, link width downgrade, and speed enable, always AND
10325         * the setting with what is actually supported.  This has two benefits.
10326         * First, enabled can't have unsupported values, no matter what the
10327         * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
10328         * "fill in with your supported value" have all the bits in the
10329         * field set, so simply ANDing with supported has the desired result.
10330         */
10331        case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10332                ppd->link_width_enabled = val & ppd->link_width_supported;
10333                break;
10334        case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10335                ppd->link_width_downgrade_enabled =
10336                                val & ppd->link_width_downgrade_supported;
10337                break;
10338        case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10339                ppd->link_speed_enabled = val & ppd->link_speed_supported;
10340                break;
10341        case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10342                /*
10343                 * HFI does not follow IB specs, save this value
10344                 * so we can report it, if asked.
10345                 */
10346                ppd->overrun_threshold = val;
10347                break;
10348        case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10349                /*
10350                 * HFI does not follow IB specs, save this value
10351                 * so we can report it, if asked.
10352                 */
10353                ppd->phy_error_threshold = val;
10354                break;
10355
10356        case HFI1_IB_CFG_MTU:
10357                set_send_length(ppd);
10358                break;
10359
10360        case HFI1_IB_CFG_PKEYS:
10361                if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10362                        set_partition_keys(ppd);
10363                break;
10364
10365        default:
10366                if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10367                        dd_dev_info(ppd->dd,
10368                                    "%s: which %s, val 0x%x: not implemented\n",
10369                                    __func__, ib_cfg_name(which), val);
10370                break;
10371        }
10372        return ret;
10373}
10374
10375/* begin functions related to vl arbitration table caching */
10376static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10377{
10378        int i;
10379
10380        BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10381                        VL_ARB_LOW_PRIO_TABLE_SIZE);
10382        BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10383                        VL_ARB_HIGH_PRIO_TABLE_SIZE);
10384
10385        /*
10386         * Note that we always return values directly from the
10387         * 'vl_arb_cache' (and do no CSR reads) in response to a
10388         * 'Get(VLArbTable)'. This is obviously correct after a
10389         * 'Set(VLArbTable)', since the cache will then be up to
10390         * date. But it's also correct prior to any 'Set(VLArbTable)'
10391         * since then both the cache and the relevant h/w registers
10392         * will be zeroed.
10393         */
10394
10395        for (i = 0; i < MAX_PRIO_TABLE; i++)
10396                spin_lock_init(&ppd->vl_arb_cache[i].lock);
10397}
10398
10399/*
10400 * vl_arb_lock_cache
10401 *
10402 * All other vl_arb_* functions should be called only after locking
10403 * the cache.
10404 */
10405static inline struct vl_arb_cache *
10406vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10407{
10408        if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10409                return NULL;
10410        spin_lock(&ppd->vl_arb_cache[idx].lock);
10411        return &ppd->vl_arb_cache[idx];
10412}
10413
10414static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10415{
10416        spin_unlock(&ppd->vl_arb_cache[idx].lock);
10417}
10418
10419static void vl_arb_get_cache(struct vl_arb_cache *cache,
10420                             struct ib_vl_weight_elem *vl)
10421{
10422        memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10423}
10424
10425static void vl_arb_set_cache(struct vl_arb_cache *cache,
10426                             struct ib_vl_weight_elem *vl)
10427{
10428        memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10429}
10430
10431static int vl_arb_match_cache(struct vl_arb_cache *cache,
10432                              struct ib_vl_weight_elem *vl)
10433{
10434        return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10435}
10436
10437/* end functions related to vl arbitration table caching */
10438
10439static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10440                          u32 size, struct ib_vl_weight_elem *vl)
10441{
10442        struct hfi1_devdata *dd = ppd->dd;
10443        u64 reg;
10444        unsigned int i, is_up = 0;
10445        int drain, ret = 0;
10446
10447        mutex_lock(&ppd->hls_lock);
10448
10449        if (ppd->host_link_state & HLS_UP)
10450                is_up = 1;
10451
10452        drain = !is_ax(dd) && is_up;
10453
10454        if (drain)
10455                /*
10456                 * Before adjusting VL arbitration weights, empty per-VL
10457                 * FIFOs, otherwise a packet whose VL weight is being
10458                 * set to 0 could get stuck in a FIFO with no chance to
10459                 * egress.
10460                 */
10461                ret = stop_drain_data_vls(dd);
10462
10463        if (ret) {
10464                dd_dev_err(
10465                        dd,
10466                        "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10467                        __func__);
10468                goto err;
10469        }
10470
10471        for (i = 0; i < size; i++, vl++) {
10472                /*
10473                 * NOTE: The low priority shift and mask are used here, but
10474                 * they are the same for both the low and high registers.
10475                 */
10476                reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10477                                << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10478                      | (((u64)vl->weight
10479                                & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10480                                << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10481                write_csr(dd, target + (i * 8), reg);
10482        }
10483        pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10484
10485        if (drain)
10486                open_fill_data_vls(dd); /* reopen all VLs */
10487
10488err:
10489        mutex_unlock(&ppd->hls_lock);
10490
10491        return ret;
10492}
10493
10494/*
10495 * Read one credit merge VL register.
10496 */
10497static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10498                           struct vl_limit *vll)
10499{
10500        u64 reg = read_csr(dd, csr);
10501
10502        vll->dedicated = cpu_to_be16(
10503                (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10504                & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10505        vll->shared = cpu_to_be16(
10506                (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10507                & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10508}
10509
10510/*
10511 * Read the current credit merge limits.
10512 */
10513static int get_buffer_control(struct hfi1_devdata *dd,
10514                              struct buffer_control *bc, u16 *overall_limit)
10515{
10516        u64 reg;
10517        int i;
10518
10519        /* not all entries are filled in */
10520        memset(bc, 0, sizeof(*bc));
10521
10522        /* OPA and HFI have a 1-1 mapping */
10523        for (i = 0; i < TXE_NUM_DATA_VL; i++)
10524                read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
10525
10526        /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10527        read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10528
10529        reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10530        bc->overall_shared_limit = cpu_to_be16(
10531                (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10532                & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10533        if (overall_limit)
10534                *overall_limit = (reg
10535                        >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10536                        & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10537        return sizeof(struct buffer_control);
10538}
10539
10540static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10541{
10542        u64 reg;
10543        int i;
10544
10545        /* each register contains 16 SC->VLnt mappings, 4 bits each */
10546        reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10547        for (i = 0; i < sizeof(u64); i++) {
10548                u8 byte = *(((u8 *)&reg) + i);
10549
10550                dp->vlnt[2 * i] = byte & 0xf;
10551                dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10552        }
10553
10554        reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10555        for (i = 0; i < sizeof(u64); i++) {
10556                u8 byte = *(((u8 *)&reg) + i);
10557
10558                dp->vlnt[16 + (2 * i)] = byte & 0xf;
10559                dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10560        }
10561        return sizeof(struct sc2vlnt);
10562}
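
/*
 * Illustration only, not used by the driver: the two loops above alias
 * the 64-bit CSR value through a byte pointer, which relies on a
 * little-endian host.  Assuming, as that implies, that entry i occupies
 * bits [4i+3:4i] of the register, an endian-independent sketch that
 * extracts the same 4-bit SC->VLnt fields would be:
 *
 *	for (i = 0; i < 16; i++)
 *		dp->vlnt[i] = (reg >> (4 * i)) & 0xf;
 *
 * with the same form applied to DCC_CFG_SC_VL_TABLE_31_16 for
 * entries 16-31.
 */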
10563
10564static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10565                              struct ib_vl_weight_elem *vl)
10566{
10567        unsigned int i;
10568
10569        for (i = 0; i < nelems; i++, vl++) {
10570                vl->vl = 0xf;
10571                vl->weight = 0;
10572        }
10573}
10574
10575static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10576{
10577        write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10578                  DC_SC_VL_VAL(15_0,
10579                               0, dp->vlnt[0] & 0xf,
10580                               1, dp->vlnt[1] & 0xf,
10581                               2, dp->vlnt[2] & 0xf,
10582                               3, dp->vlnt[3] & 0xf,
10583                               4, dp->vlnt[4] & 0xf,
10584                               5, dp->vlnt[5] & 0xf,
10585                               6, dp->vlnt[6] & 0xf,
10586                               7, dp->vlnt[7] & 0xf,
10587                               8, dp->vlnt[8] & 0xf,
10588                               9, dp->vlnt[9] & 0xf,
10589                               10, dp->vlnt[10] & 0xf,
10590                               11, dp->vlnt[11] & 0xf,
10591                               12, dp->vlnt[12] & 0xf,
10592                               13, dp->vlnt[13] & 0xf,
10593                               14, dp->vlnt[14] & 0xf,
10594                               15, dp->vlnt[15] & 0xf));
10595        write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10596                  DC_SC_VL_VAL(31_16,
10597                               16, dp->vlnt[16] & 0xf,
10598                               17, dp->vlnt[17] & 0xf,
10599                               18, dp->vlnt[18] & 0xf,
10600                               19, dp->vlnt[19] & 0xf,
10601                               20, dp->vlnt[20] & 0xf,
10602                               21, dp->vlnt[21] & 0xf,
10603                               22, dp->vlnt[22] & 0xf,
10604                               23, dp->vlnt[23] & 0xf,
10605                               24, dp->vlnt[24] & 0xf,
10606                               25, dp->vlnt[25] & 0xf,
10607                               26, dp->vlnt[26] & 0xf,
10608                               27, dp->vlnt[27] & 0xf,
10609                               28, dp->vlnt[28] & 0xf,
10610                               29, dp->vlnt[29] & 0xf,
10611                               30, dp->vlnt[30] & 0xf,
10612                               31, dp->vlnt[31] & 0xf));
10613}
10614
10615static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10616                        u16 limit)
10617{
10618        if (limit != 0)
10619                dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10620                            what, (int)limit, idx);
10621}
10622
10623/* change only the shared limit portion of SendCmGlobalCredit */
10624static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10625{
10626        u64 reg;
10627
10628        reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10629        reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10630        reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10631        write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10632}
10633
10634/* change only the total credit limit portion of SendCmGlobalCredit */
10635static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10636{
10637        u64 reg;
10638
10639        reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10640        reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10641        reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10642        write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10643}
10644
10645/* set the given per-VL shared limit */
10646static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10647{
10648        u64 reg;
10649        u32 addr;
10650
10651        if (vl < TXE_NUM_DATA_VL)
10652                addr = SEND_CM_CREDIT_VL + (8 * vl);
10653        else
10654                addr = SEND_CM_CREDIT_VL15;
10655
10656        reg = read_csr(dd, addr);
10657        reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10658        reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10659        write_csr(dd, addr, reg);
10660}
10661
10662/* set the given per-VL dedicated limit */
10663static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10664{
10665        u64 reg;
10666        u32 addr;
10667
10668        if (vl < TXE_NUM_DATA_VL)
10669                addr = SEND_CM_CREDIT_VL + (8 * vl);
10670        else
10671                addr = SEND_CM_CREDIT_VL15;
10672
10673        reg = read_csr(dd, addr);
10674        reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10675        reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10676        write_csr(dd, addr, reg);
10677}
10678
10679/* spin until the given per-VL status mask bits clear */
10680static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10681                                     const char *which)
10682{
10683        unsigned long timeout;
10684        u64 reg;
10685
10686        timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10687        while (1) {
10688                reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10689
10690                if (reg == 0)
10691                        return; /* success */
10692                if (time_after(jiffies, timeout))
10693                        break;          /* timed out */
10694                udelay(1);
10695        }
10696
10697        dd_dev_err(dd,
10698                   "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10699                   which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10700        /*
10701         * If this occurs, it is likely there was a credit loss on the link.
10702         * The only recovery from that is a link bounce.
10703         */
10704        dd_dev_err(dd,
10705                   "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
10706}
10707
10708/*
10709 * The number of credits on the VLs may be changed while everything
10710 * is "live", but the following algorithm must be followed due to
10711 * how the hardware is actually implemented.  In particular,
10712 * Return_Credit_Status[] is the only correct status check.
10713 *
10714 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10715 *     set Global_Shared_Credit_Limit = 0
10716 *     use_all_vl = 1
10717 * mask0 = all VLs that are changing either dedicated or shared limits
10718 * set Shared_Limit[mask0] = 0
10719 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10720 * if (changing any dedicated limit)
10721 *     mask1 = all VLs that are lowering dedicated limits
10722 *     lower Dedicated_Limit[mask1]
10723 *     spin until Return_Credit_Status[mask1] == 0
10724 *     raise Dedicated_Limits
10725 * raise Shared_Limits
10726 * raise Global_Shared_Credit_Limit
10727 *
10728 * lower = if the new limit is lower, set the limit to the new value
10729 * raise = if the new limit is higher than the current value (which may have
10730 *      been changed earlier in the algorithm), set the limit to the new value
10731 */
10732int set_buffer_control(struct hfi1_pportdata *ppd,
10733                       struct buffer_control *new_bc)
10734{
10735        struct hfi1_devdata *dd = ppd->dd;
10736        u64 changing_mask, ld_mask, stat_mask;
10737        int change_count;
10738        int i, use_all_mask;
10739        int this_shared_changing;
10740        int vl_count = 0, ret;
10741        /*
10742         * A0: the variable any_shared_limit_changing below, and its use in the
10743         * algorithm above, exist only for A0 hardware and can be removed with it.
10744         */
10745        int any_shared_limit_changing;
10746        struct buffer_control cur_bc;
10747        u8 changing[OPA_MAX_VLS];
10748        u8 lowering_dedicated[OPA_MAX_VLS];
10749        u16 cur_total;
10750        u32 new_total = 0;
10751        const u64 all_mask =
10752        SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10753         | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10754         | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10755         | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10756         | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10757         | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10758         | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10759         | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10760         | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10761
10762#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10763#define NUM_USABLE_VLS 16       /* look at VL15 and less */
10764
10765        /* find the new total credits, do sanity check on unused VLs */
10766        for (i = 0; i < OPA_MAX_VLS; i++) {
10767                if (valid_vl(i)) {
10768                        new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10769                        continue;
10770                }
10771                nonzero_msg(dd, i, "dedicated",
10772                            be16_to_cpu(new_bc->vl[i].dedicated));
10773                nonzero_msg(dd, i, "shared",
10774                            be16_to_cpu(new_bc->vl[i].shared));
10775                new_bc->vl[i].dedicated = 0;
10776                new_bc->vl[i].shared = 0;
10777        }
10778        new_total += be16_to_cpu(new_bc->overall_shared_limit);
10779
10780        /* fetch the current values */
10781        get_buffer_control(dd, &cur_bc, &cur_total);
10782
10783        /*
10784         * Create the masks we will use.
10785         */
10786        memset(changing, 0, sizeof(changing));
10787        memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
10788        /*
10789         * NOTE: Assumes that the individual VL bits are adjacent and in
10790         * increasing order
10791         */
10792        stat_mask =
10793                SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10794        changing_mask = 0;
10795        ld_mask = 0;
10796        change_count = 0;
10797        any_shared_limit_changing = 0;
10798        for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10799                if (!valid_vl(i))
10800                        continue;
10801                this_shared_changing = new_bc->vl[i].shared
10802                                                != cur_bc.vl[i].shared;
10803                if (this_shared_changing)
10804                        any_shared_limit_changing = 1;
10805                if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
10806                    this_shared_changing) {
10807                        changing[i] = 1;
10808                        changing_mask |= stat_mask;
10809                        change_count++;
10810                }
10811                if (be16_to_cpu(new_bc->vl[i].dedicated) <
10812                                        be16_to_cpu(cur_bc.vl[i].dedicated)) {
10813                        lowering_dedicated[i] = 1;
10814                        ld_mask |= stat_mask;
10815                }
10816        }
10817
10818        /* bracket the credit change with a total adjustment */
10819        if (new_total > cur_total)
10820                set_global_limit(dd, new_total);
10821
10822        /*
10823         * Start the credit change algorithm.
10824         */
10825        use_all_mask = 0;
10826        if ((be16_to_cpu(new_bc->overall_shared_limit) <
10827             be16_to_cpu(cur_bc.overall_shared_limit)) ||
10828            (is_ax(dd) && any_shared_limit_changing)) {
10829                set_global_shared(dd, 0);
10830                cur_bc.overall_shared_limit = 0;
10831                use_all_mask = 1;
10832        }
10833
10834        for (i = 0; i < NUM_USABLE_VLS; i++) {
10835                if (!valid_vl(i))
10836                        continue;
10837
10838                if (changing[i]) {
10839                        set_vl_shared(dd, i, 0);
10840                        cur_bc.vl[i].shared = 0;
10841                }
10842        }
10843
10844        wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
10845                                 "shared");
10846
10847        if (change_count > 0) {
10848                for (i = 0; i < NUM_USABLE_VLS; i++) {
10849                        if (!valid_vl(i))
10850                                continue;
10851
10852                        if (lowering_dedicated[i]) {
10853                                set_vl_dedicated(dd, i,
10854                                                 be16_to_cpu(new_bc->
10855                                                             vl[i].dedicated));
10856                                cur_bc.vl[i].dedicated =
10857                                                new_bc->vl[i].dedicated;
10858                        }
10859                }
10860
10861                wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10862
10863                /* now raise all dedicated that are going up */
10864                for (i = 0; i < NUM_USABLE_VLS; i++) {
10865                        if (!valid_vl(i))
10866                                continue;
10867
10868                        if (be16_to_cpu(new_bc->vl[i].dedicated) >
10869                                        be16_to_cpu(cur_bc.vl[i].dedicated))
10870                                set_vl_dedicated(dd, i,
10871                                                 be16_to_cpu(new_bc->
10872                                                             vl[i].dedicated));
10873                }
10874        }
10875
10876        /* next raise all shared that are going up */
10877        for (i = 0; i < NUM_USABLE_VLS; i++) {
10878                if (!valid_vl(i))
10879                        continue;
10880
10881                if (be16_to_cpu(new_bc->vl[i].shared) >
10882                                be16_to_cpu(cur_bc.vl[i].shared))
10883                        set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10884        }
10885
10886        /* finally raise the global shared */
10887        if (be16_to_cpu(new_bc->overall_shared_limit) >
10888            be16_to_cpu(cur_bc.overall_shared_limit))
10889                set_global_shared(dd,
10890                                  be16_to_cpu(new_bc->overall_shared_limit));
10891
10892        /* bracket the credit change with a total adjustment */
10893        if (new_total < cur_total)
10894                set_global_limit(dd, new_total);
10895
10896        /*
10897         * Determine the actual number of operational VLS using the number of
10898         * dedicated and shared credits for each VL.
10899         */
10900        if (change_count > 0) {
10901                for (i = 0; i < TXE_NUM_DATA_VL; i++)
10902                        if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
10903                            be16_to_cpu(new_bc->vl[i].shared) > 0)
10904                                vl_count++;
10905                ppd->actual_vls_operational = vl_count;
10906                ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
10907                                    ppd->actual_vls_operational :
10908                                    ppd->vls_operational,
10909                                    NULL);
10910                if (ret == 0)
10911                        ret = pio_map_init(dd, ppd->port - 1, vl_count ?
10912                                           ppd->actual_vls_operational :
10913                                           ppd->vls_operational, NULL);
10914                if (ret)
10915                        return ret;
10916        }
10917        return 0;
10918}
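
/*
 * Worked example with hypothetical numbers, for illustration only:
 * lowering VL0's dedicated limit from 100 to 60 while leaving its shared
 * limit at 20 proceeds as follows under the algorithm above:
 *
 *  1. new_total (60 + overall shared) is below cur_total, so the global
 *     total limit is not raised up front.
 *  2. VL0 is marked as changing, its shared limit is forced to 0, and we
 *     spin until VL0's Return_Credit_Status clears.
 *  3. VL0 is lowering its dedicated limit, so 60 is written and we spin
 *     on Return_Credit_Status again; no dedicated limit needs raising.
 *  4. VL0's shared limit is raised back to 20, and the global total
 *     limit is lowered to match new_total at the end.
 */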
10919
10920/*
10921 * Read the given fabric manager table. Return the size of the
10922 * table (in bytes) on success, and a negative error code on
10923 * failure.
10924 */
10925int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
10926
10927{
10928        int size;
10929        struct vl_arb_cache *vlc;
10930
10931        switch (which) {
10932        case FM_TBL_VL_HIGH_ARB:
10933                size = 256;
10934                /*
10935                 * OPA specifies 128 elements (of 2 bytes each), though
10936                 * HFI supports only 16 elements in h/w.
10937                 */
10938                vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10939                vl_arb_get_cache(vlc, t);
10940                vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10941                break;
10942        case FM_TBL_VL_LOW_ARB:
10943                size = 256;
10944                /*
10945                 * OPA specifies 128 elements (of 2 bytes each), though
10946                 * HFI supports only 16 elements in h/w.
10947                 */
10948                vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10949                vl_arb_get_cache(vlc, t);
10950                vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10951                break;
10952        case FM_TBL_BUFFER_CONTROL:
10953                size = get_buffer_control(ppd->dd, t, NULL);
10954                break;
10955        case FM_TBL_SC2VLNT:
10956                size = get_sc2vlnt(ppd->dd, t);
10957                break;
10958        case FM_TBL_VL_PREEMPT_ELEMS:
10959                size = 256;
10960                /* OPA specifies 128 elements, of 2 bytes each */
10961                get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
10962                break;
10963        case FM_TBL_VL_PREEMPT_MATRIX:
10964                size = 256;
10965                /*
10966                 * OPA specifies that this is the same size as the VL
10967                 * arbitration tables (i.e., 256 bytes).
10968                 */
10969                break;
10970        default:
10971                return -EINVAL;
10972        }
10973        return size;
10974}
10975
10976/*
10977 * Write the given fabric manager table.
10978 */
10979int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
10980{
10981        int ret = 0;
10982        struct vl_arb_cache *vlc;
10983
10984        switch (which) {
10985        case FM_TBL_VL_HIGH_ARB:
10986                vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10987                if (vl_arb_match_cache(vlc, t)) {
10988                        vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10989                        break;
10990                }
10991                vl_arb_set_cache(vlc, t);
10992                vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10993                ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
10994                                     VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
10995                break;
10996        case FM_TBL_VL_LOW_ARB:
10997                vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10998                if (vl_arb_match_cache(vlc, t)) {
10999                        vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11000                        break;
11001                }
11002                vl_arb_set_cache(vlc, t);
11003                vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11004                ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11005                                     VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11006                break;
11007        case FM_TBL_BUFFER_CONTROL:
11008                ret = set_buffer_control(ppd, t);
11009                break;
11010        case FM_TBL_SC2VLNT:
11011                set_sc2vlnt(ppd->dd, t);
11012                break;
11013        default:
11014                ret = -EINVAL;
11015        }
11016        return ret;
11017}
11018
11019/*
11020 * Disable all data VLs.
11021 *
11022 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11023 */
11024static int disable_data_vls(struct hfi1_devdata *dd)
11025{
11026        if (is_ax(dd))
11027                return 1;
11028
11029        pio_send_control(dd, PSC_DATA_VL_DISABLE);
11030
11031        return 0;
11032}
11033
11034/*
11035 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11036 * Just re-enables all data VLs (the "fill" part happens
11037 * automatically - the name was chosen for symmetry with
11038 * stop_drain_data_vls()).
11039 *
11040 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11041 */
11042int open_fill_data_vls(struct hfi1_devdata *dd)
11043{
11044        if (is_ax(dd))
11045                return 1;
11046
11047        pio_send_control(dd, PSC_DATA_VL_ENABLE);
11048
11049        return 0;
11050}
11051
11052/*
11053 * drain_data_vls() - assumes that disable_data_vls() has been called;
11054 * waits for the occupancy of the per-VL FIFOs for all contexts, and of
11055 * the SDMA engines, to drop to 0.
11056 */
11057static void drain_data_vls(struct hfi1_devdata *dd)
11058{
11059        sc_wait(dd);
11060        sdma_wait(dd);
11061        pause_for_credit_return(dd);
11062}
11063
11064/*
11065 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11066 *
11067 * Use open_fill_data_vls() to resume using data VLs.  This pair is
11068 * meant to be used like this:
11069 *
11070 * stop_drain_data_vls(dd);
11071 * // do things with per-VL resources
11072 * open_fill_data_vls(dd);
11073 */
11074int stop_drain_data_vls(struct hfi1_devdata *dd)
11075{
11076        int ret;
11077
11078        ret = disable_data_vls(dd);
11079        if (ret == 0)
11080                drain_data_vls(dd);
11081
11082        return ret;
11083}
11084
11085/*
11086 * Convert a nanosecond time to a cclock count.  No matter how slow
11087 * the cclock, a non-zero ns will always have a non-zero result.
11088 */
11089u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11090{
11091        u32 cclocks;
11092
11093        if (dd->icode == ICODE_FPGA_EMULATION)
11094                cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11095        else  /* simulation pretends to be ASIC */
11096                cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11097        if (ns && !cclocks)     /* if ns nonzero, must be at least 1 */
11098                cclocks = 1;
11099        return cclocks;
11100}
11101
11102/*
11103 * Convert a cclock count to nanoseconds.  No matter how slow
11104 * the cclock, a non-zero cclocks will always have a non-zero result.
11105 */
11106u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11107{
11108        u32 ns;
11109
11110        if (dd->icode == ICODE_FPGA_EMULATION)
11111                ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11112        else  /* simulation pretends to be ASIC */
11113                ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11114        if (cclocks && !ns)
11115                ns = 1;
11116        return ns;
11117}
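
/*
 * Illustrative round trip, assuming a hypothetical cclock period of
 * 800 ps (the real ASIC_CCLOCK_PS/FPGA_CCLOCK_PS values apply in
 * practice):
 *
 *	ns_to_cclock(dd, 1)  ->  (1 * 1000) / 800 = 1 cclock
 *	cclock_to_ns(dd, 1)  ->  (1 * 800) / 1000 = 0, clamped to 1 ns
 *
 * The clamp is what guarantees that a non-zero input never produces a
 * zero result in either direction.
 */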
11118
11119/*
11120 * Dynamically adjust the receive interrupt timeout for a context based on
11121 * incoming packet rate.
11122 *
11123 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11124 */
11125static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11126{
11127        struct hfi1_devdata *dd = rcd->dd;
11128        u32 timeout = rcd->rcvavail_timeout;
11129
11130        /*
11131         * This algorithm doubles or halves the timeout depending on whether
11132         * the number of packets received in this interrupt was less than or
11133         * greater than or equal to the interrupt count.
11134         *
11135         * The calculations below do not allow a steady state to be achieved.
11136         * Only at the endpoints is it possible to have an unchanging
11137         * timeout.
11138         */
11139        if (npkts < rcv_intr_count) {
11140                /*
11141                 * Not enough packets arrived before the timeout, adjust
11142                 * timeout downward.
11143                 */
11144                if (timeout < 2) /* already at minimum? */
11145                        return;
11146                timeout >>= 1;
11147        } else {
11148                /*
11149                 * More than enough packets arrived before the timeout, adjust
11150                 * timeout upward.
11151                 */
11152                if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11153                        return;
11154                timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11155        }
11156
11157        rcd->rcvavail_timeout = timeout;
11158        /*
11159         * timeout cannot be larger than rcv_intr_timeout_csr which has already
11160         * been verified to be in range
11161         */
11162        write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11163                        (u64)timeout <<
11164                        RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11165}
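
/*
 * Hypothetical trace of the adjustment above (values chosen only for
 * illustration; rcv_intr_count and dd->rcv_intr_timeout_csr are
 * configured elsewhere): with rcv_intr_count = 16 and a current timeout
 * of 64, an interrupt that saw 8 packets halves the timeout to 32, while
 * one that saw 20 packets doubles it to 128, capped at
 * dd->rcv_intr_timeout_csr.  Sustained light traffic therefore walks the
 * timeout down toward 1 and sustained heavy traffic walks it up to the
 * CSR maximum.
 */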
11166
11167void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11168                    u32 intr_adjust, u32 npkts)
11169{
11170        struct hfi1_devdata *dd = rcd->dd;
11171        u64 reg;
11172        u32 ctxt = rcd->ctxt;
11173
11174        /*
11175         * Need to write timeout register before updating RcvHdrHead to ensure
11176         * that a new value is used when the HW decides to restart counting.
11177         */
11178        if (intr_adjust)
11179                adjust_rcv_timeout(rcd, npkts);
11180        if (updegr) {
11181                reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11182                        << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11183                write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11184        }
11185        mmiowb();
11186        reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11187                (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11188                        << RCV_HDR_HEAD_HEAD_SHIFT);
11189        write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11190        mmiowb();
11191}
11192
11193u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11194{
11195        u32 head, tail;
11196
11197        head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11198                & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11199
11200        if (rcd->rcvhdrtail_kvaddr)
11201                tail = get_rcvhdrtail(rcd);
11202        else
11203                tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11204
11205        return head == tail;
11206}
11207
11208/*
11209 * Context Control and Receive Array encoding for buffer size:
11210 *      0x0 invalid
11211 *      0x1   4 KB
11212 *      0x2   8 KB
11213 *      0x3  16 KB
11214 *      0x4  32 KB
11215 *      0x5  64 KB
11216 *      0x6 128 KB
11217 *      0x7 256 KB
11218 *      0x8 512 KB (Receive Array only)
11219 *      0x9   1 MB (Receive Array only)
11220 *      0xa   2 MB (Receive Array only)
11221 *
11222 *      0xB-0xF - reserved (Receive Array only)
11223 *
11224 *
11225 * This routine assumes that the value has already been sanity checked.
11226 */
11227static u32 encoded_size(u32 size)
11228{
11229        switch (size) {
11230        case   4 * 1024: return 0x1;
11231        case   8 * 1024: return 0x2;
11232        case  16 * 1024: return 0x3;
11233        case  32 * 1024: return 0x4;
11234        case  64 * 1024: return 0x5;
11235        case 128 * 1024: return 0x6;
11236        case 256 * 1024: return 0x7;
11237        case 512 * 1024: return 0x8;
11238        case   1 * 1024 * 1024: return 0x9;
11239        case   2 * 1024 * 1024: return 0xa;
11240        }
11241        return 0x1;     /* if invalid, go with the minimum size */
11242}
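
/*
 * Sketch only: for the sizes the hardware accepts, the encoding above is
 * simply log2(size) - 11, so an equivalent (but less self-documenting)
 * form would be:
 *
 *	return ilog2(size) - 11;	(4 KB -> 0x1, ..., 2 MB -> 0xa)
 *
 * The switch is kept because it also maps any unexpected value to the
 * minimum size rather than producing a garbage encoding.
 */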
11243
11244void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11245{
11246        struct hfi1_ctxtdata *rcd;
11247        u64 rcvctrl, reg;
11248        int did_enable = 0;
11249
11250        rcd = dd->rcd[ctxt];
11251        if (!rcd)
11252                return;
11253
11254        hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11255
11256        rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11257        /* if the context is already enabled, don't do the extra steps */
11258        if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11259            !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11260                /* reset the tail and hdr addresses, and sequence count */
11261                write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11262                                rcd->rcvhdrq_phys);
11263                if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11264                        write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11265                                        rcd->rcvhdrqtailaddr_phys);
11266                rcd->seq_cnt = 1;
11267
11268                /* reset the cached receive header queue head value */
11269                rcd->head = 0;
11270
11271                /*
11272                 * Zero the receive header queue so we don't get false
11273                 * positives when checking the sequence number.  The
11274                 * sequence numbers could land exactly on the same spot.
11275                 * E.g. an rcd restart before the receive header queue wrapped.
11276                 */
11277                memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11278
11279                /* starting timeout */
11280                rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11281
11282                /* enable the context */
11283                rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11284
11285                /* clean the egr buffer size first */
11286                rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11287                rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11288                                & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11289                                        << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11290
11291                /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11292                write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11293                did_enable = 1;
11294
11295                /* zero RcvEgrIndexHead */
11296                write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11297
11298                /* set eager count and base index */
11299                reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11300                        & RCV_EGR_CTRL_EGR_CNT_MASK)
11301                       << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11302                        (((rcd->eager_base >> RCV_SHIFT)
11303                          & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11304                         << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11305                write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11306
11307                /*
11308                 * Set TID (expected) count and base index.
11309                 * rcd->expected_count is set to individual RcvArray entries,
11310                 * not pairs, and the CSR takes a pair-count in groups of
11311                 * four, so divide by 8.
11312                 */
11313                reg = (((rcd->expected_count >> RCV_SHIFT)
11314                                        & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11315                                << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11316                      (((rcd->expected_base >> RCV_SHIFT)
11317                                        & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11318                                << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11319                write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11320                if (ctxt == HFI1_CTRL_CTXT)
11321                        write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11322        }
11323        if (op & HFI1_RCVCTRL_CTXT_DIS) {
11324                write_csr(dd, RCV_VL15, 0);
11325                /*
11326                 * When a receive context is being disabled, turn on tail
11327                 * update with a dummy tail address and then disable the
11328                 * receive context.
11329                 */
11330                if (dd->rcvhdrtail_dummy_physaddr) {
11331                        write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11332                                        dd->rcvhdrtail_dummy_physaddr);
11333                        /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11334                        rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11335                }
11336
11337                rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11338        }
11339        if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11340                rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11341        if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11342                rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11343        if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11344                rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11345        if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11346                /* See comment on RcvCtxtCtrl.TailUpd above */
11347                if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11348                        rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11349        }
11350        if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11351                rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11352        if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11353                rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11354        if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11355                /*
11356                 * In one-packet-per-eager mode, the size comes from
11357                 * the RcvArray entry.
11358                 */
11359                rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11360                rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11361        }
11362        if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11363                rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11364        if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11365                rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11366        if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11367                rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11368        if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11369                rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11370        if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11371                rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11372        rcd->rcvctrl = rcvctrl;
11373        hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11374        write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11375
11376        /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11377        if (did_enable &&
11378            (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11379                reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11380                if (reg != 0) {
11381                        dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11382                                    ctxt, reg);
11383                        read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11384                        write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11385                        write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11386                        read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11387                        reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11388                        dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11389                                    ctxt, reg, reg == 0 ? "not" : "still");
11390                }
11391        }
11392
11393        if (did_enable) {
11394                /*
11395                 * The interrupt timeout and count must be set after
11396                 * the context is enabled to take effect.
11397                 */
11398                /* set interrupt timeout */
11399                write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11400                                (u64)rcd->rcvavail_timeout <<
11401                                RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11402
11403                /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11404                reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11405                write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11406        }
11407
11408        if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11409                /*
11410                 * If the context has been disabled and the Tail Update has
11411                 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy
11412                 * address so it doesn't contain an address that is invalid.
11413                 */
11414                write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11415                                dd->rcvhdrtail_dummy_physaddr);
11416}
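
/*
 * Hedged usage sketch (flag combination chosen purely for illustration;
 * the real callers live outside this function): enabling a context with
 * packet-available interrupts and DMA'd tail updates could look like:
 *
 *	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB |
 *			 HFI1_RCVCTRL_INTRAVAIL_ENB |
 *			 HFI1_RCVCTRL_TAILUPD_ENB, rcd->ctxt);
 *
 * Each op bit is handled independently above, so several changes can be
 * combined into a single call.
 */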
11417
11418u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
11419{
11420        int ret;
11421        u64 val = 0;
11422
11423        if (namep) {
11424                ret = dd->cntrnameslen;
11425                *namep = dd->cntrnames;
11426        } else {
11427                const struct cntr_entry *entry;
11428                int i, j;
11429
11430                ret = (dd->ndevcntrs) * sizeof(u64);
11431
11432                /* Get the start of the block of counters */
11433                *cntrp = dd->cntrs;
11434
11435                /*
11436                 * Now go and fill in each counter in the block.
11437                 */
11438                for (i = 0; i < DEV_CNTR_LAST; i++) {
11439                        entry = &dev_cntrs[i];
11440                        hfi1_cdbg(CNTR, "reading %s", entry->name);
11441                        if (entry->flags & CNTR_DISABLED) {
11442                                /* Nothing */
11443                                hfi1_cdbg(CNTR, "\tDisabled\n");
11444                        } else {
11445                                if (entry->flags & CNTR_VL) {
11446                                        hfi1_cdbg(CNTR, "\tPer VL\n");
11447                                        for (j = 0; j < C_VL_COUNT; j++) {
11448                                                val = entry->rw_cntr(entry,
11449                                                                  dd, j,
11450                                                                  CNTR_MODE_R,
11451                                                                  0);
11452                                                hfi1_cdbg(
11453                                                   CNTR,
11454                                                   "\t\tRead 0x%llx for %d\n",
11455                                                   val, j);
11456                                                dd->cntrs[entry->offset + j] =
11457                                                                            val;
11458                                        }
11459                                } else if (entry->flags & CNTR_SDMA) {
11460                                        hfi1_cdbg(CNTR,
11461                                                  "\t Per SDMA Engine\n");
11462                                        for (j = 0; j < dd->chip_sdma_engines;
11463                                             j++) {
11464                                                val =
11465                                                entry->rw_cntr(entry, dd, j,
11466                                                               CNTR_MODE_R, 0);
11467                                                hfi1_cdbg(CNTR,
11468                                                          "\t\tRead 0x%llx for %d\n",
11469                                                          val, j);
11470                                                dd->cntrs[entry->offset + j] =
11471                                                                        val;
11472                                        }
11473                                } else {
11474                                        val = entry->rw_cntr(entry, dd,
11475                                                        CNTR_INVALID_VL,
11476                                                        CNTR_MODE_R, 0);
11477                                        dd->cntrs[entry->offset] = val;
11478                                        hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11479                                }
11480                        }
11481                }
11482        }
11483        return ret;
11484}
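
/*
 * Hedged usage sketch (no such caller exists in this file; dd is assumed
 * to be in scope): the name and value buffers are parallel, so a
 * consumer such as the stats code can pair them up along these lines:
 *
 *	char *names;
 *	u64 *vals;
 *	u32 nlen = hfi1_read_cntrs(dd, &names, NULL);
 *	u32 vlen = hfi1_read_cntrs(dd, NULL, &vals);
 *
 * names is a '\n'-separated list built by init_cntrs(), vals[i] is the
 * current value of the i-th name, and nlen/vlen are the sizes in bytes
 * of the two buffers.
 */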
11485
11486/*
11487 * Used by sysfs to create files for hfi stats to read
11488 */
11489u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
11490{
11491        int ret;
11492        u64 val = 0;
11493
11494        if (namep) {
11495                ret = ppd->dd->portcntrnameslen;
11496                *namep = ppd->dd->portcntrnames;
11497        } else {
11498                const struct cntr_entry *entry;
11499                int i, j;
11500
11501                ret = ppd->dd->nportcntrs * sizeof(u64);
11502                *cntrp = ppd->cntrs;
11503
11504                for (i = 0; i < PORT_CNTR_LAST; i++) {
11505                        entry = &port_cntrs[i];
11506                        hfi1_cdbg(CNTR, "reading %s", entry->name);
11507                        if (entry->flags & CNTR_DISABLED) {
11508                                /* Nothing */
11509                                hfi1_cdbg(CNTR, "\tDisabled\n");
11510                                continue;
11511                        }
11512
11513                        if (entry->flags & CNTR_VL) {
11514                                hfi1_cdbg(CNTR, "\tPer VL");
11515                                for (j = 0; j < C_VL_COUNT; j++) {
11516                                        val = entry->rw_cntr(entry, ppd, j,
11517                                                               CNTR_MODE_R,
11518                                                               0);
11519                                        hfi1_cdbg(
11520                                           CNTR,
11521                                           "\t\tRead 0x%llx for %d",
11522                                           val, j);
11523                                        ppd->cntrs[entry->offset + j] = val;
11524                                }
11525                        } else {
11526                                val = entry->rw_cntr(entry, ppd,
11527                                                       CNTR_INVALID_VL,
11528                                                       CNTR_MODE_R,
11529                                                       0);
11530                                ppd->cntrs[entry->offset] = val;
11531                                hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11532                        }
11533                }
11534        }
11535        return ret;
11536}
11537
11538static void free_cntrs(struct hfi1_devdata *dd)
11539{
11540        struct hfi1_pportdata *ppd;
11541        int i;
11542
11543        if (dd->synth_stats_timer.data)
11544                del_timer_sync(&dd->synth_stats_timer);
11545        dd->synth_stats_timer.data = 0;
11546        ppd = (struct hfi1_pportdata *)(dd + 1);
11547        for (i = 0; i < dd->num_pports; i++, ppd++) {
11548                kfree(ppd->cntrs);
11549                kfree(ppd->scntrs);
11550                free_percpu(ppd->ibport_data.rvp.rc_acks);
11551                free_percpu(ppd->ibport_data.rvp.rc_qacks);
11552                free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
11553                ppd->cntrs = NULL;
11554                ppd->scntrs = NULL;
11555                ppd->ibport_data.rvp.rc_acks = NULL;
11556                ppd->ibport_data.rvp.rc_qacks = NULL;
11557                ppd->ibport_data.rvp.rc_delayed_comp = NULL;
11558        }
11559        kfree(dd->portcntrnames);
11560        dd->portcntrnames = NULL;
11561        kfree(dd->cntrs);
11562        dd->cntrs = NULL;
11563        kfree(dd->scntrs);
11564        dd->scntrs = NULL;
11565        kfree(dd->cntrnames);
11566        dd->cntrnames = NULL;
11567}
11568
11569#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11570#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11571
11572static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11573                              u64 *psval, void *context, int vl)
11574{
11575        u64 val;
11576        u64 sval = *psval;
11577
11578        if (entry->flags & CNTR_DISABLED) {
11579                dd_dev_err(dd, "Counter %s not enabled", entry->name);
11580                return 0;
11581        }
11582
11583        hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11584
11585        val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11586
11587        /* If it's a synthetic counter, there is more work we need to do */
11588        if (entry->flags & CNTR_SYNTH) {
11589                if (sval == CNTR_MAX) {
11590                        /* No need to read already saturated */
11591                        return CNTR_MAX;
11592                }
11593
11594                if (entry->flags & CNTR_32BIT) {
11595                        /* 32bit counters can wrap multiple times */
11596                        u64 upper = sval >> 32;
11597                        u64 lower = (sval << 32) >> 32;
11598
11599                        if (lower > val) { /* hw wrapped */
11600                                if (upper == CNTR_32BIT_MAX)
11601                                        val = CNTR_MAX;
11602                                else
11603                                        upper++;
11604                        }
11605
11606                        if (val != CNTR_MAX)
11607                                val = (upper << 32) | val;
11608
11609                } else {
11610                        /* If we rolled we are saturated */
11611                        if ((val < sval) || (val > CNTR_MAX))
11612                                val = CNTR_MAX;
11613                }
11614        }
11615
11616        *psval = val;
11617
11618        hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11619
11620        return val;
11621}
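
/*
 * Worked example of the 32-bit wrap handling above, with hypothetical
 * values: if the saved software value is sval = 0x1FFFFFF00 (upper = 1,
 * lower = 0xFFFFFF00) and the hardware now reads back 0x10, then
 * lower > val, so the hardware wrapped; upper is bumped to 2 and the
 * returned value becomes (2 << 32) | 0x10 = 0x200000010.  Once upper
 * reaches CNTR_32BIT_MAX the counter is pinned at CNTR_MAX instead.
 */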
11622
11623static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11624                               struct cntr_entry *entry,
11625                               u64 *psval, void *context, int vl, u64 data)
11626{
11627        u64 val;
11628
11629        if (entry->flags & CNTR_DISABLED) {
11630                dd_dev_err(dd, "Counter %s not enabled", entry->name);
11631                return 0;
11632        }
11633
11634        hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11635
11636        if (entry->flags & CNTR_SYNTH) {
11637                *psval = data;
11638                if (entry->flags & CNTR_32BIT) {
11639                        val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11640                                             (data << 32) >> 32);
11641                        val = data; /* return the full 64bit value */
11642                } else {
11643                        val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11644                                             data);
11645                }
11646        } else {
11647                val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11648        }
11649
11650        *psval = val;
11651
11652        hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11653
11654        return val;
11655}
11656
11657u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11658{
11659        struct cntr_entry *entry;
11660        u64 *sval;
11661
11662        entry = &dev_cntrs[index];
11663        sval = dd->scntrs + entry->offset;
11664
11665        if (vl != CNTR_INVALID_VL)
11666                sval += vl;
11667
11668        return read_dev_port_cntr(dd, entry, sval, dd, vl);
11669}
11670
11671u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11672{
11673        struct cntr_entry *entry;
11674        u64 *sval;
11675
11676        entry = &dev_cntrs[index];
11677        sval = dd->scntrs + entry->offset;
11678
11679        if (vl != CNTR_INVALID_VL)
11680                sval += vl;
11681
11682        return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11683}
11684
11685u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11686{
11687        struct cntr_entry *entry;
11688        u64 *sval;
11689
11690        entry = &port_cntrs[index];
11691        sval = ppd->scntrs + entry->offset;
11692
11693        if (vl != CNTR_INVALID_VL)
11694                sval += vl;
11695
11696        if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11697            (index <= C_RCV_HDR_OVF_LAST)) {
11698                /* We do not want to bother for disabled contexts */
11699                return 0;
11700        }
11701
11702        return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11703}
11704
11705u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11706{
11707        struct cntr_entry *entry;
11708        u64 *sval;
11709
11710        entry = &port_cntrs[index];
11711        sval = ppd->scntrs + entry->offset;
11712
11713        if (vl != CNTR_INVALID_VL)
11714                sval += vl;
11715
11716        if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11717            (index <= C_RCV_HDR_OVF_LAST)) {
11718                /* We do not want to bother for disabled contexts */
11719                return 0;
11720        }
11721
11722        return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11723}
11724
11725static void update_synth_timer(unsigned long opaque)
11726{
11727        u64 cur_tx;
11728        u64 cur_rx;
11729        u64 total_flits;
11730        u8 update = 0;
11731        int i, j, vl;
11732        struct hfi1_pportdata *ppd;
11733        struct cntr_entry *entry;
11734
11735        struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11736
11737        /*
11738         * Rather than keep beating on the CSRs, pick a minimal set that we can
11739         * check to watch for potential rollover. We can do this by looking at
11740         * the number of flits sent/received. If the total flit count exceeds
11741         * 32 bits then we have to iterate over all the counters and update them.
11742         */
11743        entry = &dev_cntrs[C_DC_RCV_FLITS];
11744        cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11745
11746        entry = &dev_cntrs[C_DC_XMIT_FLITS];
11747        cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11748
11749        hfi1_cdbg(
11750            CNTR,
11751            "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11752            dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11753
11754        if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11755                /*
11756                 * It may not be strictly necessary to update, but it won't
11757                 * hurt and it simplifies the logic here.
11758                 */
11759                update = 1;
11760                hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11761                          dd->unit);
11762        } else {
11763                total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11764                hfi1_cdbg(CNTR,
11765                          "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11766                          total_flits, (u64)CNTR_32BIT_MAX);
11767                if (total_flits >= CNTR_32BIT_MAX) {
11768                        hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11769                                  dd->unit);
11770                        update = 1;
11771                }
11772        }
11773
11774        if (update) {
11775                hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11776                for (i = 0; i < DEV_CNTR_LAST; i++) {
11777                        entry = &dev_cntrs[i];
11778                        if (entry->flags & CNTR_VL) {
11779                                for (vl = 0; vl < C_VL_COUNT; vl++)
11780                                        read_dev_cntr(dd, i, vl);
11781                        } else {
11782                                read_dev_cntr(dd, i, CNTR_INVALID_VL);
11783                        }
11784                }
11785                ppd = (struct hfi1_pportdata *)(dd + 1);
11786                for (i = 0; i < dd->num_pports; i++, ppd++) {
11787                        for (j = 0; j < PORT_CNTR_LAST; j++) {
11788                                entry = &port_cntrs[j];
11789                                if (entry->flags & CNTR_VL) {
11790                                        for (vl = 0; vl < C_VL_COUNT; vl++)
11791                                                read_port_cntr(ppd, j, vl);
11792                                } else {
11793                                        read_port_cntr(ppd, j, CNTR_INVALID_VL);
11794                                }
11795                        }
11796                }
11797
11798                /*
11799                 * of the number of "ticks", not the counter value. In other
11800                 * words, if the register rolls over we want to notice it and
11801                 * go ahead and force an update.
11802                 * and force an update.
11803                 */
11804                entry = &dev_cntrs[C_DC_XMIT_FLITS];
11805                dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11806                                                CNTR_MODE_R, 0);
11807
11808                entry = &dev_cntrs[C_DC_RCV_FLITS];
11809                dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11810                                                CNTR_MODE_R, 0);
11811
11812                hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11813                          dd->unit, dd->last_tx, dd->last_rx);
11814
11815        } else {
11816                hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11817        }
11818
11819        mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11820}
11821
11822#define C_MAX_NAME 13 /* 12 chars + one for \0 */
11823static int init_cntrs(struct hfi1_devdata *dd)
11824{
11825        int i, rcv_ctxts, j;
11826        size_t sz;
11827        char *p;
11828        char name[C_MAX_NAME];
11829        struct hfi1_pportdata *ppd;
11830        const char *bit_type_32 = ",32";
11831        const int bit_type_32_sz = strlen(bit_type_32);
11832
11833        /* set up the stats timer; the add_timer is done at the end */
11834        setup_timer(&dd->synth_stats_timer, update_synth_timer,
11835                    (unsigned long)dd);
11836
11837        /***********************/
11838        /* per device counters */
11839        /***********************/
11840
11841        /* size names and determine how many we have */
11842        dd->ndevcntrs = 0;
11843        sz = 0;
11844
11845        for (i = 0; i < DEV_CNTR_LAST; i++) {
11846                if (dev_cntrs[i].flags & CNTR_DISABLED) {
11847                        hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11848                        continue;
11849                }
11850
11851                if (dev_cntrs[i].flags & CNTR_VL) {
11852                        dev_cntrs[i].offset = dd->ndevcntrs;
11853                        for (j = 0; j < C_VL_COUNT; j++) {
11854                                snprintf(name, C_MAX_NAME, "%s%d",
11855                                         dev_cntrs[i].name, vl_from_idx(j));
11856                                sz += strlen(name);
11857                                /* Add ",32" for 32-bit counters */
11858                                if (dev_cntrs[i].flags & CNTR_32BIT)
11859                                        sz += bit_type_32_sz;
11860                                sz++;
11861                                dd->ndevcntrs++;
11862                        }
11863                } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11864                        dev_cntrs[i].offset = dd->ndevcntrs;
11865                        for (j = 0; j < dd->chip_sdma_engines; j++) {
11866                                snprintf(name, C_MAX_NAME, "%s%d",
11867                                         dev_cntrs[i].name, j);
11868                                sz += strlen(name);
11869                                /* Add ",32" for 32-bit counters */
11870                                if (dev_cntrs[i].flags & CNTR_32BIT)
11871                                        sz += bit_type_32_sz;
11872                                sz++;
11873                                dd->ndevcntrs++;
11874                        }
11875                } else {
11876                        /* +1 for newline. */
11877                        sz += strlen(dev_cntrs[i].name) + 1;
11878                        /* Add ",32" for 32-bit counters */
11879                        if (dev_cntrs[i].flags & CNTR_32BIT)
11880                                sz += bit_type_32_sz;
11881                        dev_cntrs[i].offset = dd->ndevcntrs;
11882                        dd->ndevcntrs++;
11883                }
11884        }
11885
11886        /* allocate space for the counter values */
11887        dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
11888        if (!dd->cntrs)
11889                goto bail;
11890
11891        dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
11892        if (!dd->scntrs)
11893                goto bail;
11894
11895        /* allocate space for the counter names */
11896        dd->cntrnameslen = sz;
11897        dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11898        if (!dd->cntrnames)
11899                goto bail;
11900
11901        /* fill in the names */
11902        for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
11903                if (dev_cntrs[i].flags & CNTR_DISABLED) {
11904                        /* Nothing */
11905                } else if (dev_cntrs[i].flags & CNTR_VL) {
11906                        for (j = 0; j < C_VL_COUNT; j++) {
11907                                snprintf(name, C_MAX_NAME, "%s%d",
11908                                         dev_cntrs[i].name,
11909                                         vl_from_idx(j));
11910                                memcpy(p, name, strlen(name));
11911                                p += strlen(name);
11912
11913                                /* Counter is 32 bits */
11914                                if (dev_cntrs[i].flags & CNTR_32BIT) {
11915                                        memcpy(p, bit_type_32, bit_type_32_sz);
11916                                        p += bit_type_32_sz;
11917                                }
11918
11919                                *p++ = '\n';
11920                        }
11921                } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11922                        for (j = 0; j < dd->chip_sdma_engines; j++) {
11923                                snprintf(name, C_MAX_NAME, "%s%d",
11924                                         dev_cntrs[i].name, j);
11925                                memcpy(p, name, strlen(name));
11926                                p += strlen(name);
11927
11928                                /* Counter is 32 bits */
11929                                if (dev_cntrs[i].flags & CNTR_32BIT) {
11930                                        memcpy(p, bit_type_32, bit_type_32_sz);
11931                                        p += bit_type_32_sz;
11932                                }
11933
11934                                *p++ = '\n';
11935                        }
11936                } else {
11937                        memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
11938                        p += strlen(dev_cntrs[i].name);
11939
11940                        /* Counter is 32 bits */
11941                        if (dev_cntrs[i].flags & CNTR_32BIT) {
11942                                memcpy(p, bit_type_32, bit_type_32_sz);
11943                                p += bit_type_32_sz;
11944                        }
11945
11946                        *p++ = '\n';
11947                }
11948        }
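
        /*
         * Example of the resulting dd->cntrnames layout (the counter names
         * shown are hypothetical): every entry is newline-terminated and
         * 32-bit counters carry the ",32" suffix accounted for above, e.g.
         *
         *      "CtrA\nCtrB0\nCtrB1\n...\nCtrB7\nCtrC,32\n"
         */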
11949
11950        /*********************/
11951        /* per port counters */
11952        /*********************/
11953
11954        /*
11955         * Go through the counters for the overflows and disable the ones we
11956         * don't need. This varies based on platform so we need to do it
11957         * dynamically here.
11958         */
11959        rcv_ctxts = dd->num_rcv_contexts;
11960        for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
11961             i <= C_RCV_HDR_OVF_LAST; i++) {
11962                port_cntrs[i].flags |= CNTR_DISABLED;
11963        }
11964
11965        /* size port counter names and determine how many we have */
11966        sz = 0;
11967        dd->nportcntrs = 0;
11968        for (i = 0; i < PORT_CNTR_LAST; i++) {
11969                if (port_cntrs[i].flags & CNTR_DISABLED) {
11970                        hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
11971                        continue;
11972                }
11973
11974                if (port_cntrs[i].flags & CNTR_VL) {
11975                        port_cntrs[i].offset = dd->nportcntrs;
11976                        for (j = 0; j < C_VL_COUNT; j++) {
11977                                snprintf(name, C_MAX_NAME, "%s%d",
11978                                         port_cntrs[i].name, vl_from_idx(j));
11979                                sz += strlen(name);
11980                                /* Add ",32" for 32-bit counters */
11981                                if (port_cntrs[i].flags & CNTR_32BIT)
11982                                        sz += bit_type_32_sz;
11983                                sz++;
11984                                dd->nportcntrs++;
11985                        }
11986                } else {
11987                        /* +1 for newline */
11988                        sz += strlen(port_cntrs[i].name) + 1;
11989                        /* Add ",32" for 32-bit counters */
11990                        if (port_cntrs[i].flags & CNTR_32BIT)
11991                                sz += bit_type_32_sz;
11992                        port_cntrs[i].offset = dd->nportcntrs;
11993                        dd->nportcntrs++;
11994                }
11995        }
11996
11997        /* allocate space for the counter names */
11998        dd->portcntrnameslen = sz;
11999        dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12000        if (!dd->portcntrnames)
12001                goto bail;
12002
12003        /* fill in port cntr names */
12004        for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12005                if (port_cntrs[i].flags & CNTR_DISABLED)
12006                        continue;
12007
12008                if (port_cntrs[i].flags & CNTR_VL) {
12009                        for (j = 0; j < C_VL_COUNT; j++) {
12010                                snprintf(name, C_MAX_NAME, "%s%d",
12011                                         port_cntrs[i].name, vl_from_idx(j));
12012                                memcpy(p, name, strlen(name));
12013                                p += strlen(name);
12014
12015                                /* Counter is 32 bits */
12016                                if (port_cntrs[i].flags & CNTR_32BIT) {
12017                                        memcpy(p, bit_type_32, bit_type_32_sz);
12018                                        p += bit_type_32_sz;
12019                                }
12020
12021                                *p++ = '\n';
12022                        }
12023                } else {
12024                        memcpy(p, port_cntrs[i].name,
12025                               strlen(port_cntrs[i].name));
12026                        p += strlen(port_cntrs[i].name);
12027
12028                        /* Counter is 32 bits */
12029                        if (port_cntrs[i].flags & CNTR_32BIT) {
12030                                memcpy(p, bit_type_32, bit_type_32_sz);
12031                                p += bit_type_32_sz;
12032                        }
12033
12034                        *p++ = '\n';
12035                }
12036        }
12037
12038        /* allocate per port storage for counter values */
12039        ppd = (struct hfi1_pportdata *)(dd + 1);
12040        for (i = 0; i < dd->num_pports; i++, ppd++) {
12041                ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12042                if (!ppd->cntrs)
12043                        goto bail;
12044
12045                ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12046                if (!ppd->scntrs)
12047                        goto bail;
12048        }
12049
12050        /* CPU counters need to be allocated and zeroed */
12051        if (init_cpu_counters(dd))
12052                goto bail;
12053
12054        mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12055        return 0;
12056bail:
12057        free_cntrs(dd);
12058        return -ENOMEM;
12059}
12060
12061static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12062{
12063        switch (chip_lstate) {
12064        default:
12065                dd_dev_err(dd,
12066                           "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12067                           chip_lstate);
12068                /* fall through */
12069        case LSTATE_DOWN:
12070                return IB_PORT_DOWN;
12071        case LSTATE_INIT:
12072                return IB_PORT_INIT;
12073        case LSTATE_ARMED:
12074                return IB_PORT_ARMED;
12075        case LSTATE_ACTIVE:
12076                return IB_PORT_ACTIVE;
12077        }
12078}
12079
12080u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12081{
12082        /* look at the HFI meta-states only */
12083        switch (chip_pstate & 0xf0) {
12084        default:
12085                dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12086                           chip_pstate);
12087                /* fall through */
12088        case PLS_DISABLED:
12089                return IB_PORTPHYSSTATE_DISABLED;
12090        case PLS_OFFLINE:
12091                return OPA_PORTPHYSSTATE_OFFLINE;
12092        case PLS_POLLING:
12093                return IB_PORTPHYSSTATE_POLLING;
12094        case PLS_CONFIGPHY:
12095                return IB_PORTPHYSSTATE_TRAINING;
12096        case PLS_LINKUP:
12097                return IB_PORTPHYSSTATE_LINKUP;
12098        case PLS_PHYTEST:
12099                return IB_PORTPHYSSTATE_PHY_TEST;
12100        }
12101}
12102
12103/* return the OPA port logical state name */
12104const char *opa_lstate_name(u32 lstate)
12105{
12106        static const char * const port_logical_names[] = {
12107                "PORT_NOP",
12108                "PORT_DOWN",
12109                "PORT_INIT",
12110                "PORT_ARMED",
12111                "PORT_ACTIVE",
12112                "PORT_ACTIVE_DEFER",
12113        };
12114        if (lstate < ARRAY_SIZE(port_logical_names))
12115                return port_logical_names[lstate];
12116        return "unknown";
12117}
12118
12119/* return the OPA port physical state name */
12120const char *opa_pstate_name(u32 pstate)
12121{
12122        static const char * const port_physical_names[] = {
12123                "PHYS_NOP",
12124                "reserved1",
12125                "PHYS_POLL",
12126                "PHYS_DISABLED",
12127                "PHYS_TRAINING",
12128                "PHYS_LINKUP",
12129                "PHYS_LINK_ERR_RECOVER",
12130                "PHYS_PHY_TEST",
12131                "reserved8",
12132                "PHYS_OFFLINE",
12133                "PHYS_GANGED",
12134                "PHYS_TEST",
12135        };
12136        if (pstate < ARRAY_SIZE(port_physical_names))
12137                return port_physical_names[pstate];
12138        return "unknown";
12139}
12140
12141/*
12142 * Read the hardware link state and set the driver's cached value of it.
12143 * Return the (new) current value.
12144 */
12145u32 get_logical_state(struct hfi1_pportdata *ppd)
12146{
12147        u32 new_state;
12148
12149        new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12150        if (new_state != ppd->lstate) {
12151                dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12152                            opa_lstate_name(new_state), new_state);
12153                ppd->lstate = new_state;
12154        }
12155        /*
12156         * Set port status flags in the page mapped into userspace
12157         * memory. Do it here to ensure a reliable state - this is
12158         * the only function called by all state handling code.
12159         * Always set the flags because the cached value might
12160         * have been changed explicitly outside of this
12161         * function.
12162         */
12163        if (ppd->statusp) {
12164                switch (ppd->lstate) {
12165                case IB_PORT_DOWN:
12166                case IB_PORT_INIT:
12167                        *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12168                                           HFI1_STATUS_IB_READY);
12169                        break;
12170                case IB_PORT_ARMED:
12171                        *ppd->statusp |= HFI1_STATUS_IB_CONF;
12172                        break;
12173                case IB_PORT_ACTIVE:
12174                        *ppd->statusp |= HFI1_STATUS_IB_READY;
12175                        break;
12176                }
12177        }
12178        return ppd->lstate;
12179}
12180
12181/**
12182 * wait_logical_linkstate - wait for an IB link state change to occur
12183 * @ppd: port device
12184 * @state: the state to wait for
12185 * @msecs: the number of milliseconds to wait
12186 *
12187 * Wait up to msecs milliseconds for IB link state change to occur.
12188 * For now, take the easy polling route.
12189 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12190 */
12191static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12192                                  int msecs)
12193{
12194        unsigned long timeout;
12195
12196        timeout = jiffies + msecs_to_jiffies(msecs);
12197        while (1) {
12198                if (get_logical_state(ppd) == state)
12199                        return 0;
12200                if (time_after(jiffies, timeout))
12201                        break;
12202                msleep(20);
12203        }
12204        dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12205
12206        return -ETIMEDOUT;
12207}
12208
12209u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12210{
12211        u32 pstate;
12212        u32 ib_pstate;
12213
12214        pstate = read_physical_state(ppd->dd);
12215        ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
12216        if (ppd->last_pstate != ib_pstate) {
12217                dd_dev_info(ppd->dd,
12218                            "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12219                            __func__, opa_pstate_name(ib_pstate), ib_pstate,
12220                            pstate);
12221                ppd->last_pstate = ib_pstate;
12222        }
12223        return ib_pstate;
12224}
12225
12226/*
12227 * Read/modify/write ASIC_QSFP register bits as selected by mask
12228 * data: 0 or 1 in the positions depending on what needs to be written
12229 * dir: 0 for read, 1 for write
12230 * mask: select by setting
12231 *      I2CCLK  (bit 0)
12232 *      I2CDATA (bit 1)
12233 */
12234u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
12235                  u32 mask)
12236{
12237        u64 qsfp_oe, target_oe;
12238
12239        target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
12240        if (mask) {
12241                /* We are writing register bits, so lock access */
12242                dir &= mask;
12243                data &= mask;
12244
12245                qsfp_oe = read_csr(dd, target_oe);
12246                qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
12247                write_csr(dd, target_oe, qsfp_oe);
12248        }
12249        /* We are exclusively reading bits here, but it is unlikely
12250         * we'll get valid data when we set the direction of the pin
12251         * in the same call, so the caller should call this function
12252         * again to get valid data.
12253         */
12254        return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
12255}
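
/*
 * Usage sketch (assumption, not an actual call site in this file): passing
 * a zero mask skips the output-enable update above and simply samples the
 * QSFP input pins, e.g.
 *
 *      u64 pins = hfi1_gpio_mod(dd, target, 0, 0, 0);
 *
 * After driving a pin, a second call is needed to read back valid data, as
 * the comment above notes.
 */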
12256
12257#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12258(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12259
12260#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12261(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12262
12263int hfi1_init_ctxt(struct send_context *sc)
12264{
12265        if (sc) {
12266                struct hfi1_devdata *dd = sc->dd;
12267                u64 reg;
12268                u8 set = (sc->type == SC_USER ?
12269                          HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12270                          HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12271                reg = read_kctxt_csr(dd, sc->hw_context,
12272                                     SEND_CTXT_CHECK_ENABLE);
12273                if (set)
12274                        CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12275                else
12276                        SET_STATIC_RATE_CONTROL_SMASK(reg);
12277                write_kctxt_csr(dd, sc->hw_context,
12278                                SEND_CTXT_CHECK_ENABLE, reg);
12279        }
12280        return 0;
12281}
12282
12283int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12284{
12285        int ret = 0;
12286        u64 reg;
12287
12288        if (dd->icode != ICODE_RTL_SILICON) {
12289                if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12290                        dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12291                                    __func__);
12292                return -EINVAL;
12293        }
12294        reg = read_csr(dd, ASIC_STS_THERM);
12295        temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12296                      ASIC_STS_THERM_CURR_TEMP_MASK);
12297        temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12298                        ASIC_STS_THERM_LO_TEMP_MASK);
12299        temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12300                        ASIC_STS_THERM_HI_TEMP_MASK);
12301        temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12302                          ASIC_STS_THERM_CRIT_TEMP_MASK);
12303        /* triggers is a 3-bit value - 1 bit per trigger. */
12304        temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12305
12306        return ret;
12307}
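
/*
 * Usage sketch (hypothetical caller; the fields are the raw register
 * values decoded above, not converted to any particular unit):
 */
static inline void tempsense_log_example(struct hfi1_devdata *dd)
{
        struct hfi1_temp temp;

        if (hfi1_tempsense_rd(dd, &temp) == 0)
                dd_dev_info(dd, "temp curr 0x%x lo 0x%x hi 0x%x crit 0x%x triggers 0x%x\n",
                            (u32)temp.curr, (u32)temp.lo_lim,
                            (u32)temp.hi_lim, (u32)temp.crit_lim,
                            temp.triggers);
}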
12308
12309/* ========================================================================= */
12310
12311/*
12312 * Enable/disable chip from delivering interrupts.
12313 */
12314void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12315{
12316        int i;
12317
12318        /*
12319         * In HFI, the mask needs to be 1 to allow interrupts.
12320         */
12321        if (enable) {
12322                /* enable all interrupts */
12323                for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12324                        write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
12325
12326                init_qsfp_int(dd);
12327        } else {
12328                for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12329                        write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
12330        }
12331}
12332
12333/*
12334 * Clear all interrupt sources on the chip.
12335 */
12336static void clear_all_interrupts(struct hfi1_devdata *dd)
12337{
12338        int i;
12339
12340        for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12341                write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
12342
12343        write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12344        write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12345        write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12346        write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12347        write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12348        write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12349        write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12350        for (i = 0; i < dd->chip_send_contexts; i++)
12351                write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12352        for (i = 0; i < dd->chip_sdma_engines; i++)
12353                write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12354
12355        write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12356        write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12357        write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12358}
12359
12360/* Move to pcie.c? */
12361static void disable_intx(struct pci_dev *pdev)
12362{
12363        pci_intx(pdev, 0);
12364}
12365
12366static void clean_up_interrupts(struct hfi1_devdata *dd)
12367{
12368        int i;
12369
12370        /* remove irqs - must happen before disabling/turning off */
12371        if (dd->num_msix_entries) {
12372                /* MSI-X */
12373                struct hfi1_msix_entry *me = dd->msix_entries;
12374
12375                for (i = 0; i < dd->num_msix_entries; i++, me++) {
12376                        if (!me->arg) /* => no irq, no affinity */
12377                                continue;
12378                        hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
12379                        free_irq(me->msix.vector, me->arg);
12380                }
12381        } else {
12382                /* INTx */
12383                if (dd->requested_intx_irq) {
12384                        free_irq(dd->pcidev->irq, dd);
12385                        dd->requested_intx_irq = 0;
12386                }
12387        }
12388
12389        /* turn off interrupts */
12390        if (dd->num_msix_entries) {
12391                /* MSI-X */
12392                pci_disable_msix(dd->pcidev);
12393        } else {
12394                /* INTx */
12395                disable_intx(dd->pcidev);
12396        }
12397
12398        /* clean structures */
12399        kfree(dd->msix_entries);
12400        dd->msix_entries = NULL;
12401        dd->num_msix_entries = 0;
12402}
12403
12404/*
12405 * Remap the interrupt source from the general handler to the given MSI-X
12406 * interrupt.
12407 */
12408static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12409{
12410        u64 reg;
12411        int m, n;
12412
12413        /* clear from the handled mask of the general interrupt */
12414        m = isrc / 64;
12415        n = isrc % 64;
12416        dd->gi_mask[m] &= ~((u64)1 << n);
12417
12418        /* direct the chip source to the given MSI-X interrupt */
12419        m = isrc / 8;
12420        n = isrc % 8;
12421        reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12422        reg &= ~((u64)0xff << (8 * n));
12423        reg |= ((u64)msix_intr & 0xff) << (8 * n);
12424        write_csr(dd, CCE_INT_MAP + (8 * m), reg);
12425}
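
/*
 * Worked example (hypothetical numbers): for isrc = 13 and msix_intr = 5,
 * bit 13 of gi_mask[0] is cleared (13 / 64 = 0, 13 % 64 = 13) and byte 5
 * of the second CCE_INT_MAP CSR (13 / 8 = 1, 13 % 8 = 5) is set to 5.
 */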
12426
12427static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12428                                  int engine, int msix_intr)
12429{
12430        /*
12431         * SDMA engine interrupt sources are grouped by type, rather
12432         * than by engine.  Per-engine interrupts are as follows:
12433         *      SDMA
12434         *      SDMAProgress
12435         *      SDMAIdle
12436         */
12437        remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
12438                   msix_intr);
12439        remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
12440                   msix_intr);
12441        remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
12442                   msix_intr);
12443}
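
/*
 * Worked example (hypothetical value): with TXE_NUM_SDMA_ENGINES = 16,
 * engine 2 remaps sources IS_SDMA_START + 2, + 18 and + 34 (its SDMA,
 * SDMAProgress and SDMAIdle sources) to the same MSI-X vector.
 */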
12444
12445static int request_intx_irq(struct hfi1_devdata *dd)
12446{
12447        int ret;
12448
12449        snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12450                 dd->unit);
12451        ret = request_irq(dd->pcidev->irq, general_interrupt,
12452                          IRQF_SHARED, dd->intx_name, dd);
12453        if (ret)
12454                dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12455                           ret);
12456        else
12457                dd->requested_intx_irq = 1;
12458        return ret;
12459}
12460
12461static int request_msix_irqs(struct hfi1_devdata *dd)
12462{
12463        int first_general, last_general;
12464        int first_sdma, last_sdma;
12465        int first_rx, last_rx;
12466        int i, ret = 0;
12467
12468        /* calculate the ranges we are going to use */
12469        first_general = 0;
12470        last_general = first_general + 1;
12471        first_sdma = last_general;
12472        last_sdma = first_sdma + dd->num_sdma;
12473        first_rx = last_sdma;
12474        last_rx = first_rx + dd->n_krcv_queues;
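
        /*
         * Worked example (hypothetical counts): with num_sdma = 16 and
         * n_krcv_queues = 8, MSI-X 0 is the general interrupt, MSI-X 1-16
         * serve the SDMA engines and MSI-X 17-24 serve the kernel receive
         * contexts.
         */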
12475
12476        /*
12477         * Sanity check - the code expects all SDMA chip source
12478         * interrupts to be in the same CSR, starting at bit 0.  Verify
12479         * that this is true by checking the bit location of the start.
12480         */
12481        BUILD_BUG_ON(IS_SDMA_START % 64);
12482
12483        for (i = 0; i < dd->num_msix_entries; i++) {
12484                struct hfi1_msix_entry *me = &dd->msix_entries[i];
12485                const char *err_info;
12486                irq_handler_t handler;
12487                irq_handler_t thread = NULL;
12488                void *arg;
12489                int idx;
12490                struct hfi1_ctxtdata *rcd = NULL;
12491                struct sdma_engine *sde = NULL;
12492
12493                /* obtain the arguments to request_irq */
12494                if (first_general <= i && i < last_general) {
12495                        idx = i - first_general;
12496                        handler = general_interrupt;
12497                        arg = dd;
12498                        snprintf(me->name, sizeof(me->name),
12499                                 DRIVER_NAME "_%d", dd->unit);
12500                        err_info = "general";
12501                        me->type = IRQ_GENERAL;
12502                } else if (first_sdma <= i && i < last_sdma) {
12503                        idx = i - first_sdma;
12504                        sde = &dd->per_sdma[idx];
12505                        handler = sdma_interrupt;
12506                        arg = sde;
12507                        snprintf(me->name, sizeof(me->name),
12508                                 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
12509                        err_info = "sdma";
12510                        remap_sdma_interrupts(dd, idx, i);
12511                        me->type = IRQ_SDMA;
12512                } else if (first_rx <= i && i < last_rx) {
12513                        idx = i - first_rx;
12514                        rcd = dd->rcd[idx];
12515                        /* no interrupt if no rcd */
12516                        if (!rcd)
12517                                continue;
12518                        /*
12519                         * Set the interrupt register and mask for this
12520                         * context's interrupt.
12521                         */
12522                        rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
12523                        rcd->imask = ((u64)1) <<
12524                                        ((IS_RCVAVAIL_START + idx) % 64);
12525                        handler = receive_context_interrupt;
12526                        thread = receive_context_thread;
12527                        arg = rcd;
12528                        snprintf(me->name, sizeof(me->name),
12529                                 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
12530                        err_info = "receive context";
12531                        remap_intr(dd, IS_RCVAVAIL_START + idx, i);
12532                        me->type = IRQ_RCVCTXT;
12533                } else {
12534                        /* not in our expected range - complain, then
12535                         * ignore it
12536                         */
12537                        dd_dev_err(dd,
12538                                   "Unexpected extra MSI-X interrupt %d\n", i);
12539                        continue;
12540                }
12541                /* no argument, no interrupt */
12542                if (!arg)
12543                        continue;
12544                /* make sure the name is terminated */
12545                me->name[sizeof(me->name) - 1] = 0;
12546
12547                ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12548                                           me->name, arg);
12549                if (ret) {
12550                        dd_dev_err(dd,
12551                                   "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12552                                   err_info, me->msix.vector, idx, ret);
12553                        return ret;
12554                }
12555                /*
12556                 * assign arg after request_irq call, so it will be
12557                 * cleaned up
12558                 */
12559                me->arg = arg;
12560
12561                ret = hfi1_get_irq_affinity(dd, me);
12562                if (ret)
12563                        dd_dev_err(dd,
12564                                   "unable to pin IRQ %d\n", ret);
12565        }
12566
12567        return ret;
12568}
12569
12570/*
12571 * Set the general handler to accept all interrupts, remap all
12572 * chip interrupts back to MSI-X 0.
12573 */
12574static void reset_interrupts(struct hfi1_devdata *dd)
12575{
12576        int i;
12577
12578        /* all interrupts handled by the general handler */
12579        for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12580                dd->gi_mask[i] = ~(u64)0;
12581
12582        /* all chip interrupts map to MSI-X 0 */
12583        for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12584                write_csr(dd, CCE_INT_MAP + (8 * i), 0);
12585}
12586
12587static int set_up_interrupts(struct hfi1_devdata *dd)
12588{
12589        struct hfi1_msix_entry *entries;
12590        u32 total, request;
12591        int i, ret;
12592        int single_interrupt = 0; /* we expect to have all the interrupts */
12593
12594        /*
12595         * Interrupt count:
12596         *      1 general, "slow path" interrupt (includes the SDMA engines
12597         *              slow source, SDMACleanupDone)
12598         *      N interrupts - one per used SDMA engine
12599         *      M interrupts - one per kernel receive context
12600         */
12601        total = 1 + dd->num_sdma + dd->n_krcv_queues;
12602
12603        entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12604        if (!entries) {
12605                ret = -ENOMEM;
12606                goto fail;
12607        }
12608        /* 1-1 MSI-X entry assignment */
12609        for (i = 0; i < total; i++)
12610                entries[i].msix.entry = i;
12611
12612        /* ask for MSI-X interrupts */
12613        request = total;
12614        request_msix(dd, &request, entries);
12615
12616        if (request == 0) {
12617                /* using INTx */
12618                /* dd->num_msix_entries already zero */
12619                kfree(entries);
12620                single_interrupt = 1;
12621                dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12622        } else {
12623                /* using MSI-X */
12624                dd->num_msix_entries = request;
12625                dd->msix_entries = entries;
12626
12627                if (request != total) {
12628                        /* using MSI-X, with reduced interrupts */
12629                        dd_dev_err(
12630                                dd,
12631                                "cannot handle reduced interrupt case, want %u, got %u\n",
12632                                total, request);
12633                        ret = -EINVAL;
12634                        goto fail;
12635                }
12636                dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12637        }
12638
12639        /* mask all interrupts */
12640        set_intr_state(dd, 0);
12641        /* clear all pending interrupts */
12642        clear_all_interrupts(dd);
12643
12644        /* reset general handler mask, chip MSI-X mappings */
12645        reset_interrupts(dd);
12646
12647        if (single_interrupt)
12648                ret = request_intx_irq(dd);
12649        else
12650                ret = request_msix_irqs(dd);
12651        if (ret)
12652                goto fail;
12653
12654        return 0;
12655
12656fail:
12657        clean_up_interrupts(dd);
12658        return ret;
12659}
12660
12661/*
12662 * Set up context values in dd.  Sets:
12663 *
12664 *      num_rcv_contexts - number of contexts being used
12665 *      n_krcv_queues - number of kernel contexts
12666 *      first_user_ctxt - first non-kernel context in array of contexts
12667 *      freectxts  - number of free user contexts
12668 *      num_send_contexts - number of PIO send contexts being used
12669 */
12670static int set_up_context_variables(struct hfi1_devdata *dd)
12671{
12672        int num_kernel_contexts;
12673        int total_contexts;
12674        int ret;
12675        unsigned ngroups;
12676
12677        /*
12678         * Kernel contexts (to be fixed later):
12679         * - min of 2 or 1 context/numa
12680         * - Context 0 - control context (VL15/multicast/error)
12681         * - Context 1 - default context
12682         */
12683        if (n_krcvqs)
12684                /*
12685                 * Don't count context 0 in n_krcvqs since
12686                 * it isn't used for normal verbs traffic.
12687                 *
12688                 * krcvqs will reflect number of kernel
12689                 * receive contexts above 0.
12690                 */
12691                num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
12692        else
12693                num_kernel_contexts = num_online_nodes() + 1;
12694        num_kernel_contexts =
12695                max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12696        /*
12697         * Every kernel receive context needs an ACK send context.
12698         * One send context is allocated for each VL{0-7} and VL15.
12699         */
12700        if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12701                dd_dev_err(dd,
12702                           "Reducing # kernel rcv contexts to: %d, from %d\n",
12703                           (int)(dd->chip_send_contexts - num_vls - 1),
12704                           (int)num_kernel_contexts);
12705                num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12706        }
12707        /*
12708         * User contexts: (to be fixed later)
12709         *      - default to 1 user context per CPU if num_user_contexts is
12710         *        negative
12711         */
12712        if (num_user_contexts < 0)
12713                num_user_contexts = num_online_cpus();
12714
12715        total_contexts = num_kernel_contexts + num_user_contexts;
12716
12717        /*
12718         * Adjust the counts given a global max.
12719         */
12720        if (total_contexts > dd->chip_rcv_contexts) {
12721                dd_dev_err(dd,
12722                           "Reducing # user receive contexts to: %d, from %d\n",
12723                           (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12724                           (int)num_user_contexts);
12725                num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12726                /* recalculate */
12727                total_contexts = num_kernel_contexts + num_user_contexts;
12728        }
12729
12730        /* the first N are kernel contexts, the rest are user contexts */
12731        dd->num_rcv_contexts = total_contexts;
12732        dd->n_krcv_queues = num_kernel_contexts;
12733        dd->first_user_ctxt = num_kernel_contexts;
12734        dd->num_user_contexts = num_user_contexts;
12735        dd->freectxts = num_user_contexts;
12736        dd_dev_info(dd,
12737                    "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12738                    (int)dd->chip_rcv_contexts,
12739                    (int)dd->num_rcv_contexts,
12740                    (int)dd->n_krcv_queues,
12741                    (int)dd->num_rcv_contexts - dd->n_krcv_queues);
12742
12743        /*
12744         * Receive array allocation:
12745         *   All RcvArray entries are divided into groups of 8. This
12746         *   is required by the hardware and will speed up writes to
12747         *   consecutive entries by using write-combining of the entire
12748         *   cacheline.
12749         *
12750         *   The groups are evenly divided among all contexts; any
12751         *   left-over groups are given to the first N user
12752         *   contexts.
12753         */
12754        dd->rcv_entries.group_size = RCV_INCREMENT;
12755        ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12756        dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12757        dd->rcv_entries.nctxt_extra = ngroups -
12758                (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12759        dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12760                    dd->rcv_entries.ngroups,
12761                    dd->rcv_entries.nctxt_extra);
12762        if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12763            MAX_EAGER_ENTRIES * 2) {
12764                dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12765                        dd->rcv_entries.group_size;
12766                dd_dev_info(dd,
12767                            "RcvArray group count too high, change to %u\n",
12768                            dd->rcv_entries.ngroups);
12769                dd->rcv_entries.nctxt_extra = 0;
12770        }
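
        /*
         * Worked example (hypothetical numbers): with 32768 RcvArray
         * entries, a group size of 8 and 40 receive contexts there are
         * 32768 / 8 = 4096 groups, 4096 / 40 = 102 groups per context,
         * and 4096 - (40 * 102) = 16 extra groups for the first 16 user
         * contexts.
         */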
12771        /*
12772         * PIO send contexts
12773         */
12774        ret = init_sc_pools_and_sizes(dd);
12775        if (ret >= 0) { /* success */
12776                dd->num_send_contexts = ret;
12777                dd_dev_info(
12778                        dd,
12779                        "send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
12780                        dd->chip_send_contexts,
12781                        dd->num_send_contexts,
12782                        dd->sc_sizes[SC_KERNEL].count,
12783                        dd->sc_sizes[SC_ACK].count,
12784                        dd->sc_sizes[SC_USER].count);
12785                ret = 0;        /* success */
12786        }
12787
12788        return ret;
12789}
12790
12791/*
12792 * Set the device/port partition key table. The MAD code
12793 * will ensure that, at least, the partial management
12794 * partition key is present in the table.
12795 */
12796static void set_partition_keys(struct hfi1_pportdata *ppd)
12797{
12798        struct hfi1_devdata *dd = ppd->dd;
12799        u64 reg = 0;
12800        int i;
12801
12802        dd_dev_info(dd, "Setting partition keys\n");
12803        for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12804                reg |= (ppd->pkeys[i] &
12805                        RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12806                        ((i % 4) *
12807                         RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12808                /* Each register holds 4 PKey values. */
12809                if ((i % 4) == 3) {
12810                        write_csr(dd, RCV_PARTITION_KEY +
12811                                  ((i - 3) * 2), reg);
12812                        reg = 0;
12813                }
12814        }
12815
12816        /* Always enable HW pkeys check when pkeys table is set */
12817        add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12818}
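
/*
 * Worked example (hypothetical pkey values): pkeys[0..3] are packed into
 * the first RCV_PARTITION_KEY register, each in its own 16-bit field, and
 * the register is written once i reaches 3; pkeys[4..7] then fill the
 * next register, and so on.
 */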
12819
12820/*
12821 * These CSRs and memories are uninitialized on reset and must be
12822 * written before reading to set the ECC/parity bits.
12823 *
12824 * NOTE: All user context CSRs that are not mmaped write-only
12825 * (e.g. the TID flows) must be initialized even if the driver never
12826 * reads them.
12827 */
12828static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12829{
12830        int i, j;
12831
12832        /* CceIntMap */
12833        for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12834                write_csr(dd, CCE_INT_MAP + (8 * i), 0);
12835
12836        /* SendCtxtCreditReturnAddr */
12837        for (i = 0; i < dd->chip_send_contexts; i++)
12838                write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12839
12840        /* PIO Send buffers */
12841        /* SDMA Send buffers */
12842        /*
12843         * These are not normally read, and (presently) have no method
12844         * to be read, so are not pre-initialized
12845         */
12846
12847        /* RcvHdrAddr */
12848        /* RcvHdrTailAddr */
12849        /* RcvTidFlowTable */
12850        for (i = 0; i < dd->chip_rcv_contexts; i++) {
12851                write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12852                write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12853                for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
12854                        write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
12855        }
12856
12857        /* RcvArray */
12858        for (i = 0; i < dd->chip_rcv_array_count; i++)
12859                write_csr(dd, RCV_ARRAY + (8 * i),
12860                          RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
12861
12862        /* RcvQPMapTable */
12863        for (i = 0; i < 32; i++)
12864                write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12865}
12866
12867/*
12868 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12869 */
12870static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12871                             u64 ctrl_bits)
12872{
12873        unsigned long timeout;
12874        u64 reg;
12875
12876        /* is the condition present? */
12877        reg = read_csr(dd, CCE_STATUS);
12878        if ((reg & status_bits) == 0)
12879                return;
12880
12881        /* clear the condition */
12882        write_csr(dd, CCE_CTRL, ctrl_bits);
12883
12884        /* wait for the condition to clear */
12885        timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12886        while (1) {
12887                reg = read_csr(dd, CCE_STATUS);
12888                if ((reg & status_bits) == 0)
12889                        return;
12890                if (time_after(jiffies, timeout)) {
12891                        dd_dev_err(dd,
12892                                   "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
12893                                   status_bits, reg & status_bits);
12894                        return;
12895                }
12896                udelay(1);
12897        }
12898}
12899
12900/* set CCE CSRs to chip reset defaults */
12901static void reset_cce_csrs(struct hfi1_devdata *dd)
12902{
12903        int i;
12904
12905        /* CCE_REVISION read-only */
12906        /* CCE_REVISION2 read-only */
12907        /* CCE_CTRL - bits clear automatically */
12908        /* CCE_STATUS read-only, use CceCtrl to clear */
12909        clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
12910        clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
12911        clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
12912        for (i = 0; i < CCE_NUM_SCRATCH; i++)
12913                write_csr(dd, CCE_SCRATCH + (8 * i), 0);
12914        /* CCE_ERR_STATUS read-only */
12915        write_csr(dd, CCE_ERR_MASK, 0);
12916        write_csr(dd, CCE_ERR_CLEAR, ~0ull);
12917        /* CCE_ERR_FORCE leave alone */
12918        for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
12919                write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
12920        write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
12921        /* CCE_PCIE_CTRL leave alone */
12922        for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
12923                write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
12924                write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
12925                          CCE_MSIX_TABLE_UPPER_RESETCSR);
12926        }
12927        for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
12928                /* CCE_MSIX_PBA read-only */
12929                write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
12930                write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
12931        }
12932        for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12933                write_csr(dd, CCE_INT_MAP + (8 * i), 0);
12934        for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
12935                /* CCE_INT_STATUS read-only */
12936                write_csr(dd, CCE_INT_MASK + (8 * i), 0);
12937                write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
12938                /* CCE_INT_FORCE leave alone */
12939                /* CCE_INT_BLOCKED read-only */
12940        }
12941        for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
12942                write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
12943}
12944
12945/* set MISC CSRs to chip reset defaults */
12946static void reset_misc_csrs(struct hfi1_devdata *dd)
12947{
12948        int i;
12949
12950        for (i = 0; i < 32; i++) {
12951                write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
12952                write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
12953                write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
12954        }
12955        /*
12956         * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
12957         * only be written in 128-byte chunks
12958         */
12959        /* init RSA engine to clear lingering errors */
12960        write_csr(dd, MISC_CFG_RSA_CMD, 1);
12961        write_csr(dd, MISC_CFG_RSA_MU, 0);
12962        write_csr(dd, MISC_CFG_FW_CTRL, 0);
12963        /* MISC_STS_8051_DIGEST read-only */
12964        /* MISC_STS_SBM_DIGEST read-only */
12965        /* MISC_STS_PCIE_DIGEST read-only */
12966        /* MISC_STS_FAB_DIGEST read-only */
12967        /* MISC_ERR_STATUS read-only */
12968        write_csr(dd, MISC_ERR_MASK, 0);
12969        write_csr(dd, MISC_ERR_CLEAR, ~0ull);
12970        /* MISC_ERR_FORCE leave alone */
12971}
12972
12973/* set TXE CSRs to chip reset defaults */
12974static void reset_txe_csrs(struct hfi1_devdata *dd)
12975{
12976        int i;
12977
12978        /*
12979         * TXE Kernel CSRs
12980         */
12981        write_csr(dd, SEND_CTRL, 0);
12982        __cm_reset(dd, 0);      /* reset CM internal state */
12983        /* SEND_CONTEXTS read-only */
12984        /* SEND_DMA_ENGINES read-only */
12985        /* SEND_PIO_MEM_SIZE read-only */
12986        /* SEND_DMA_MEM_SIZE read-only */
12987        write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
12988        pio_reset_all(dd);      /* SEND_PIO_INIT_CTXT */
12989        /* SEND_PIO_ERR_STATUS read-only */
12990        write_csr(dd, SEND_PIO_ERR_MASK, 0);
12991        write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
12992        /* SEND_PIO_ERR_FORCE leave alone */
12993        /* SEND_DMA_ERR_STATUS read-only */
12994        write_csr(dd, SEND_DMA_ERR_MASK, 0);
12995        write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
12996        /* SEND_DMA_ERR_FORCE leave alone */
12997        /* SEND_EGRESS_ERR_STATUS read-only */
12998        write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
12999        write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13000        /* SEND_EGRESS_ERR_FORCE leave alone */
13001        write_csr(dd, SEND_BTH_QP, 0);
13002        write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13003        write_csr(dd, SEND_SC2VLT0, 0);
13004        write_csr(dd, SEND_SC2VLT1, 0);
13005        write_csr(dd, SEND_SC2VLT2, 0);
13006        write_csr(dd, SEND_SC2VLT3, 0);
13007        write_csr(dd, SEND_LEN_CHECK0, 0);
13008        write_csr(dd, SEND_LEN_CHECK1, 0);
13009        /* SEND_ERR_STATUS read-only */
13010        write_csr(dd, SEND_ERR_MASK, 0);
13011        write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13012        /* SEND_ERR_FORCE read-only */
13013        for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13014                write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13015        for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13016                write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13017        for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13018                write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13019        for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13020                write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13021        for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13022                write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13023        write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13024        write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13025        /* SEND_CM_CREDIT_USED_STATUS read-only */
13026        write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13027        write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13028        write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13029        write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13030        write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13031        for (i = 0; i < TXE_NUM_DATA_VL; i++)
13032                write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13033        write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13034        /* SEND_CM_CREDIT_USED_VL read-only */
13035        /* SEND_CM_CREDIT_USED_VL15 read-only */
13036        /* SEND_EGRESS_CTXT_STATUS read-only */
13037        /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13038        write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13039        /* SEND_EGRESS_ERR_INFO read-only */
13040        /* SEND_EGRESS_ERR_SOURCE read-only */
13041
13042        /*
13043         * TXE Per-Context CSRs
13044         */
13045        for (i = 0; i < dd->chip_send_contexts; i++) {
13046                write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13047                write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13048                write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13049                write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13050                write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13051                write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13052                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13053                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13054                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13055                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13056                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13057                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13058        }
13059
13060        /*
13061         * TXE Per-SDMA CSRs
13062         */
13063        for (i = 0; i < dd->chip_sdma_engines; i++) {
13064                write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13065                /* SEND_DMA_STATUS read-only */
13066                write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13067                write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13068                write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13069                /* SEND_DMA_HEAD read-only */
13070                write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13071                write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13072                /* SEND_DMA_IDLE_CNT read-only */
13073                write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13074                write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13075                /* SEND_DMA_DESC_FETCHED_CNT read-only */
13076                /* SEND_DMA_ENG_ERR_STATUS read-only */
13077                write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13078                write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13079                /* SEND_DMA_ENG_ERR_FORCE leave alone */
13080                write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13081                write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13082                write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13083                write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13084                write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13085                write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13086                write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13087        }
13088}
13089
13090/*
13091 * Expect on entry:
13092 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13093 */
13094static void init_rbufs(struct hfi1_devdata *dd)
13095{
13096        u64 reg;
13097        int count;
13098
13099        /*
13100         * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13101         * clear.
13102         */
13103        count = 0;
13104        while (1) {
13105                reg = read_csr(dd, RCV_STATUS);
13106                if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13107                            | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13108                        break;
13109                /*
13110                 * Give up after 1ms - maximum wait time.
13111                 *
13112                 * RBuf size is 148KiB.  Slowest possible is PCIe Gen1 x1 at
13113                 * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
13114                 *      148 KB / (66% * 250MB/s) = 920us
13115                 */
13116                if (count++ > 500) {
13117                        dd_dev_err(dd,
13118                                   "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13119                                   __func__, reg);
13120                        break;
13121                }
13122                udelay(2); /* do not busy-wait the CSR */
13123        }
13124
13125        /* start the init - expect RcvCtrl to be 0 */
13126        write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13127
13128        /*
13129         * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
13130         * period after the write before RcvStatus.RxRbufInitDone is valid.
13131         * The delay in the first run through the loop below is sufficient and
13132         * required before the first read of RcvStatus.RxRbufInitDone.
13133         */
13134        read_csr(dd, RCV_CTRL);
13135
13136        /* wait for the init to finish */
13137        count = 0;
13138        while (1) {
13139                /* delay is required first time through - see above */
13140                udelay(2); /* do not busy-wait the CSR */
13141                reg = read_csr(dd, RCV_STATUS);
13142                if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13143                        break;
13144
13145                /* give up after 100us - slowest possible at 33MHz is 73us */
13146                if (count++ > 50) {
13147                        dd_dev_err(dd,
13148                                   "%s: RcvStatus.RxRbufInitDone not set, continuing\n",
13149                                   __func__);
13150                        break;
13151                }
13152        }
13153}
13154
13155/* set RXE CSRs to chip reset defaults */
13156static void reset_rxe_csrs(struct hfi1_devdata *dd)
13157{
13158        int i, j;
13159
13160        /*
13161         * RXE Kernel CSRs
13162         */
13163        write_csr(dd, RCV_CTRL, 0);
13164        init_rbufs(dd);
13165        /* RCV_STATUS read-only */
13166        /* RCV_CONTEXTS read-only */
13167        /* RCV_ARRAY_CNT read-only */
13168        /* RCV_BUF_SIZE read-only */
13169        write_csr(dd, RCV_BTH_QP, 0);
13170        write_csr(dd, RCV_MULTICAST, 0);
13171        write_csr(dd, RCV_BYPASS, 0);
13172        write_csr(dd, RCV_VL15, 0);
13173        /* this is a clear-down */
13174        write_csr(dd, RCV_ERR_INFO,
13175                  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13176        /* RCV_ERR_STATUS read-only */
13177        write_csr(dd, RCV_ERR_MASK, 0);
13178        write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13179        /* RCV_ERR_FORCE leave alone */
13180        for (i = 0; i < 32; i++)
13181                write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13182        for (i = 0; i < 4; i++)
13183                write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13184        for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13185                write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13186        for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13187                write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13188        for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13189                write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13190                write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13191                write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13192        }
13193        for (i = 0; i < 32; i++)
13194                write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13195
13196        /*
13197         * RXE Kernel and User Per-Context CSRs
13198         */
13199        for (i = 0; i < dd->chip_rcv_contexts; i++) {
13200                /* kernel */
13201                write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13202                /* RCV_CTXT_STATUS read-only */
13203                write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13204                write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13205                write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13206                write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13207                write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13208                write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13209                write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13210                write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13211                write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13212                write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13213
13214                /* user */
13215                /* RCV_HDR_TAIL read-only */
13216                write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13217                /* RCV_EGR_INDEX_TAIL read-only */
13218                write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13219                /* RCV_EGR_OFFSET_TAIL read-only */
13220                for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13221                        write_uctxt_csr(dd, i,
13222                                        RCV_TID_FLOW_TABLE + (8 * j), 0);
13223                }
13224        }
13225}
13226
13227/*
13228 * Set sc2vl tables.
13229 *
13230 * They power on to zeros, so to avoid send context errors
13231 * they need to be set:
13232 *
13233 * SC 0-7 -> VL 0-7 (respectively)
13234 * SC 15  -> VL 15
13235 * otherwise
13236 *        -> VL 0
13237 */
13238static void init_sc2vl_tables(struct hfi1_devdata *dd)
13239{
13240        int i;
13241        /* init per architecture spec, constrained by hardware capability */
13242
13243        /* HFI maps sent packets */
13244        write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13245                0,
13246                0, 0, 1, 1,
13247                2, 2, 3, 3,
13248                4, 4, 5, 5,
13249                6, 6, 7, 7));
13250        write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13251                1,
13252                8, 0, 9, 0,
13253                10, 0, 11, 0,
13254                12, 0, 13, 0,
13255                14, 0, 15, 15));
13256        write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13257                2,
13258                16, 0, 17, 0,
13259                18, 0, 19, 0,
13260                20, 0, 21, 0,
13261                22, 0, 23, 0));
13262        write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13263                3,
13264                24, 0, 25, 0,
13265                26, 0, 27, 0,
13266                28, 0, 29, 0,
13267                30, 0, 31, 0));
13268
13269        /* DC maps received packets */
13270        write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13271                15_0,
13272                0, 0, 1, 1,  2, 2,  3, 3,  4, 4,  5, 5,  6, 6,  7,  7,
13273                8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13274        write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13275                31_16,
13276                16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13277                24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13278
13279        /* initialize the cached sc2vl values consistently with h/w */
13280        for (i = 0; i < 32; i++) {
13281                if (i < 8 || i == 15)
13282                        *((u8 *)(dd->sc2vl) + i) = (u8)i;
13283                else
13284                        *((u8 *)(dd->sc2vl) + i) = 0;
13285        }
13286}
13287
13288/*
13289 * Read chip sizes and then reset parts to sane, disabled, values.  We cannot
13290 * depend on the chip going through a power-on reset - a driver may be loaded
13291 * and unloaded many times.
13292 *
13293 * Do not write any CSR values to the chip in this routine - there may be
13294 * a reset following the (possible) FLR in this routine.
13295 *
13296 */
13297static void init_chip(struct hfi1_devdata *dd)
13298{
13299        int i;
13300
13301        /*
13302         * Put the HFI CSRs in a known state.
13303         * Combine this with a DC reset.
13304         *
13305         * Stop the device from doing anything while we do a
13306         * reset.  We know there are no other active users of
13307         * the device since we are now in charge.  Turn off
13308         * all outbound and inbound traffic and make sure
13309         * the device does not generate any interrupts.
13310         */
13311
13312        /* disable send contexts and SDMA engines */
13313        write_csr(dd, SEND_CTRL, 0);
13314        for (i = 0; i < dd->chip_send_contexts; i++)
13315                write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13316        for (i = 0; i < dd->chip_sdma_engines; i++)
13317                write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13318        /* disable port (turn off RXE inbound traffic) and contexts */
13319        write_csr(dd, RCV_CTRL, 0);
13320        for (i = 0; i < dd->chip_rcv_contexts; i++)
13321                write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13322        /* mask all interrupt sources */
13323        for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13324                write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13325
13326        /*
13327         * DC Reset: do a full DC reset before the register clear.
13328         * A recommended length of time to hold is one CSR read,
13329         * so reread the CceDcCtrl.  Then, hold the DC in reset
13330         * across the clear.
13331         */
13332        write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13333        (void)read_csr(dd, CCE_DC_CTRL);
13334
13335        if (use_flr) {
13336                /*
13337                 * A FLR will reset the SPC core and part of the PCIe.
13338                 * The parts that need to be restored have already been
13339                 * saved.
13340                 */
13341                dd_dev_info(dd, "Resetting CSRs with FLR\n");
13342
13343                /* do the FLR, the DC reset will remain */
13344                hfi1_pcie_flr(dd);
13345
13346                /* restore command and BARs */
13347                restore_pci_variables(dd);
13348
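                     /* A0 hardware gets a second FLR and PCI state restore */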
13349                if (is_ax(dd)) {
13350                        dd_dev_info(dd, "Resetting CSRs with FLR\n");
13351                        hfi1_pcie_flr(dd);
13352                        restore_pci_variables(dd);
13353                }
13354        } else {
13355                dd_dev_info(dd, "Resetting CSRs with writes\n");
13356                reset_cce_csrs(dd);
13357                reset_txe_csrs(dd);
13358                reset_rxe_csrs(dd);
13359                reset_misc_csrs(dd);
13360        }
13361        /* clear the DC reset */
13362        write_csr(dd, CCE_DC_CTRL, 0);
13363
13364        /* Set the LED off */
13365        setextled(dd, 0);
13366
13367        /*
13368         * Clear the QSFP reset.
13369         * An FLR enforces a 0 on all out pins. The driver does not touch
13370         * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low, holding
13371         * anything plugged in constantly in reset, if it pays attention
13372         * to RESET_N.
13373         * Prime examples of this are optical cables. Set all pins high.
13374         * I2CCLK and I2CDAT will change per direction, and INT_N and
13375         * MODPRS_N are input only and their value is ignored.
13376         */
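             /* 0x1f is the "all pins high" value referenced above */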
13377        write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13378        write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
13379        init_chip_resources(dd);
13380}
13381
13382static void init_early_variables(struct hfi1_devdata *dd)
13383{
13384        int i;
13385
13386        /* assign link credit variables */
13387        dd->vau = CM_VAU;
13388        dd->link_credits = CM_GLOBAL_CREDITS;
13389        if (is_ax(dd))
13390                dd->link_credits--;
13391        dd->vcu = cu_to_vcu(hfi1_cu);
13392        /* enough room for 8 MAD packets plus header - 17K */
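             /* 8 * (2048 + 128) = 17408 bytes, converted to allocation units (credits) by the divide */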
13393        dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13394        if (dd->vl15_init > dd->link_credits)
13395                dd->vl15_init = dd->link_credits;
13396
13397        write_uninitialized_csrs_and_memories(dd);
13398
13399        if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13400                for (i = 0; i < dd->num_pports; i++) {
13401                        struct hfi1_pportdata *ppd = &dd->pport[i];
13402
13403                        set_partition_keys(ppd);
13404                }
13405        init_sc2vl_tables(dd);
13406}
13407
13408static void init_kdeth_qp(struct hfi1_devdata *dd)
13409{
13410        /* user changed the KDETH_QP */
13411        if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13412                /* out of range or illegal value */
13413                dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13414                kdeth_qp = 0;
13415        }
13416        if (kdeth_qp == 0)      /* not set, or failed range check */
13417                kdeth_qp = DEFAULT_KDETH_QP;
13418
13419        write_csr(dd, SEND_BTH_QP,
13420                  (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13421                  SEND_BTH_QP_KDETH_QP_SHIFT);
13422
13423        write_csr(dd, RCV_BTH_QP,
13424                  (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13425                  RCV_BTH_QP_KDETH_QP_SHIFT);
13426}
13427
13428/**
13429 * init_qpmap_table
13430 * @dd - device data
13431 * @first_ctxt - first context
13432 * @last_ctxt - last context
13433 *
13434 * This routine sets the qpn mapping table that
13435 * is indexed by qpn[8:1].
13436 *
13437 * The routine will round robin the 256 settings
13438 * from first_ctxt to last_ctxt.
13439 *
13440 * The first/last looks ahead to having specialized
13441 * receive contexts for mgmt and bypass.  Normal
13442 * verbs traffic is assumed to be on a range
13443 * of receive contexts.
13444 */
13445static void init_qpmap_table(struct hfi1_devdata *dd,
13446                             u32 first_ctxt,
13447                             u32 last_ctxt)
13448{
13449        u64 reg = 0;
13450        u64 regno = RCV_QP_MAP_TABLE;
13451        int i;
13452        u64 ctxt = first_ctxt;
13453
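             /*
              * Each 64-bit RCV_QP_MAP_TABLE register holds 8 one-byte context
              * entries, so the 256 entries span 32 registers.  Contexts are
              * assigned round-robin from first_ctxt through last_ctxt.
              */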
13454        for (i = 0; i < 256;) {
13455                reg |= ctxt << (8 * (i % 8));
13456                i++;
13457                ctxt++;
13458                if (ctxt > last_ctxt)
13459                        ctxt = first_ctxt;
13460                if (i % 8 == 0) {
13461                        write_csr(dd, regno, reg);
13462                        reg = 0;
13463                        regno += 8;
13464                }
13465        }
13466        if (i % 8)
13467                write_csr(dd, regno, reg);
13468
13469        add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13470                        | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13471}
13472
13473/**
13474 * init_qos - init RX qos
13475 * @dd - device data
13476 * @first_ctxt - first context
13477 *
13478 * This routine initializes Rule 0 and the
13479 * RSM map table to implement qos.
13480 *
13481 * If all of the limit tests succeed,
13482 * qos is applied based on the array
13483 * interpretation of krcvqs where
13484 * entry 0 is VL0.
13485 *
13486 * The number of vl bits (n) and the number of qpn
13487 * bits (m) are computed to feed both the RSM map table
13488 * and the single rule.
13489 *
13490 */
13491static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
13492{
13493        u8 max_by_vl = 0;
13494        unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13495        u64 *rsmmap;
13496        u64 reg;
13497        u8  rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */
13498
13499        /* validate */
13500        if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13501            num_vls == 1 ||
13502            krcvqsset <= 1)
13503                goto bail;
13504        for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
13505                if (krcvqs[i] > max_by_vl)
13506                        max_by_vl = krcvqs[i];
13507        if (max_by_vl > 32)
13508                goto bail;
13509        qpns_per_vl = __roundup_pow_of_two(max_by_vl);
13510        /* determine bits vl */
13511        n = ilog2(num_vls);
13512        /* determine bits for qpn */
13513        m = ilog2(qpns_per_vl);
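             /*
              * Example: krcvqs = {2, 2, 2, 2} with num_vls = 4 gives
              * max_by_vl = 2, qpns_per_vl = 2, n = 2 and m = 1; the map
              * fill below then touches indices ((qpn << n) ^ vl) in 0..7.
              */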
13514        if ((m + n) > 7)
13515                goto bail;
13516        if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
13517                goto bail;
13518        rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
13519        if (!rsmmap)
13520                goto bail;
13521        memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
13522        /* init the local copy of the table */
13523        for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
13524                unsigned tctxt;
13525
13526                for (qpn = 0, tctxt = ctxt;
13527                     krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13528                        unsigned idx, regoff, regidx;
13529
13530                        /* generate index <= 128 */
13531                        idx = (qpn << n) ^ i;
13532                        regoff = (idx % 8) * 8;
13533                        regidx = idx / 8;
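                             /* regidx: which 64-bit map register; regoff: bit offset of the target byte */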
13534                        reg = rsmmap[regidx];
13535                        /* replace 0xff with context number */
13536                        reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13537                                << regoff);
13538                        reg |= (u64)(tctxt++) << regoff;
13539                        rsmmap[regidx] = reg;
13540                        if (tctxt == ctxt + krcvqs[i])
13541                                tctxt = ctxt;
13542                }
13543                ctxt += krcvqs[i];
13544        }
13545        /* flush cached copies to chip */
13546        for (i = 0; i < NUM_MAP_REGS; i++)
13547                write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
13548        /* add rule0 */
13549        write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
13550                  RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK <<
13551                  RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
13552                  2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13553        write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
13554                  LRH_BTH_MATCH_OFFSET << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13555                  LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13556                  LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13557                  ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13558                  QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13559                  ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13560        write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
13561                  LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
13562                  LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
13563                  LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
13564                  LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
13565        /* Enable RSM */
13566        add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13567        kfree(rsmmap);
13568        /* map everything else to first context */
13569        init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
13570        dd->qos_shift = n + 1;
13571        return;
13572bail:
13573        dd->qos_shift = 1;
13574        init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
13575}
13576
13577static void init_rxe(struct hfi1_devdata *dd)
13578{
13579        /* enable all receive errors */
13580        write_csr(dd, RCV_ERR_MASK, ~0ull);
13581        /* setup QPN map table - start where VL15 context leaves off */
13582        init_qos(dd, dd->n_krcv_queues > MIN_KERNEL_KCTXTS ?
13583                 MIN_KERNEL_KCTXTS : 0);
13584        /*
13585         * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13586         * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13587         * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
13588         * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13589         * Max_Payload_Size set to its minimum of 128.
13590         *
13591         * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13592         * (64 bytes).  Max_Payload_Size is possibly modified upward in
13593         * tune_pcie_caps() which is called after this routine.
13594         */
13595}
13596
13597static void init_other(struct hfi1_devdata *dd)
13598{
13599        /* enable all CCE errors */
13600        write_csr(dd, CCE_ERR_MASK, ~0ull);
13601        /* enable *some* Misc errors */
13602        write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13603        /* enable all DC errors, except LCB */
13604        write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13605        write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13606}
13607
13608/*
13609 * Fill out the given AU table using the given CU.  A CU is defined in terms
13610 * of AUs.  The table is an encoding: given the index, how many AUs does that
13611 * represent?
13612 *
13613 * NOTE: Assumes that the register layout is the same for the
13614 * local and remote tables.
13615 */
13616static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13617                               u32 csr0to3, u32 csr4to7)
13618{
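             /* entries encode 0, 1, then 2*CU, 4*CU, 8*CU, ... 64*CU allocation units */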
13619        write_csr(dd, csr0to3,
13620                  0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
13621                  1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
13622                  2ull * cu <<
13623                  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
13624                  4ull * cu <<
13625                  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
13626        write_csr(dd, csr4to7,
13627                  8ull * cu <<
13628                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
13629                  16ull * cu <<
13630                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
13631                  32ull * cu <<
13632                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
13633                  64ull * cu <<
13634                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
13635}
13636
13637static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13638{
13639        assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
13640                           SEND_CM_LOCAL_AU_TABLE4_TO7);
13641}
13642
13643void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13644{
13645        assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
13646                           SEND_CM_REMOTE_AU_TABLE4_TO7);
13647}
13648
13649static void init_txe(struct hfi1_devdata *dd)
13650{
13651        int i;
13652
13653        /* enable all PIO, SDMA, general, and Egress errors */
13654        write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13655        write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13656        write_csr(dd, SEND_ERR_MASK, ~0ull);
13657        write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13658
13659        /* enable all per-context and per-SDMA engine errors */
13660        for (i = 0; i < dd->chip_send_contexts; i++)
13661                write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13662        for (i = 0; i < dd->chip_sdma_engines; i++)
13663                write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13664
13665        /* set the local CU to AU mapping */
13666        assign_local_cm_au_table(dd, dd->vcu);
13667
13668        /*
13669         * Set reasonable default for Credit Return Timer
13670         * Don't set on Simulator - causes it to choke.
13671         */
13672        if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13673                write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13674}
13675
13676int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13677{
13678        struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13679        unsigned sctxt;
13680        int ret = 0;
13681        u64 reg;
13682
13683        if (!rcd || !rcd->sc) {
13684                ret = -EINVAL;
13685                goto done;
13686        }
13687        sctxt = rcd->sc->hw_context;
13688        reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13689                ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13690                 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13691        /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13692        if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13693                reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13694        write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13695        /*
13696         * Enable send-side J_KEY integrity check, unless this is A0 h/w
13697         */
13698        if (!is_ax(dd)) {
13699                reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13700                reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13701                write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13702        }
13703
13704        /* Enable J_KEY check on receive context. */
13705        reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13706                ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13707                 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13708        write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13709done:
13710        return ret;
13711}
13712
13713int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13714{
13715        struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13716        unsigned sctxt;
13717        int ret = 0;
13718        u64 reg;
13719
13720        if (!rcd || !rcd->sc) {
13721                ret = -EINVAL;
13722                goto done;
13723        }
13724        sctxt = rcd->sc->hw_context;
13725        write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
13726        /*
13727         * Disable send-side J_KEY integrity check, unless this is A0 h/w.
13728         * This check would not have been enabled for A0 h/w, see
13729         * set_ctxt_jkey().
13730         */
13731        if (!is_ax(dd)) {
13732                reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13733                reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13734                write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13735        }
13736        /* Turn off the J_KEY on the receive side */
13737        write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
13738done:
13739        return ret;
13740}
13741
13742int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
13743{
13744        struct hfi1_ctxtdata *rcd;
13745        unsigned sctxt;
13746        int ret = 0;
13747        u64 reg;
13748
13749        if (ctxt < dd->num_rcv_contexts) {
13750                rcd = dd->rcd[ctxt];
13751        } else {
13752                ret = -EINVAL;
13753                goto done;
13754        }
13755        if (!rcd || !rcd->sc) {
13756                ret = -EINVAL;
13757                goto done;
13758        }
13759        sctxt = rcd->sc->hw_context;
13760        reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
13761                SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
13762        write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
13763        reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13764        reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13765        write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13766done:
13767        return ret;
13768}
13769
13770int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
13771{
13772        struct hfi1_ctxtdata *rcd;
13773        unsigned sctxt;
13774        int ret = 0;
13775        u64 reg;
13776
13777        if (ctxt < dd->num_rcv_contexts) {
13778                rcd = dd->rcd[ctxt];
13779        } else {
13780                ret = -EINVAL;
13781                goto done;
13782        }
13783        if (!rcd || !rcd->sc) {
13784                ret = -EINVAL;
13785                goto done;
13786        }
13787        sctxt = rcd->sc->hw_context;
13788        reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13789        reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13790        write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13791        write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13792done:
13793        return ret;
13794}
13795
13796/*
13797 * Start doing the clean up of the chip. Our clean up happens in multiple
13798 * stages and this is just the first.
13799 */
13800void hfi1_start_cleanup(struct hfi1_devdata *dd)
13801{
13802        aspm_exit(dd);
13803        free_cntrs(dd);
13804        free_rcverr(dd);
13805        clean_up_interrupts(dd);
13806        finish_chip_resources(dd);
13807}
13808
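     /*
      * Mask off the per-HFI GUID index bit so both HFIs on the same ASIC
      * yield the same value; init_asic_data() uses this to find the peer.
      */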
13809#define HFI_BASE_GUID(dev) \
13810        ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
13811
13812/*
13813 * Information can be shared between the two HFIs on the same ASIC
13814 * in the same OS.  This function finds the peer device and sets
13815 * up a shared structure.
13816 */
13817static int init_asic_data(struct hfi1_devdata *dd)
13818{
13819        unsigned long flags;
13820        struct hfi1_devdata *tmp, *peer = NULL;
13821        int ret = 0;
13822
13823        spin_lock_irqsave(&hfi1_devs_lock, flags);
13824        /* Find our peer device */
13825        list_for_each_entry(tmp, &hfi1_dev_list, list) {
13826                if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
13827                    dd->unit != tmp->unit) {
13828                        peer = tmp;
13829                        break;
13830                }
13831        }
13832
13833        if (peer) {
13834                dd->asic_data = peer->asic_data;
13835        } else {
13836                dd->asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
13837                if (!dd->asic_data) {
13838                        ret = -ENOMEM;
13839                        goto done;
13840                }
13841                mutex_init(&dd->asic_data->asic_resource_mutex);
13842        }
13843        dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
13844
13845done:
13846        spin_unlock_irqrestore(&hfi1_devs_lock, flags);
13847        return ret;
13848}
13849
13850/*
13851 * Set dd->boardname.  Use a generic name if a name is not returned from
13852 * EFI variable space.
13853 *
13854 * Return 0 on success, -ENOMEM if space could not be allocated.
13855 */
13856static int obtain_boardname(struct hfi1_devdata *dd)
13857{
13858        /* generic board description */
13859        const char generic[] =
13860                "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
13861        unsigned long size;
13862        int ret;
13863
13864        ret = read_hfi1_efi_var(dd, "description", &size,
13865                                (void **)&dd->boardname);
13866        if (ret) {
13867                dd_dev_info(dd, "Board description not found\n");
13868                /* use generic description */
13869                dd->boardname = kstrdup(generic, GFP_KERNEL);
13870                if (!dd->boardname)
13871                        return -ENOMEM;
13872        }
13873        return 0;
13874}
13875
13876/*
13877 * Check the interrupt registers to make sure that they are mapped correctly.
13878 * It is intended to help the user identify any mismapping by the VMM when
13879 * the driver is running in a VM. This function should only be called before
13880 * interrupts are set up.
13881 *
13882 * Return 0 on success, -EINVAL on failure.
13883 */
13884static int check_int_registers(struct hfi1_devdata *dd)
13885{
13886        u64 reg;
13887        u64 all_bits = ~(u64)0;
13888        u64 mask;
13889
13890        /* Clear CceIntMask[0] to avoid raising any interrupts */
13891        mask = read_csr(dd, CCE_INT_MASK);
13892        write_csr(dd, CCE_INT_MASK, 0ull);
13893        reg = read_csr(dd, CCE_INT_MASK);
13894        if (reg)
13895                goto err_exit;
13896
13897        /* Clear all interrupt status bits */
13898        write_csr(dd, CCE_INT_CLEAR, all_bits);
13899        reg = read_csr(dd, CCE_INT_STATUS);
13900        if (reg)
13901                goto err_exit;
13902
13903        /* Set all interrupt status bits */
13904        write_csr(dd, CCE_INT_FORCE, all_bits);
13905        reg = read_csr(dd, CCE_INT_STATUS);
13906        if (reg != all_bits)
13907                goto err_exit;
13908
13909        /* Restore the interrupt mask */
13910        write_csr(dd, CCE_INT_CLEAR, all_bits);
13911        write_csr(dd, CCE_INT_MASK, mask);
13912
13913        return 0;
13914err_exit:
13915        write_csr(dd, CCE_INT_MASK, mask);
13916        dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
13917        return -EINVAL;
13918}
13919
13920/**
13921 * Allocate and initialize the device structure for the hfi.
13922 * @pdev: the pci_dev for hfi1_ib device
13923 * @ent: pci_device_id struct for this dev
13924 *
13925 * Also allocates, initializes, and returns the devdata struct for this
13926 * device instance
13927 *
13928 * This is global, and is called directly at init to set up the
13929 * chip-specific function pointers for later use.
13930 */
13931struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
13932                                  const struct pci_device_id *ent)
13933{
13934        struct hfi1_devdata *dd;
13935        struct hfi1_pportdata *ppd;
13936        u64 reg;
13937        int i, ret;
13938        static const char * const inames[] = { /* implementation names */
13939                "RTL silicon",
13940                "RTL VCS simulation",
13941                "RTL FPGA emulation",
13942                "Functional simulator"
13943        };
13944        struct pci_dev *parent = pdev->bus->self;
13945
13946        dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
13947                                sizeof(struct hfi1_pportdata));
13948        if (IS_ERR(dd))
13949                goto bail;
13950        ppd = dd->pport;
13951        for (i = 0; i < dd->num_pports; i++, ppd++) {
13952                int vl;
13953                /* init common fields */
13954                hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
13955                /* DC supports 4 link widths */
13956                ppd->link_width_supported =
13957                        OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
13958                        OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
13959                ppd->link_width_downgrade_supported =
13960                        ppd->link_width_supported;
13961                /* start out enabling only 4X */
13962                ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
13963                ppd->link_width_downgrade_enabled =
13964                                        ppd->link_width_downgrade_supported;
13965                /* link width active is 0 when link is down */
13966                /* link width downgrade active is 0 when link is down */
13967
13968                if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
13969                    num_vls > HFI1_MAX_VLS_SUPPORTED) {
13970                        hfi1_early_err(&pdev->dev,
13971                                       "Invalid num_vls %u, using %u VLs\n",
13972                                    num_vls, HFI1_MAX_VLS_SUPPORTED);
13973                        num_vls = HFI1_MAX_VLS_SUPPORTED;
13974                }
13975                ppd->vls_supported = num_vls;
13976                ppd->vls_operational = ppd->vls_supported;
13977                ppd->actual_vls_operational = ppd->vls_supported;
13978                /* Set the default MTU. */
13979                for (vl = 0; vl < num_vls; vl++)
13980                        dd->vld[vl].mtu = hfi1_max_mtu;
13981                dd->vld[15].mtu = MAX_MAD_PACKET;
13982                /*
13983                 * Set the initial values to reasonable defaults; they will be
13984                 * set for real when the link is up.
13985                 */
13986                ppd->lstate = IB_PORT_DOWN;
13987                ppd->overrun_threshold = 0x4;
13988                ppd->phy_error_threshold = 0xf;
13989                ppd->port_crc_mode_enabled = link_crc_mask;
13990                /* initialize supported LTP CRC mode */
13991                ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
13992                /* initialize enabled LTP CRC mode */
13993                ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
13994                /* start in offline */
13995                ppd->host_link_state = HLS_DN_OFFLINE;
13996                init_vl_arb_caches(ppd);
13997                ppd->last_pstate = 0xff; /* invalid value */
13998        }
13999
14000        dd->link_default = HLS_DN_POLL;
14001
14002        /*
14003         * Do remaining PCIe setup and save PCIe values in dd.
14004         * Any error printing is already done by the init code.
14005         * On return, we have the chip mapped.
14006         */
14007        ret = hfi1_pcie_ddinit(dd, pdev, ent);
14008        if (ret < 0)
14009                goto bail_free;
14010
14011        /* verify that reads actually work, save revision for reset check */
14012        dd->revision = read_csr(dd, CCE_REVISION);
14013        if (dd->revision == ~(u64)0) {
14014                dd_dev_err(dd, "cannot read chip CSRs\n");
14015                ret = -EINVAL;
14016                goto bail_cleanup;
14017        }
14018        dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14019                        & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14020        dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14021                        & CCE_REVISION_CHIP_REV_MINOR_MASK;
14022
14023        /*
14024         * Check interrupt registers mapping if the driver has no access to
14025         * the upstream component. In this case, it is likely that the driver
14026         * is running in a VM.
14027         */
14028        if (!parent) {
14029                ret = check_int_registers(dd);
14030                if (ret)
14031                        goto bail_cleanup;
14032        }
14033
14034        /*
14035         * obtain the hardware ID - NOT related to unit, which is a
14036         * software enumeration
14037         */
14038        reg = read_csr(dd, CCE_REVISION2);
14039        dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14040                                        & CCE_REVISION2_HFI_ID_MASK;
14041        /* the variable size will remove unwanted bits */
14042        dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14043        dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14044        dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14045                    dd->icode < ARRAY_SIZE(inames) ?
14046                    inames[dd->icode] : "unknown", (int)dd->irev);
14047
14048        /* speeds the hardware can support */
14049        dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14050        /* speeds allowed to run at */
14051        dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14052        /* give a reasonable active value, will be set on link up */
14053        dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14054
14055        dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14056        dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14057        dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14058        dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14059        dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14060        /* fix up link widths for emulation _p */
14061        ppd = dd->pport;
14062        if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14063                ppd->link_width_supported =
14064                        ppd->link_width_enabled =
14065                        ppd->link_width_downgrade_supported =
14066                        ppd->link_width_downgrade_enabled =
14067                                OPA_LINK_WIDTH_1X;
14068        }
14069        /* ensure num_vls isn't larger than the number of sdma engines */
14070        if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14071                dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
14072                           num_vls, dd->chip_sdma_engines);
14073                num_vls = dd->chip_sdma_engines;
14074                ppd->vls_supported = dd->chip_sdma_engines;
14075                ppd->vls_operational = ppd->vls_supported;
14076        }
14077
14078        /*
14079         * Convert the ns parameter to the 64 * cclocks used in the CSR.
14080         * Limit the max if larger than the field holds.  If timeout is
14081         * non-zero, then the calculated field will be at least 1.
14082         *
14083         * Must be after icode is set up - the cclock rate depends
14084         * on knowing the hardware being used.
14085         */
14086        dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14087        if (dd->rcv_intr_timeout_csr >
14088                        RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14089                dd->rcv_intr_timeout_csr =
14090                        RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14091        else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14092                dd->rcv_intr_timeout_csr = 1;
14093
14094        /* needs to be done before we look for the peer device */
14095        read_guid(dd);
14096
14097        /* set up shared ASIC data with peer device */
14098        ret = init_asic_data(dd);
14099        if (ret)
14100                goto bail_cleanup;
14101
14102        /* obtain chip sizes, reset chip CSRs */
14103        init_chip(dd);
14104
14105        /* read in the PCIe link speed information */
14106        ret = pcie_speeds(dd);
14107        if (ret)
14108                goto bail_cleanup;
14109
14110        /* Needs to be called before hfi1_firmware_init */
14111        get_platform_config(dd);
14112
14113        /* read in firmware */
14114        ret = hfi1_firmware_init(dd);
14115        if (ret)
14116                goto bail_cleanup;
14117
14118        /*
14119         * In general, the PCIe Gen3 transition must occur after the
14120         * chip has been idled (so it won't initiate any PCIe transactions
14121         * e.g. an interrupt) and before the driver changes any registers
14122         * (the transition will reset the registers).
14123         *
14124         * In particular, place this call after:
14125         * - init_chip()     - the chip will not initiate any PCIe transactions
14126         * - pcie_speeds()   - reads the current link speed
14127         * - hfi1_firmware_init() - the needed firmware is ready to be
14128         *                          downloaded
14129         */
14130        ret = do_pcie_gen3_transition(dd);
14131        if (ret)
14132                goto bail_cleanup;
14133
14134        /* start setting dd values and adjusting CSRs */
14135        init_early_variables(dd);
14136
14137        parse_platform_config(dd);
14138
14139        ret = obtain_boardname(dd);
14140        if (ret)
14141                goto bail_cleanup;
14142
14143        snprintf(dd->boardversion, BOARD_VERS_MAX,
14144                 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
14145                 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
14146                 (u32)dd->majrev,
14147                 (u32)dd->minrev,
14148                 (dd->revision >> CCE_REVISION_SW_SHIFT)
14149                    & CCE_REVISION_SW_MASK);
14150
14151        ret = set_up_context_variables(dd);
14152        if (ret)
14153                goto bail_cleanup;
14154
14155        /* set initial RXE CSRs */
14156        init_rxe(dd);
14157        /* set initial TXE CSRs */
14158        init_txe(dd);
14159        /* set initial non-RXE, non-TXE CSRs */
14160        init_other(dd);
14161        /* set up KDETH QP prefix in both RX and TX CSRs */
14162        init_kdeth_qp(dd);
14163
14164        ret = hfi1_dev_affinity_init(dd);
14165        if (ret)
14166                goto bail_cleanup;
14167
14168        /* send contexts must be set up before receive contexts */
14169        ret = init_send_contexts(dd);
14170        if (ret)
14171                goto bail_cleanup;
14172
14173        ret = hfi1_create_ctxts(dd);
14174        if (ret)
14175                goto bail_cleanup;
14176
14177        dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14178        /*
14179         * rcd[0] is guaranteed to be valid by this point. Also, all
14180         * contexts are using the same value, as per the module parameter.
14181         */
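             /* note the precedence: this is rcvhdrqentsize minus 2 (sizeof(u64) / sizeof(u32)) dwords */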
14182        dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14183
14184        ret = init_pervl_scs(dd);
14185        if (ret)
14186                goto bail_cleanup;
14187
14188        /* sdma init */
14189        for (i = 0; i < dd->num_pports; ++i) {
14190                ret = sdma_init(dd, i);
14191                if (ret)
14192                        goto bail_cleanup;
14193        }
14194
14195        /* use contexts created by hfi1_create_ctxts */
14196        ret = set_up_interrupts(dd);
14197        if (ret)
14198                goto bail_cleanup;
14199
14200        /* set up LCB access - must be after set_up_interrupts() */
14201        init_lcb_access(dd);
14202
14203        snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14204                 dd->base_guid & 0xFFFFFF);
14205
14206        dd->oui1 = dd->base_guid >> 56 & 0xFF;
14207        dd->oui2 = dd->base_guid >> 48 & 0xFF;
14208        dd->oui3 = dd->base_guid >> 40 & 0xFF;
14209
14210        ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14211        if (ret)
14212                goto bail_clear_intr;
14213        check_fabric_firmware_versions(dd);
14214
14215        thermal_init(dd);
14216
14217        ret = init_cntrs(dd);
14218        if (ret)
14219                goto bail_clear_intr;
14220
14221        ret = init_rcverr(dd);
14222        if (ret)
14223                goto bail_free_cntrs;
14224
14225        ret = eprom_init(dd);
14226        if (ret)
14227                goto bail_free_rcverr;
14228
14229        goto bail;
14230
14231bail_free_rcverr:
14232        free_rcverr(dd);
14233bail_free_cntrs:
14234        free_cntrs(dd);
14235bail_clear_intr:
14236        clean_up_interrupts(dd);
14237bail_cleanup:
14238        hfi1_pcie_ddcleanup(dd);
14239bail_free:
14240        hfi1_free_devdata(dd);
14241        dd = ERR_PTR(ret);
14242bail:
14243        return dd;
14244}
14245
14246static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14247                        u32 dw_len)
14248{
14249        u32 delta_cycles;
14250        u32 current_egress_rate = ppd->current_egress_rate;
14251        /* rates here are in units of 10^6 bits/sec */
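             /*
              * The value returned is the number of extra egress cycles needed
              * so a dw_len packet takes as long as it would at the slower
              * desired rate.
              */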
14252
14253        if (desired_egress_rate == -1)
14254                return 0; /* shouldn't happen */
14255
14256        if (desired_egress_rate >= current_egress_rate)
14257                return 0; /* we can't help go faster, only slower */
14258
14259        delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14260                        egress_cycles(dw_len * 4, current_egress_rate);
14261
14262        return (u16)delta_cycles;
14263}
14264
14265/**
14266 * create_pbc - build a pbc for transmission
14267 * @flags: special case flags or-ed into the built pbc
14268 * @srate_mbs: static rate in Mb/s
14269 * @vl: vl
14270 * @dw_len: dword length (header words + data words + pbc words)
14271 *
14272 * Create a PBC with the given flags, rate, VL, and length.
14273 *
14274 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14275 * for verbs, which does not use this PSM feature.  The lone other caller
14276 * is for the diagnostic interface which calls this if the user does not
14277 * supply their own PBC.
14278 */
14279u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14280               u32 dw_len)
14281{
14282        u64 pbc, delay = 0;
14283
14284        if (unlikely(srate_mbs))
14285                delay = delay_cycles(ppd, srate_mbs, dw_len);
14286
14287        pbc = flags
14288                | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14289                | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14290                | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14291                | (dw_len & PBC_LENGTH_DWS_MASK)
14292                        << PBC_LENGTH_DWS_SHIFT;
14293
14294        return pbc;
14295}
14296
14297#define SBUS_THERMAL    0x4f
14298#define SBUS_THERM_MONITOR_MODE 0x1
14299
14300#define THERM_FAILURE(dev, ret, reason) \
14301        dd_dev_err((dd),                                                \
14302                   "Thermal sensor initialization failed: %s (%d)\n",   \
14303                   (reason), (ret))
14304
14305/*
14306 * Initialize the Avago Thermal sensor.
14307 *
14308 * After initialization, enable polling of thermal sensor through
14309 * SBus interface. In order for this to work, the SBus Master
14310 * firmware has to be loaded due to the fact that the HW polling
14311 * logic uses SBus interrupts, which are not supported with
14312 * default firmware. Otherwise, no data will be returned through
14313 * the ASIC_STS_THERM CSR.
14314 */
14315static int thermal_init(struct hfi1_devdata *dd)
14316{
14317        int ret = 0;
14318
14319        if (dd->icode != ICODE_RTL_SILICON ||
14320            check_chip_resource(dd, CR_THERM_INIT, NULL))
14321                return ret;
14322
14323        ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
14324        if (ret) {
14325                THERM_FAILURE(dd, ret, "Acquire SBus");
14326                return ret;
14327        }
14328
14329        dd_dev_info(dd, "Initializing thermal sensor\n");
14330        /* Disable polling of thermal readings */
14331        write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14332        msleep(100);
14333        /* Thermal Sensor Initialization */
14334        /*    Step 1: Reset the Thermal SBus Receiver */
14335        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14336                                RESET_SBUS_RECEIVER, 0);
14337        if (ret) {
14338                THERM_FAILURE(dd, ret, "Bus Reset");
14339                goto done;
14340        }
14341        /*    Step 2: Set Reset bit in Thermal block */
14342        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14343                                WRITE_SBUS_RECEIVER, 0x1);
14344        if (ret) {
14345                THERM_FAILURE(dd, ret, "Therm Block Reset");
14346                goto done;
14347        }
14348        /*    Step 3: Write clock divider value (100MHz -> 2MHz) */
14349        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14350                                WRITE_SBUS_RECEIVER, 0x32);
14351        if (ret) {
14352                THERM_FAILURE(dd, ret, "Write Clock Div");
14353                goto done;
14354        }
14355        /*    Step 4: Select temperature mode */
14356        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14357                                WRITE_SBUS_RECEIVER,
14358                                SBUS_THERM_MONITOR_MODE);
14359        if (ret) {
14360                THERM_FAILURE(dd, ret, "Write Mode Sel");
14361                goto done;
14362        }
14363        /*    Step 5: De-assert block reset and start conversion */
14364        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14365                                WRITE_SBUS_RECEIVER, 0x2);
14366        if (ret) {
14367                THERM_FAILURE(dd, ret, "Write Reset Deassert");
14368                goto done;
14369        }
14370        /*    Step 5.1: Wait for first conversion (21.5ms per spec) */
14371        msleep(22);
14372
14373        /* Enable polling of thermal readings */
14374        write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
14375
14376        /* Set initialized flag */
14377        ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
14378        if (ret)
14379                THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
14380
14381done:
14382        release_chip_resource(dd, CR_SBUS);
14383        return ret;
14384}
14385
14386static void handle_temp_err(struct hfi1_devdata *dd)
14387{
14388        struct hfi1_pportdata *ppd = &dd->pport[0];
14389        /*
14390         * Thermal Critical Interrupt
14391         * Put the device into forced freeze mode, take link down to
14392         * offline, and put DC into reset.
14393         */
14394        dd_dev_emerg(dd,
14395                     "Critical temperature reached! Forcing device into freeze mode!\n");
14396        dd->flags |= HFI1_FORCED_FREEZE;
14397        start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
14398        /*
14399         * Shut DC down as much and as quickly as possible.
14400         *
14401         * Step 1: Take the link down to OFFLINE. This will cause the
14402         *         8051 to put the Serdes in reset. However, we don't want to
14403         *         go through the entire link state machine since we want to
14404         *         shut down ASAP. Furthermore, this is not a graceful shutdown
14405         *         but rather an attempt to save the chip.
14406         *         Code below is almost the same as quiet_serdes() but avoids
14407         *         all the extra work and the sleeps.
14408         */
14409        ppd->driver_link_ready = 0;
14410        ppd->link_enabled = 0;
14411        set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
14412                                PLS_OFFLINE);
14413        /*
14414         * Step 2: Shutdown LCB and 8051
14415         *         After shutdown, do not restore DC_CFG_RESET value.
14416         */
14417        dc_shutdown(dd);
14418}
14419